diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile deleted file mode 100644 index 748efc4d9..000000000 --- a/.devcontainer/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -FROM freqtradeorg/freqtrade:develop_freqairl - -USER root -# Install dependencies -COPY requirements-dev.txt /freqtrade/ - -RUN apt-get update \ - && apt-get -y install --no-install-recommends apt-utils dialog \ - && apt-get -y install --no-install-recommends git sudo vim build-essential \ - && apt-get clean \ - && mkdir -p /home/ftuser/.vscode-server /home/ftuser/.vscode-server-insiders /home/ftuser/commandhistory \ - && echo "export PROMPT_COMMAND='history -a'" >> /home/ftuser/.bashrc \ - && echo "export HISTFILE=~/commandhistory/.bash_history" >> /home/ftuser/.bashrc \ - && chown ftuser:ftuser -R /home/ftuser/.local/ \ - && chown ftuser: -R /home/ftuser/ - -USER ftuser - -RUN pip install --user autopep8 -r docs/requirements-docs.txt -r requirements-dev.txt --no-cache-dir - -# Empty the ENTRYPOINT to allow all commands -ENTRYPOINT [] diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 08b8240b9..a480ae1eb 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,42 +1,44 @@ { "name": "freqtrade Develop", - "build": { - "dockerfile": "Dockerfile", - "context": ".." - }, + "image": "ghcr.io/freqtrade/freqtrade-devcontainer:latest", // Use 'forwardPorts' to make a list of ports inside the container available locally. "forwardPorts": [ 8080 ], - "mounts": [ - "source=freqtrade-bashhistory,target=/home/ftuser/commandhistory,type=volume" - ], "workspaceMount": "source=${localWorkspaceFolder},target=/workspaces/freqtrade,type=bind,consistency=cached", // Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root. "remoteUser": "ftuser", - "onCreateCommand": "pip install --user -e .", "postCreateCommand": "freqtrade create-userdir --userdir user_data/", - "workspaceFolder": "/workspaces/freqtrade", "customizations": { - "settings": { - "terminal.integrated.shell.linux": "/bin/bash", - "editor.insertSpaces": true, - "files.trimTrailingWhitespace": true, - "[markdown]": { - "files.trimTrailingWhitespace": false, + "vscode": { + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "editor.insertSpaces": true, + "files.trimTrailingWhitespace": true, + "[markdown]": { + "files.trimTrailingWhitespace": false + }, + "python.pythonPath": "/usr/local/bin/python", + "[python]": { + "editor.codeActionsOnSave": { + "source.organizeImports": "explicit" + }, + "editor.formatOnSave": true, + "editor.defaultFormatter": "charliermarsh.ruff" + } }, - "python.pythonPath": "/usr/local/bin/python", - }, - - // Add the IDs of extensions you want installed when the container is created. - "extensions": [ - "ms-python.python", - "ms-python.vscode-pylance", - "davidanson.vscode-markdownlint", - "ms-azuretools.vscode-docker", - "vscode-icons-team.vscode-icons", - ], + // Add the IDs of extensions you want installed when the container is created. 
+ "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "charliermarsh.ruff", + "davidanson.vscode-markdownlint", + "ms-azuretools.vscode-docker", + "vscode-icons-team.vscode-icons", + "github.vscode-github-actions", + ], + } } } diff --git a/.github/.devcontainer/Dockerfile b/.github/.devcontainer/Dockerfile new file mode 100644 index 000000000..1deab0f54 --- /dev/null +++ b/.github/.devcontainer/Dockerfile @@ -0,0 +1,21 @@ +FROM freqtradeorg/freqtrade:develop_freqairl + +USER root +# Install dependencies +COPY requirements-dev.txt /freqtrade/ + +ARG USERNAME=ftuser + +RUN apt-get update \ + && apt-get -y install --no-install-recommends apt-utils dialog git ssh vim build-essential zsh \ + && apt-get clean \ + && mkdir -p /home/${USERNAME}/.vscode-server /home/${USERNAME}/.vscode-server-insiders /home/${USERNAME}/commandhistory \ + && chown ${USERNAME}:${USERNAME} -R /home/${USERNAME}/.local/ \ + && chown ${USERNAME}: -R /home/${USERNAME}/ + +USER ftuser + +RUN pip install --user autopep8 -r docs/requirements-docs.txt -r requirements-dev.txt --no-cache-dir + +# Empty the ENTRYPOINT to allow all commands +ENTRYPOINT [] diff --git a/.github/.devcontainer/devcontainer.json b/.github/.devcontainer/devcontainer.json new file mode 100644 index 000000000..d87ea5fda --- /dev/null +++ b/.github/.devcontainer/devcontainer.json @@ -0,0 +1,12 @@ +{ + "name": "freqtrade Dev container image builder", + "build": { + "dockerfile": "Dockerfile", + "context": "../../" + }, + "features": { + "ghcr.io/devcontainers/features/common-utils:2": { + }, + "ghcr.io/stuartleeks/dev-container-features/shell-history:0.0.3": {} + } +} diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8c9a3f936..ddea42684 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,6 +21,9 @@ updates: pytest: patterns: - "pytest*" + mkdocs: + patterns: + - "mkdocs*" - package-ecosystem: "github-actions" directory: "/" diff --git a/.github/workflows/binance-lev-tier-update.yml b/.github/workflows/binance-lev-tier-update.yml index f06251cca..2e0a3d3b2 100644 --- a/.github/workflows/binance-lev-tier-update.yml +++ b/.github/workflows/binance-lev-tier-update.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Install ccxt run: pip install ccxt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1a87b86cf..5d8368f95 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,11 @@ jobs: - name: Run Ruff run: | - ruff check --output-format=github . + ruff check --output-format=github + + - name: Run Ruff format check + run: | + ruff format --check - name: Mypy run: | @@ -230,7 +234,11 @@ jobs: - name: Run Ruff run: | - ruff check --output-format=github . + ruff check --output-format=github + + - name: Run Ruff format check + run: | + ruff format --check - name: Mypy run: | @@ -300,7 +308,11 @@ jobs: - name: Run Ruff run: | - ruff check --output-format=github . 
+ ruff check --output-format=github + + - name: Run Ruff format check + run: | + ruff format --check - name: Mypy run: | @@ -322,7 +334,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - name: pre-commit dependencies run: | @@ -336,7 +348,7 @@ jobs: - uses: actions/setup-python@v5 with: - python-version: "3.10" + python-version: "3.12" - uses: pre-commit/action@v3.0.1 docs-check: @@ -351,7 +363,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Documentation build run: | @@ -377,7 +389,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Cache_dependencies uses: actions/cache@v4 @@ -459,7 +471,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Build distribution run: | @@ -530,7 +542,7 @@ jobs: - name: Set up Python uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Extract branch name id: extract-branch @@ -553,12 +565,12 @@ jobs: sudo systemctl restart docker docker version -f '{{.Server.Experimental}}' + - name: Set up QEMU + uses: docker/setup-qemu-action@v1 + - name: Set up Docker Buildx id: buildx - uses: crazy-max/ghaction-docker-buildx@v3.3.1 - with: - buildx-version: latest - qemu-version: latest + uses: docker/setup-buildx-action@v1 - name: Available platforms run: echo ${{ steps.buildx.outputs.platforms }} diff --git a/.github/workflows/devcontainer-build.yml b/.github/workflows/devcontainer-build.yml new file mode 100644 index 000000000..e3b510b03 --- /dev/null +++ b/.github/workflows/devcontainer-build.yml @@ -0,0 +1,45 @@ +name: Devcontainer Pre-Build + +on: + workflow_dispatch: + schedule: + - cron: "0 3 * * 0" + # push: + # branches: + # - "master" + # tags: + # - "v*.*.*" + # pull_requests: + # branches: + # - "master" + +concurrency: + group: "${{ github.workflow }}" + cancel-in-progress: true + +permissions: + packages: write + +jobs: + build-and-push: + runs-on: ubuntu-latest + steps: + - + name: Checkout + id: checkout + uses: actions/checkout@v4 + - + name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - + name: Pre-build dev container image + uses: devcontainers/ci@v0.3 + with: + subFolder: .github + imageName: ghcr.io/${{ github.repository }}-devcontainer + cacheFrom: ghcr.io/${{ github.repository }}-devcontainer + push: always diff --git a/.github/workflows/pre-commit-update.yml b/.github/workflows/pre-commit-update.yml index 69f5dbb4e..d30fdd1bf 100644 --- a/.github/workflows/pre-commit-update.yml +++ b/.github/workflows/pre-commit-update.yml @@ -17,7 +17,7 @@ jobs: - uses: actions/setup-python@v5 with: - python-version: "3.11" + python-version: "3.12" - name: Install pre-commit @@ -26,9 +26,6 @@ jobs: - name: Run auto-update run: pre-commit autoupdate - - name: Run pre-commit - run: pre-commit run --all-files - - uses: peter-evans/create-pull-request@v6 with: token: ${{ secrets.REPO_SCOPED_TOKEN }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f88d94072..af54ccf7d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,17 +9,17 @@ repos: # stages: [push] - repo: https://github.com/pre-commit/mirrors-mypy - rev: "v1.9.0" + rev: "v1.10.0" hooks: - id: mypy exclude: 
build_helpers additional_dependencies: - types-cachetools==5.3.0.7 - types-filelock==3.2.7 - - types-requests==2.31.0.20240406 + - types-requests==2.32.0.20240523 - types-tabulate==0.9.0.20240106 - types-python-dateutil==2.9.0.20240316 - - SQLAlchemy==2.0.29 + - SQLAlchemy==2.0.30 # stages: [push] - repo: https://github.com/pycqa/isort @@ -31,7 +31,7 @@ repos: - repo: https://github.com/charliermarsh/ruff-pre-commit # Ruff version. - rev: 'v0.4.1' + rev: 'v0.4.5' hooks: - id: ruff @@ -56,7 +56,7 @@ repos: )$ - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.3.0 hooks: - id: codespell additional_dependencies: diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 000000000..10c16da00 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,11 @@ +{ + "recommendations": [ + "ms-python.python", + "ms-python.vscode-pylance", + "charliermarsh.ruff", + "davidanson.vscode-markdownlint", + "ms-azuretools.vscode-docker", + "vscode-icons-team.vscode-icons", + "github.vscode-github-actions", + ] +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f9ab29592..66ba4f830 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -72,12 +72,12 @@ you can manually run pre-commit with `pre-commit run -a`. mypy freqtrade ``` -### 4. Ensure all imports are correct +### 4. Ensure formatting is correct -#### Run isort +#### Run ruff ``` bash -isort . +ruff format . ``` ## (Core)-Committer Guide diff --git a/README.md b/README.md index c6e54b112..d7ab7c05c 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even - [X] [Binance](https://www.binance.com/) - [X] [Bitmart](https://bitmart.com/) +- [X] [BingX](https://bingx.com/invite/0EM9RX) - [X] [Gate.io](https://www.gate.io/ref/6266643) - [X] [HTX](https://www.htx.com/) (Former Huobi) - [X] [Kraken](https://kraken.com/) diff --git a/build_helpers/TA_Lib-0.4.28-cp310-cp310-win_amd64.whl b/build_helpers/TA_Lib-0.4.28-cp310-cp310-win_amd64.whl deleted file mode 100644 index cb7fdf907..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp310-cp310-win_amd64.whl and /dev/null differ diff --git a/build_helpers/TA_Lib-0.4.28-cp311-cp311-linux_armv7l.whl b/build_helpers/TA_Lib-0.4.28-cp311-cp311-linux_armv7l.whl deleted file mode 100644 index f88bc8002..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp311-cp311-linux_armv7l.whl and /dev/null differ diff --git a/build_helpers/TA_Lib-0.4.28-cp311-cp311-win_amd64.whl b/build_helpers/TA_Lib-0.4.28-cp311-cp311-win_amd64.whl deleted file mode 100644 index 4cb902191..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp311-cp311-win_amd64.whl and /dev/null differ diff --git a/build_helpers/TA_Lib-0.4.28-cp312-cp312-win_amd64.whl b/build_helpers/TA_Lib-0.4.28-cp312-cp312-win_amd64.whl deleted file mode 100644 index 81ccc4818..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp312-cp312-win_amd64.whl and /dev/null differ diff --git a/build_helpers/TA_Lib-0.4.28-cp39-cp39-linux_armv7l.whl b/build_helpers/TA_Lib-0.4.28-cp39-cp39-linux_armv7l.whl deleted file mode 100644 index 596b5923d..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp39-cp39-linux_armv7l.whl and /dev/null differ diff --git a/build_helpers/TA_Lib-0.4.28-cp39-cp39-win_amd64.whl b/build_helpers/TA_Lib-0.4.28-cp39-cp39-win_amd64.whl deleted file mode 100644 index 64a61ff0d..000000000 Binary files a/build_helpers/TA_Lib-0.4.28-cp39-cp39-win_amd64.whl and /dev/null differ diff --git 
a/build_helpers/TA_Lib-0.4.29-cp310-cp310-win_amd64.whl b/build_helpers/TA_Lib-0.4.29-cp310-cp310-win_amd64.whl new file mode 100644 index 000000000..f7b8ca8ee Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp310-cp310-win_amd64.whl differ diff --git a/build_helpers/TA_Lib-0.4.29-cp311-cp311-linux_armv7l.whl b/build_helpers/TA_Lib-0.4.29-cp311-cp311-linux_armv7l.whl new file mode 100644 index 000000000..4ec20aa11 Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp311-cp311-linux_armv7l.whl differ diff --git a/build_helpers/TA_Lib-0.4.29-cp311-cp311-win_amd64.whl b/build_helpers/TA_Lib-0.4.29-cp311-cp311-win_amd64.whl new file mode 100644 index 000000000..509a5e710 Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp311-cp311-win_amd64.whl differ diff --git a/build_helpers/TA_Lib-0.4.29-cp312-cp312-win_amd64.whl b/build_helpers/TA_Lib-0.4.29-cp312-cp312-win_amd64.whl new file mode 100644 index 000000000..7a4ef378a Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp312-cp312-win_amd64.whl differ diff --git a/build_helpers/TA_Lib-0.4.29-cp39-cp39-linux_armv7l.whl b/build_helpers/TA_Lib-0.4.29-cp39-cp39-linux_armv7l.whl new file mode 100644 index 000000000..ec8d7c119 Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp39-cp39-linux_armv7l.whl differ diff --git a/build_helpers/TA_Lib-0.4.29-cp39-cp39-win_amd64.whl b/build_helpers/TA_Lib-0.4.29-cp39-cp39-win_amd64.whl new file mode 100644 index 000000000..cc50b4be4 Binary files /dev/null and b/build_helpers/TA_Lib-0.4.29-cp39-cp39-win_amd64.whl differ diff --git a/build_helpers/binance_update_lev_tiers.py b/build_helpers/binance_update_lev_tiers.py index c0f3a7c1c..6d5fa5c1d 100644 --- a/build_helpers/binance_update_lev_tiers.py +++ b/build_helpers/binance_update_lev_tiers.py @@ -6,21 +6,18 @@ from pathlib import Path import ccxt -key = os.environ.get('FREQTRADE__EXCHANGE__KEY') -secret = os.environ.get('FREQTRADE__EXCHANGE__SECRET') +key = os.environ.get("FREQTRADE__EXCHANGE__KEY") +secret = os.environ.get("FREQTRADE__EXCHANGE__SECRET") -proxy = os.environ.get('CI_WEB_PROXY') +proxy = os.environ.get("CI_WEB_PROXY") -exchange = ccxt.binance({ - 'apiKey': key, - 'secret': secret, - 'httpsProxy': proxy, - 'options': {'defaultType': 'swap'} - }) +exchange = ccxt.binance( + {"apiKey": key, "secret": secret, "httpsProxy": proxy, "options": {"defaultType": "swap"}} +) _ = exchange.load_markets() lev_tiers = exchange.fetch_leverage_tiers() # Assumes this is running in the root of the repository. 
-file = Path('freqtrade/exchange/binance_leverage_tiers.json') -json.dump(dict(sorted(lev_tiers.items())), file.open('w'), indent=2) +file = Path("freqtrade/exchange/binance_leverage_tiers.json") +json.dump(dict(sorted(lev_tiers.items())), file.open("w"), indent=2) diff --git a/build_helpers/freqtrade_client_version_align.py b/build_helpers/freqtrade_client_version_align.py index 3e2c32e20..3e425100c 100755 --- a/build_helpers/freqtrade_client_version_align.py +++ b/build_helpers/freqtrade_client_version_align.py @@ -1,18 +1,15 @@ #!/usr/bin/env python3 -from freqtrade_client import __version__ as client_version - from freqtrade import __version__ as ft_version +from freqtrade_client import __version__ as client_version def main(): if ft_version != client_version: - print(f"Versions do not match: \n" - f"ft: {ft_version} \n" - f"client: {client_version}") + print(f"Versions do not match: \nft: {ft_version} \nclient: {client_version}") exit(1) print(f"Versions match: ft: {ft_version}, client: {client_version}") exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/build_helpers/pre_commit_update.py b/build_helpers/pre_commit_update.py index 7774523d2..9d313efd2 100644 --- a/build_helpers/pre_commit_update.py +++ b/build_helpers/pre_commit_update.py @@ -6,28 +6,30 @@ from pathlib import Path import yaml -pre_commit_file = Path('.pre-commit-config.yaml') -require_dev = Path('requirements-dev.txt') -require = Path('requirements.txt') +pre_commit_file = Path(".pre-commit-config.yaml") +require_dev = Path("requirements-dev.txt") +require = Path("requirements.txt") -with require_dev.open('r') as rfile: +with require_dev.open("r") as rfile: requirements = rfile.readlines() -with require.open('r') as rfile: +with require.open("r") as rfile: requirements.extend(rfile.readlines()) # Extract types only -type_reqs = [r.strip('\n') for r in requirements if r.startswith( - 'types-') or r.startswith('SQLAlchemy')] +type_reqs = [ + r.strip("\n") for r in requirements if r.startswith("types-") or r.startswith("SQLAlchemy") +] -with pre_commit_file.open('r') as file: - f = yaml.load(file, Loader=yaml.FullLoader) +with pre_commit_file.open("r") as file: + f = yaml.load(file, Loader=yaml.SafeLoader) -mypy_repo = [repo for repo in f['repos'] if repo['repo'] - == 'https://github.com/pre-commit/mirrors-mypy'] +mypy_repo = [ + repo for repo in f["repos"] if repo["repo"] == "https://github.com/pre-commit/mirrors-mypy" +] -hooks = mypy_repo[0]['hooks'][0]['additional_dependencies'] +hooks = mypy_repo[0]["hooks"][0]["additional_dependencies"] errors = [] for hook in hooks: diff --git a/build_helpers/pyarrow-16.0.0-cp311-cp311-linux_armv7l.whl b/build_helpers/pyarrow-16.1.0-cp311-cp311-linux_armv7l.whl similarity index 66% rename from build_helpers/pyarrow-16.0.0-cp311-cp311-linux_armv7l.whl rename to build_helpers/pyarrow-16.1.0-cp311-cp311-linux_armv7l.whl index f8022ffbf..345f2dd63 100644 Binary files a/build_helpers/pyarrow-16.0.0-cp311-cp311-linux_armv7l.whl and b/build_helpers/pyarrow-16.1.0-cp311-cp311-linux_armv7l.whl differ diff --git a/build_helpers/pyarrow-16.0.0-cp39-cp39-linux_armv7l.whl b/build_helpers/pyarrow-16.1.0-cp39-cp39-linux_armv7l.whl similarity index 66% rename from build_helpers/pyarrow-16.0.0-cp39-cp39-linux_armv7l.whl rename to build_helpers/pyarrow-16.1.0-cp39-cp39-linux_armv7l.whl index c1cbf19de..ffbe09bf4 100644 Binary files a/build_helpers/pyarrow-16.0.0-cp39-cp39-linux_armv7l.whl and b/build_helpers/pyarrow-16.1.0-cp39-cp39-linux_armv7l.whl differ diff 
--git a/docker/Dockerfile.armhf b/docker/Dockerfile.armhf index 1165f305c..688254122 100644 --- a/docker/Dockerfile.armhf +++ b/docker/Dockerfile.armhf @@ -35,7 +35,7 @@ COPY build_helpers/* /tmp/ COPY --chown=ftuser:ftuser requirements.txt /freqtrade/ USER ftuser RUN pip install --user --no-cache-dir numpy \ - && pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib==0.4.28 \ + && pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib \ && pip install --user --no-cache-dir -r requirements.txt # Copy dependencies to runtime-image diff --git a/docs/assets/freqUI-backtesting-dark.png b/docs/assets/freqUI-backtesting-dark.png new file mode 100644 index 000000000..1420d8121 Binary files /dev/null and b/docs/assets/freqUI-backtesting-dark.png differ diff --git a/docs/assets/freqUI-backtesting-light.png b/docs/assets/freqUI-backtesting-light.png new file mode 100644 index 000000000..b0769219d Binary files /dev/null and b/docs/assets/freqUI-backtesting-light.png differ diff --git a/docs/assets/freqUI-plot-configurator-dark.png b/docs/assets/freqUI-plot-configurator-dark.png new file mode 100644 index 000000000..6c9ce2901 Binary files /dev/null and b/docs/assets/freqUI-plot-configurator-dark.png differ diff --git a/docs/assets/freqUI-plot-configurator-light.png b/docs/assets/freqUI-plot-configurator-light.png new file mode 100644 index 000000000..7d007c82d Binary files /dev/null and b/docs/assets/freqUI-plot-configurator-light.png differ diff --git a/docs/assets/freqUI-trade-pane-dark.png b/docs/assets/freqUI-trade-pane-dark.png new file mode 100644 index 000000000..0573e4b04 Binary files /dev/null and b/docs/assets/freqUI-trade-pane-dark.png differ diff --git a/docs/assets/freqUI-trade-pane-light.png b/docs/assets/freqUI-trade-pane-light.png new file mode 100644 index 000000000..84f137ef3 Binary files /dev/null and b/docs/assets/freqUI-trade-pane-light.png differ diff --git a/docs/assets/freqUI-trade-pane.png b/docs/assets/freqUI-trade-pane.png new file mode 100644 index 000000000..96a6ae718 Binary files /dev/null and b/docs/assets/freqUI-trade-pane.png differ diff --git a/docs/assets/frequi-login-CORS-light.png b/docs/assets/frequi-login-CORS-light.png new file mode 100644 index 000000000..019ae4e8c Binary files /dev/null and b/docs/assets/frequi-login-CORS-light.png differ diff --git a/docs/assets/frequi-login-CORS.png b/docs/assets/frequi-login-CORS.png new file mode 100644 index 000000000..56c8e829e Binary files /dev/null and b/docs/assets/frequi-login-CORS.png differ diff --git a/docs/assets/frequi-settings-dark.png b/docs/assets/frequi-settings-dark.png new file mode 100644 index 000000000..459998b80 Binary files /dev/null and b/docs/assets/frequi-settings-dark.png differ diff --git a/docs/assets/frequi-settings-light.png b/docs/assets/frequi-settings-light.png new file mode 100644 index 000000000..03c27e798 Binary files /dev/null and b/docs/assets/frequi-settings-light.png differ diff --git a/docs/backtesting.md b/docs/backtesting.md index 6cfc9597f..5fdfd6556 100644 --- a/docs/backtesting.md +++ b/docs/backtesting.md @@ -522,8 +522,8 @@ To save time, by default backtest will reuse a cached result from within the las ### Further backtest-result analysis -To further analyze your backtest results, you can [export the trades](#exporting-trades-to-file). -You can then load the trades to perform further analysis as shown in the [data analysis](data-analysis.md#backtesting) backtesting section. 
+To further analyze your backtest results, freqtrade will export the trades to file by default. +You can then load the trades to perform further analysis as shown in the [data analysis](strategy_analysis_example.md#load-backtest-results-to-pandas-dataframe) backtesting section. ## Assumptions made by backtesting @@ -531,12 +531,13 @@ Since backtesting lacks some detailed information about what happens within a ca - Exchange [trading limits](#trading-limits-in-backtesting) are respected - Entries happen at open-price -- All orders are filled at the requested price (no slippage, no unfilled orders) +- All orders are filled at the requested price (no slippage) as long as the price is within the candle's high/low range - Exit-signal exits happen at open-price of the consecutive candle +- Exits don't free their trade slot for a new trade until the next candle - Exit-signal is favored over Stoploss, because exit-signals are assumed to trigger on candle's open - ROI - - exits are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the exit will be at 2%) - - exits are never "below the candle", so a ROI of 2% may result in a exit at 2.4% if low was at 2.4% profit + - Exits are compared to high - but the ROI value is used (e.g. ROI = 2%, high=5% - so the exit will be at 2%) + - Exits are never "below the candle", so a ROI of 2% may result in a exit at 2.4% if low was at 2.4% profit - ROI entries which came into effect on the triggering candle (e.g. `120: 0.02` for 1h candles, from `60: 0.05`) will use the candle's open as exit rate - Force-exits caused by `=-1` ROI entries use low as exit value, unless N falls on the candle open (e.g. `120: -1` for 1h candles) - Stoploss exits happen exactly at stoploss price, even if low was lower, but the loss will be `2 * fees` higher than the stoploss price diff --git a/docs/configuration.md b/docs/configuration.md index 6a85f445e..e2501cf48 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -197,7 +197,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `position_adjustment_enable` | Enables the strategy to use position adjustments (additional buys or sells). [More information here](strategy-callbacks.md#adjust-trade-position).
[Strategy Override](#parameters-in-the-strategy).
*Defaults to `false`.*
**Datatype:** Boolean | `max_entry_position_adjustment` | Maximum additional order(s) for each open trade on top of the first entry Order. Set it to `-1` for unlimited additional orders. [More information here](strategy-callbacks.md#adjust-trade-position).
[Strategy Override](#parameters-in-the-strategy).
*Defaults to `-1`.*
**Datatype:** Positive Integer or -1 | | **Exchange** -| `exchange.name` | **Required.** Name of the exchange class to use. [List below](#user-content-what-values-for-exchangename).
**Datatype:** String +| `exchange.name` | **Required.** Name of the exchange class to use.
**Datatype:** String | `exchange.key` | API key to use for the exchange. Only required when you are in production mode.
**Keep it in secret, do not disclose publicly.**
**Datatype:** String | `exchange.secret` | API secret to use for the exchange. Only required when you are in production mode.
**Keep it in secret, do not disclose publicly.**
**Datatype:** String | `exchange.password` | API password to use for the exchange. Only required when you are in production mode and for exchanges that use password for API requests.
**Keep it in secret, do not disclose publicly.**
**Datatype:** String @@ -252,7 +252,7 @@ Mandatory parameters are marked as **Required**, which means that they are requi | `disable_dataframe_checks` | Disable checking the OHLCV dataframe returned from the strategy methods for correctness. Only use when intentionally changing the dataframe and understand what you are doing. [Strategy Override](#parameters-in-the-strategy).
*Defaults to `False`*.
**Datatype:** Boolean | `internals.process_throttle_secs` | Set the process throttle, or minimum loop duration for one bot iteration loop. Value in second.
*Defaults to `5` seconds.*
**Datatype:** Positive Integer | `internals.heartbeat_interval` | Print heartbeat message every N seconds. Set to 0 to disable heartbeat messages.
*Defaults to `60` seconds.*
**Datatype:** Positive Integer or 0 -| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](installation.md#7-optional-configure-freqtrade-as-a-systemd-service) for more details.
**Datatype:** Boolean +| `internals.sd_notify` | Enables use of the sd_notify protocol to tell systemd service manager about changes in the bot state and issue keep-alive pings. See [here](advanced-setup.md#configure-the-bot-running-as-a-systemd-service) for more details.
**Datatype:** Boolean | `strategy` | **Required** Defines Strategy class to use. Recommended to be set via `--strategy NAME`.
**Datatype:** ClassName | `strategy_path` | Adds an additional strategy lookup path (must be a directory).
**Datatype:** String | `recursive_strategy_search` | Set to `true` to recursively search sub-directories inside `user_data/strategies` for a strategy.
**Datatype:** Boolean @@ -370,7 +370,7 @@ This setting works in combination with `max_open_trades`. The maximum capital en For example, the bot will at most use (0.05 BTC x 3) = 0.15 BTC, assuming a configuration of `max_open_trades=3` and `stake_amount=0.05`. !!! Note - This setting respects the [available balance configuration](#available-balance). + This setting respects the [available balance configuration](#tradable-balance). #### Dynamic stake amount @@ -568,7 +568,14 @@ The possible values are: `GTC` (default), `FOK` or `IOC`. This is ongoing work. For now, it is supported only for binance, gate and kucoin. Please don't change the default value unless you know what you are doing and have researched the impact of using different values for your particular exchange. -### What values can be used for fiat_display_currency? +### Fiat conversion + +Freqtrade uses the Coingecko API to convert the coin value to its corresponding fiat value for the Telegram reports. +The FIAT currency can be set in the configuration file as `fiat_display_currency`. + +Removing `fiat_display_currency` completely from the configuration will skip initializing coingecko, and will not show any FIAT currency conversion. This has no importance for the correct functioning of the bot. + +#### What values can be used for fiat_display_currency? The `fiat_display_currency` configuration parameter sets the base currency to use for the conversion from coin to fiat in the bot Telegram reports. @@ -587,7 +594,25 @@ The valid values are: "BTC", "ETH", "XRP", "LTC", "BCH", "BNB" ``` -Removing `fiat_display_currency` completely from the configuration will skip initializing coingecko, and will not show any FIAT currency conversion. This has no importance for the correct functioning of the bot. +#### Coingecko Rate limit problems + +On some IP ranges, coingecko is heavily rate-limiting. +In such cases, you may want to add your coingecko API key to the configuration. + +``` json +{ + "fiat_display_currency": "USD", + "coingecko": { + "api_key": "your-api", + "is_demo": true + } +} +``` + +Freqtrade supports both Demo and Pro coingecko API keys. + +The Coingecko API key is NOT required for the bot to function correctly. +It is only used for the conversion of coin to fiat in the Telegram reports, which usually also works without an API key. ## Using Dry-run mode diff --git a/docs/data-download.md b/docs/data-download.md index 890a89b60..2a51edb0b 100644 --- a/docs/data-download.md +++ b/docs/data-download.md @@ -24,10 +24,10 @@ usage: freqtrade download-data [-h] [-v] [--logfile FILE] [-V] [-c PATH] [--days INT] [--new-pairs-days INT] [--include-inactive-pairs] [--timerange TIMERANGE] [--dl-trades] - [--exchange EXCHANGE] + [--convert] [--exchange EXCHANGE] [-t TIMEFRAMES [TIMEFRAMES ...]] [--erase] [--data-format-ohlcv {json,jsongz,hdf5,feather,parquet}] - [--data-format-trades {json,jsongz,hdf5,feather}] + [--data-format-trades {json,jsongz,hdf5,feather,parquet}] [--trading-mode {spot,margin,futures}] [--prepend] @@ -48,6 +48,11 @@ options: --dl-trades Download trades instead of OHLCV data. The bot will resample trades to the desired timeframe as specified as --timeframes/-t. + --convert Convert downloaded trades to OHLCV data. Only + applicable in combination with `--dl-trades`. Will be + automatic for exchanges which don't have historic + OHLCV (e.g. Kraken). If not provided, use `trades-to- + ohlcv` to convert trades data to OHLCV data. --exchange EXCHANGE Exchange name. Only valid if no config is provided.
-t TIMEFRAMES [TIMEFRAMES ...], --timeframes TIMEFRAMES [TIMEFRAMES ...] Specify which tickers to download. Space-separated @@ -57,7 +62,7 @@ options: --data-format-ohlcv {json,jsongz,hdf5,feather,parquet} Storage format for downloaded candle (OHLCV) data. (default: `feather`). - --data-format-trades {json,jsongz,hdf5,feather} + --data-format-trades {json,jsongz,hdf5,feather,parquet} Storage format for downloaded trades data. (default: `feather`). --trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures} @@ -471,15 +476,20 @@ ETH/USDT 5m, 15m, 30m, 1h, 2h, 4h ## Trades (tick) data -By default, `download-data` sub-command downloads Candles (OHLCV) data. Some exchanges also provide historic trade-data via their API. +By default, `download-data` sub-command downloads Candles (OHLCV) data. Most exchanges also provide historic trade-data via their API. This data can be useful if you need many different timeframes, since it is only downloaded once, and then resampled locally to the desired timeframes. -Since this data is large by default, the files use the feather fileformat by default. They are stored in your data-directory with the naming convention of `-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository. +Since this data is large, the files use the feather file format by default. They are stored in your data-directory with the naming convention of `-trades.feather` (`ETH_BTC-trades.feather`). Incremental mode is also supported, as for historic OHLCV data, so downloading the data once per week with `--days 8` will create an incremental data-repository. -To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades, and resamples the data locally. +To use this mode, simply add `--dl-trades` to your call. This will swap the download method to download trades. +If `--convert` is also provided, the resample step will happen automatically and overwrite any existing OHLCV data for the given pair/timeframe combinations. -!!! Warning "do not use" - You should not use this unless you're a kraken user. Most other exchanges provide OHLCV data with sufficient history. +!!! Warning "Do not use" + You should not use this unless you're a kraken user (Kraken does not provide historic OHLCV data). + Most other exchanges provide OHLCV data with sufficient history, so downloading multiple timeframes through that method will still prove to be a lot faster than downloading trades data. + +!!! Note "Kraken user" + Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data. Example call: @@ -490,12 +500,6 @@ freqtrade download-data --exchange kraken --pairs XRP/EUR ETH/EUR --days 20 --dl !!! Note While this method uses async calls, it will be slow, since it requires the result of the previous call to generate the next request to the exchange. -!!! Warning - The historic trades are not available during Freqtrade dry-run and live trade modes because all exchanges tested provide this data with a delay of few 100 candles, so it's not suitable for real-time trading. - -!!! Note "Kraken user" - Kraken users should read [this](exchanges.md#historic-kraken-data) before starting to download data. ## Next step -Great, you now have backtest data downloaded, so you can now start [backtesting](backtesting.md) your strategy.
+Great, you now have some data downloaded, so you can now start [backtesting](backtesting.md) your strategy. diff --git a/docs/developer.md b/docs/developer.md index f1218471f..705e8d116 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -83,7 +83,7 @@ Details will obviously vary between setups - but this should work to get you sta ``` json { "name": "freqtrade trade", - "type": "python", + "type": "debugpy", "request": "launch", "module": "freqtrade", "console": "integratedTerminal", diff --git a/docs/exchanges.md b/docs/exchanges.md index a8c4a8b4f..f3550e97e 100644 --- a/docs/exchanges.md +++ b/docs/exchanges.md @@ -127,6 +127,13 @@ These settings will be checked on startup, and freqtrade will show an error if t Freqtrade will not attempt to change these settings. +## Bingx + +BingX supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (Post only) settings. + +!!! Tip "Stoploss on Exchange" + Bingx supports `stoploss_on_exchange` and can use both stop-limit and stop-market orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange. + ## Kraken Kraken supports [time_in_force](configuration.md#understand-order_time_in_force) with settings "GTC" (good till cancelled), "IOC" (immediate-or-cancel) and "PO" (Post only) settings. @@ -299,7 +306,7 @@ $ pip3 install web3 Most exchanges return current incomplete candle via their OHLCV/klines API interface. By default, Freqtrade assumes that incomplete candle is fetched from the exchange and removes the last candle assuming it's the incomplete candle. -Whether your exchange returns incomplete candles or not can be checked using [the helper script](developer.md#Incomplete-candles) from the Contributor documentation. +Whether your exchange returns incomplete candles or not can be checked using [the helper script](developer.md#incomplete-candles) from the Contributor documentation. Due to the danger of repainting, Freqtrade does not allow you to use this incomplete candle. diff --git a/docs/freq-ui.md b/docs/freq-ui.md new file mode 100644 index 000000000..9b319d808 --- /dev/null +++ b/docs/freq-ui.md @@ -0,0 +1,85 @@ +# FreqUI + +Freqtrade provides a builtin webserver, which can serve [FreqUI](https://github.com/freqtrade/frequi), the freqtrade frontend. + +By default, the UI is automatically installed as part of the installation (script, docker). +freqUI can also be manually installed by using the `freqtrade install-ui` command. +This same command can also be used to update freqUI to new new releases. + +Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured API port (by default `http://127.0.0.1:8080`). + +??? Note "Looking to contribute to freqUI?" + Developers should not use this method, but instead clone the corresponding use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI. A working installation of node will be required to build the frontend. + +!!! tip "freqUI is not required to run freqtrade" + freqUI is an optional component of freqtrade, and is not required to run the bot. + It is a frontend that can be used to monitor the bot and to interact with it - but freqtrade itself will work perfectly fine without it. + +## Configuration + +FreqUI does not have it's own configuration file - but assumes a working setup for the [rest-api](rest-api.md) is available. 
+Please refer to the corresponding documentation page to get setup with freqUI + +## UI + +FreqUI is a modern, responsive web application that can be used to monitor and interact with your bot. + +FreqUI provides a light, as well as a dark theme. +Themes can be easily switched via a prominent button at the top of the page. +The theme of the screenshots on this page will adapt to the selected documentation Theme, so to see the dark (or light) version, please switch the theme of the Documentation. + +### Login + +The below screenshot shows the login screen of freqUI. + +![FreqUI - login](assets/frequi-login-CORS.png#only-dark) +![FreqUI - login](assets/frequi-login-CORS-light.png#only-light) + +!!! Hint "CORS" + The Cors error shown in this screenshot is due to the fact that the UI is running on a different port than the API, and [CORS](#cors) has not been setup correctly yet. + +### Trade view + +The trade view allows you to visualize the trades that the bot is making and to interact with the bot. +On this page, you can also interact with the bot by starting and stopping it and - if configured - force trade entries and exits. + +![FreqUI - trade view](assets/freqUI-trade-pane-dark.png#only-dark) +![FreqUI - trade view](assets/freqUI-trade-pane-light.png#only-light) + +### Plot Configurator + +FreqUI Plots can be configured either via a `plot_config` configuration object in the strategy (which can be loaded via "from strategy" button) or via the UI. +Multiple plot configurations can be created and switched at will - allowing for flexible, different views into your charts. + +The plot configuration can be accessed via the "Plot Configurator" (Cog icon) button in the top right corner of the trade view. + +![FreqUI - plot configuration](assets/freqUI-plot-configurator-dark.png#only-dark) +![FreqUI - plot configuration](assets/freqUI-plot-configurator-light.png#only-light) + +### Settings + + +Several UI related settings can be changed by accessing the settings page. + +Things you can change (among others): + +* Timezone of the UI +* Visualization of open trades as part of the favicon (browser tab) +* Candle colors (up/down -> red/green) +* Enable / disable in-app notification types + +![FreqUI - Settings view](assets/frequi-settings-dark.png#only-dark) +![FreqUI - Settings view](assets/frequi-settings-light.png#only-light) + +## Backtesting + +When freqtrade is started in [webserver mode](utils.md#webserver-mode) (freqtrade started with `freqtrade webserver`), the backtesting view becomes available. +This view allows you to backtest strategies and visualize the results. + +You can also load and visualize previous backtest results, as well as compare the results with each other. + +![FreqUI - Backtesting](assets/freqUI-backtesting-dark.png#only-dark) +![FreqUI - Backtesting](assets/freqUI-backtesting-light.png#only-light) + + +--8<-- "includes/cors.md" diff --git a/docs/freqai-feature-engineering.md b/docs/freqai-feature-engineering.md index 6a1537d91..d25051291 100644 --- a/docs/freqai-feature-engineering.md +++ b/docs/freqai-feature-engineering.md @@ -224,7 +224,7 @@ where $W_i$ is the weight of data point $i$ in a total set of $n$ data points. B ## Building the data pipeline -By default, FreqAI builds a dynamic pipeline based on user congfiguration settings. The default settings are robust and designed to work with a variety of methods. These two steps are a `MinMaxScaler(-1,1)` and a `VarianceThreshold` which removes any column that has 0 variance. 
Users can activate other steps with more configuration parameters. For example if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Finally, noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`. +By default, FreqAI builds a dynamic pipeline based on user configuration settings. The default settings are robust and designed to work with a variety of methods. These two steps are a `MinMaxScaler(-1,1)` and a `VarianceThreshold` which removes any column that has 0 variance. Users can activate other steps with more configuration parameters. For example if users add `use_SVM_to_remove_outliers: true` to the `freqai` config, then FreqAI will automatically add the [`SVMOutlierExtractor`](#identifying-outliers-using-a-support-vector-machine-svm) to the pipeline. Likewise, users can add `principal_component_analysis: true` to the `freqai` config to activate PCA. The [DissimilarityIndex](#identifying-outliers-with-the-dissimilarity-index-di) is activated with `DI_threshold: 1`. Finally, noise can also be added to the data with `noise_standard_deviation: 0.1`. Finally, users can add [DBSCAN](#identifying-outliers-with-dbscan) outlier removal with `use_DBSCAN_to_remove_outliers: true`. !!! note "More information available" Please review the [parameter table](freqai-parameter-table.md) for more information on these parameters. @@ -235,7 +235,7 @@ By default, FreqAI builds a dynamic pipeline based on user congfiguration settin Users are encouraged to customize the data pipeline to their needs by building their own data pipeline. This can be done by simply setting `dk.feature_pipeline` to their desired `Pipeline` object inside their `IFreqaiModel` `train()` function, or if they prefer not to touch the `train()` function, they can override `define_data_pipeline`/`define_label_pipeline` functions in their `IFreqaiModel`: !!! note "More information available" - FreqAI uses the the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API, but adds, among other features, coherence between the X, y, and sample_weight vector point removals, feature removal, feature name following. + FreqAI uses the [`DataSieve`](https://github.com/emergentmethods/datasieve) pipeline, which follows the SKlearn pipeline API, but adds, among other features, coherence between the X, y, and sample_weight vector point removals, feature removal, feature name following. ```python from datasieve.transforms import SKLearnWrapper, DissimilarityIndex @@ -391,3 +391,18 @@ Given a number of data points $N$, and a distance $\varepsilon$, DBSCAN clusters ![dbscan](assets/freqai_dbscan.jpg) FreqAI uses `sklearn.cluster.DBSCAN` (details are available on scikit-learn's webpage [here](https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html) (external website)) with `min_samples` ($N$) taken as 1/4 of the no. of time points (candles) in the feature set. 
`eps` ($\varepsilon$) is computed automatically as the elbow point in the *k-distance graph* computed from the nearest neighbors in the pairwise distances of all data points in the feature set. + + +### Data dimensionality reduction with Principal Component Analysis + +You can reduce the dimensionality of your features by activating the principal_component_analysis in the config: + +```json + "freqai": { + "feature_parameters" : { + "principal_component_analysis": true + } + } +``` + +This will perform PCA on the features and reduce their dimensionality so that the explained variance of the data set is >= 0.999. Reducing data dimensionality makes training the model faster and hence allows for more up-to-date models. diff --git a/docs/freqai-parameter-table.md b/docs/freqai-parameter-table.md index 56043bb0f..8a02faad2 100644 --- a/docs/freqai-parameter-table.md +++ b/docs/freqai-parameter-table.md @@ -36,7 +36,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the | `weight_factor` | Weight training data points according to their recency (see details [here](freqai-feature-engineering.md#weighting-features-for-temporal-importance)).
**Datatype:** Positive float (typically < 1). | `indicator_max_period_candles` | **No longer used (#7325)**. Replaced by `startup_candle_count` which is set in the [strategy](freqai-configuration.md#building-a-freqai-strategy). `startup_candle_count` is timeframe independent and defines the maximum *period* used in `feature_engineering_*()` for indicator creation. FreqAI uses this parameter together with the maximum timeframe in `include_time_frames` to calculate how many data points to download such that the first data point does not include a NaN.
**Datatype:** Positive integer. | `indicator_periods_candles` | Time periods to calculate indicators for. The indicators are added to the base indicator dataset.
**Datatype:** List of positive integers. -| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](#reducing-data-dimensionality-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. +| `principal_component_analysis` | Automatically reduce the dimensionality of the data set using Principal Component Analysis. See details about how it works [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis)
**Datatype:** Boolean.
Default: `False`. | `plot_feature_importances` | Create a feature importance plot for each model for the top/bottom `plot_feature_importances` number of features. Plot is stored in `user_data/models//sub-train-_.html`.
**Datatype:** Integer.
Default: `0`. | `DI_threshold` | Activates the use of the Dissimilarity Index for outlier detection when set to > 0. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di).
**Datatype:** Positive float (typically < 1). | `use_SVM_to_remove_outliers` | Train a support vector machine to detect and remove outliers from the training dataset, as well as from incoming data points. See details about how it works [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm).
**Datatype:** Boolean. diff --git a/docs/hyperopt.md b/docs/hyperopt.md index d3371d771..f88928344 100644 --- a/docs/hyperopt.md +++ b/docs/hyperopt.md @@ -14,8 +14,7 @@ To learn how to get data for the pairs and exchange you're interested in, head o !!! Note Since 2021.4 release you no longer have to write a separate hyperopt class, but can configure the parameters directly in the strategy. - The legacy method is still supported, but it is no longer the recommended way of setting up hyperopt. - The legacy documentation is available at [Legacy Hyperopt](advanced-hyperopt.md#legacy-hyperopt). + The legacy method was supported up to 2021.8 and has been removed in 2021.9. ## Install hyperopt dependencies @@ -765,7 +764,7 @@ Override the `roi_space()` method if you need components of the ROI tables to va A sample for these methods can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces). !!! Note "Reduced search space" - To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, every value more precise than this will usually result in overfitted results. You can however [overriding pre-defined spaces](advanced-hyperopt.md#pverriding-pre-defined-spaces) to change this to your needs. + To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, every value more precise than this will usually result in overfitted results. You can however [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs. ### Understand Hyperopt Stoploss results @@ -807,7 +806,7 @@ If you have the `stoploss_space()` method in your custom hyperopt file, remove i Override the `stoploss_space()` method and define the desired range in it if you need stoploss values to vary in other range during hyperoptimization. A sample for this method can be found in the [overriding pre-defined spaces section](advanced-hyperopt.md#overriding-pre-defined-spaces). !!! Note "Reduced search space" - To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, every value more precise than this will usually result in overfitted results. You can however [overriding pre-defined spaces](advanced-hyperopt.md#pverriding-pre-defined-spaces) to change this to your needs. + To limit the search space further, Decimals are limited to 3 decimal places (a precision of 0.001). This is usually sufficient, every value more precise than this will usually result in overfitted results. You can however [overriding pre-defined spaces](advanced-hyperopt.md#overriding-pre-defined-spaces) to change this to your needs. ### Understand Hyperopt Trailing Stop results diff --git a/docs/includes/cors.md b/docs/includes/cors.md new file mode 100644 index 000000000..f79017747 --- /dev/null +++ b/docs/includes/cors.md @@ -0,0 +1,43 @@ +## CORS + +This whole section is only necessary in cross-origin cases (where you multiple bot API's running on `localhost:8081`, `localhost:8082`, ...), and want to combine them into one FreqUI instance. + +??? info "Technical explanation" + All web-based front-ends are subject to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) - Cross-Origin Resource Sharing. + Since most of the requests to the Freqtrade API must be authenticated, a proper CORS policy is key to avoid security problems. 
+ Also, the standard disallows `*` CORS policies for requests with credentials, so this setting must be set appropriately. + +Users can allow access from different origin URL's to the bot API via the `CORS_origins` configuration setting. +It consists of a list of allowed URL's that are allowed to consume resources from the bot's API. + +Assuming your application is deployed as `https://frequi.freqtrade.io/home/` - this would mean that the following configuration becomes necessary: + +```jsonc +{ + //... + "jwt_secret_key": "somethingrandom", + "CORS_origins": ["https://frequi.freqtrade.io"], + //... +} +``` + +In the following (pretty common) case, FreqUI is accessible on `http://localhost:8080/trade` (this is what you see in your navbar when navigating to freqUI). +![freqUI url](assets/frequi_url.png) + +The correct configuration for this case is `http://localhost:8080` - the main part of the URL including the port. + +```jsonc +{ + //... + "jwt_secret_key": "somethingrandom", + "CORS_origins": ["http://localhost:8080"], + //... +} +``` + +!!! Tip "trailing Slash" + The trailing slash is not allowed in the `CORS_origins` configuration (e.g. `"http://localhots:8080/"`). + Such a configuration will not take effect, and the cors errors will remain. + +!!! Note + We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot. diff --git a/docs/index.md b/docs/index.md index 26debe993..55835f555 100644 --- a/docs/index.md +++ b/docs/index.md @@ -41,6 +41,7 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual, - [X] [Binance](https://www.binance.com/) - [X] [Bitmart](https://bitmart.com/) +- [X] [BingX](https://bingx.com/invite/0EM9RX) - [X] [Gate.io](https://www.gate.io/ref/6266643) - [X] [HTX](https://www.htx.com/) (Former Huobi) - [X] [Kraken](https://kraken.com/) diff --git a/docs/installation.md b/docs/installation.md index a87a3ff4e..f86043fb3 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -51,7 +51,7 @@ These requirements apply to both [Script Installation](#script-installation) and ### Install code We've included/collected install instructions for Ubuntu, MacOS, and Windows. These are guidelines and your success may vary with other distros. -OS Specific steps are listed first, the [Common](#common) section below is necessary for all systems. +OS Specific steps are listed first, the common section below is necessary for all systems. !!! Note Python3.9 or higher and the corresponding pip are assumed to be available. @@ -286,7 +286,7 @@ cd freqtrade #### Freqtrade install: Conda Environment ```bash -conda create --name freqtrade python=3.11 +conda create --name freqtrade python=3.12 ``` !!! Note "Creating Conda Environment" diff --git a/docs/leverage.md b/docs/leverage.md index 09ebf1075..2fbd13145 100644 --- a/docs/leverage.md +++ b/docs/leverage.md @@ -17,7 +17,7 @@ If you already have an existing strategy, please read the [strategy migration gu ## Shorting -Shorting is not possible when trading with [`trading_mode`](#understand-tradingmode) set to `spot`. To short trade, `trading_mode` must be set to `margin`(currently unavailable) or [`futures`](#futures), with [`margin_mode`](#margin-mode) set to `cross`(currently unavailable) or [`isolated`](#isolated-margin-mode) +Shorting is not possible when trading with [`trading_mode`](#leverage-trading-modes) set to `spot`. 
To short trade, `trading_mode` must be set to `margin`(currently unavailable) or [`futures`](#futures), with [`margin_mode`](#margin-mode) set to `cross`(currently unavailable) or [`isolated`](#isolated-margin-mode) For a strategy to short, the strategy class must set the class variable `can_short = True` diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt index 74be94c1a..489370b16 100644 --- a/docs/requirements-docs.txt +++ b/docs/requirements-docs.txt @@ -1,6 +1,6 @@ markdown==3.6 -mkdocs==1.5.3 -mkdocs-material==9.5.18 +mkdocs==1.6.0 +mkdocs-material==9.5.24 mdx_truly_sane_lists==1.3 -pymdown-extensions==10.8 -jinja2==3.1.3 +pymdown-extensions==10.8.1 +jinja2==3.1.4 diff --git a/docs/rest-api.md b/docs/rest-api.md index ab5e9db9f..2b55c2563 100644 --- a/docs/rest-api.md +++ b/docs/rest-api.md @@ -1,16 +1,8 @@ -# REST API & FreqUI +# REST API ## FreqUI -Freqtrade provides a builtin webserver, which can serve [FreqUI](https://github.com/freqtrade/frequi), the freqtrade UI. - -By default, the UI is not included in the installation (except for docker images), and must be installed explicitly with `freqtrade install-ui`. -This same command can also be used to update freqUI, should there be a new release. - -Once the bot is started in trade / dry-run mode (with `freqtrade trade`) - the UI will be available under the configured port below (usually `http://127.0.0.1:8080`). - -!!! Note "developers" - Developers should not use this method, but instead use the method described in the [freqUI repository](https://github.com/freqtrade/frequi) to get the source-code of freqUI. +FreqUI now has it's own dedicated [documentation section](frequi.md) - please refer to that section for all information regarding the FreqUI. ## Configuration @@ -169,7 +161,7 @@ freqtrade-client --config rest_config.json [optional parameters] | `delete_lock ` | Deletes (disables) the lock by id. | `locks add , , [side], [reason]` | Locks a pair until "until". (Until will be rounded up to the nearest timeframe). | `profit` | Display a summary of your profit/loss from close trades and some stats about your performance. -| `forceexit ` | Instantly exits the given trade (Ignoring `minimum_roi`). +| `forceexit [order_type] [amount]` | Instantly exits the given trade (ignoring `minimum_roi`), using the given order type ("market" or "limit", uses your config setting if not specified), and the chosen amount (full sell if not specified). | `forceexit all` | Instantly exits all open trades (Ignoring `minimum_roi`). | `forceenter [rate]` | Instantly enters the given pair. Rate is optional. (`force_entry_enable` must be set to True) | `forceenter [rate]` | Instantly longs or shorts the given pair. Rate is optional. (`force_entry_enable` must be set to True) @@ -488,42 +480,4 @@ Since the access token has a short timeout (15 min) - the `token/refresh` reques {"access_token":"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1ODkxMTk5NzQsIm5iZiI6MTU4OTExOTk3NCwianRpIjoiMDBjNTlhMWUtMjBmYS00ZTk0LTliZjAtNWQwNTg2MTdiZDIyIiwiZXhwIjoxNTg5MTIwODc0LCJpZGVudGl0eSI6eyJ1IjoiRnJlcXRyYWRlciJ9LCJmcmVzaCI6ZmFsc2UsInR5cGUiOiJhY2Nlc3MifQ.1seHlII3WprjjclY6DpRhen0rqdF4j6jbvxIhUFaSbs"} ``` -### CORS - -This whole section is only necessary in cross-origin cases (where you multiple bot API's running on `localhost:8081`, `localhost:8082`, ...), and want to combine them into one FreqUI instance. - -??? 
info "Technical explanation" - All web-based front-ends are subject to [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) - Cross-Origin Resource Sharing. - Since most of the requests to the Freqtrade API must be authenticated, a proper CORS policy is key to avoid security problems. - Also, the standard disallows `*` CORS policies for requests with credentials, so this setting must be set appropriately. - -Users can allow access from different origin URL's to the bot API via the `CORS_origins` configuration setting. -It consists of a list of allowed URL's that are allowed to consume resources from the bot's API. - -Assuming your application is deployed as `https://frequi.freqtrade.io/home/` - this would mean that the following configuration becomes necessary: - -```jsonc -{ - //... - "jwt_secret_key": "somethingrandom", - "CORS_origins": ["https://frequi.freqtrade.io"], - //... -} -``` - -In the following (pretty common) case, FreqUI is accessible on `http://localhost:8080/trade` (this is what you see in your navbar when navigating to freqUI). -![freqUI url](assets/frequi_url.png) - -The correct configuration for this case is `http://localhost:8080` - the main part of the URL including the port. - -```jsonc -{ - //... - "jwt_secret_key": "somethingrandom", - "CORS_origins": ["http://localhost:8080"], - //... -} -``` - -!!! Note - We strongly recommend to also set `jwt_secret_key` to something random and known only to yourself to avoid unauthorized access to your bot. +--8<-- "includes/cors.md" diff --git a/docs/stoploss.md b/docs/stoploss.md index 19683aadd..e0353d4da 100644 --- a/docs/stoploss.md +++ b/docs/stoploss.md @@ -30,6 +30,7 @@ The Order-type will be ignored if only one mode is available. |----------|-------------| | Binance | limit | | Binance Futures | market, limit | +| Bingx | market, limit | | HTX (former Huobi) | limit | | kraken | market, limit | | Gate | limit | @@ -158,7 +159,7 @@ You could also have a default stop loss when you are in the red with your buy (b For example, your default stop loss is -10%, but once you have more than 0% profit (example 0.1%) a different trailing stoploss will be used. !!! Note - If you want the stoploss to only be changed when you break even of making a profit (what most users want) please refer to next section with [offset enabled](#Trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). + If you want the stoploss to only be changed when you break even of making a profit (what most users want) please refer to next section with [offset enabled](#trailing-stop-loss-only-once-the-trade-has-reached-a-certain-offset). Both values require `trailing_stop` to be set to true and `trailing_stop_positive` with a value. diff --git a/docs/strategy-advanced.md b/docs/strategy-advanced.md index c5a17c364..3cd0259f4 100644 --- a/docs/strategy-advanced.md +++ b/docs/strategy-advanced.md @@ -209,7 +209,7 @@ def custom_exit(self, pair: str, trade: Trade, current_time: datetime, current_r ## Exit tag -Similar to [Buy Tagging](#buy-tag), you can also specify a sell tag. +Similar to [Entry Tagging](#enter-tag), you can also specify an exit tag. 
``` python def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame: diff --git a/docs/strategy-callbacks.md b/docs/strategy-callbacks.md index e39a4f102..b1e46d356 100644 --- a/docs/strategy-callbacks.md +++ b/docs/strategy-callbacks.md @@ -167,7 +167,7 @@ During backtesting, `current_rate` (and `current_profit`) are provided against t The absolute value of the return value is used (the sign is ignored), so returning `0.05` or `-0.05` have the same result, a stoploss 5% below the current price. Returning None will be interpreted as "no desire to change", and is the only safe way to return when you'd like to not modify the stoploss. -Stoploss on exchange works similar to `trailing_stop`, and the stoploss on exchange is updated as configured in `stoploss_on_exchange_interval` ([More details about stoploss on exchange](stoploss.md#stop-loss-on-exchange-freqtrade)). +Stoploss on exchange works similar to `trailing_stop`, and the stoploss on exchange is updated as configured in `stoploss_on_exchange_interval` ([More details about stoploss on exchange](stoploss.md#stop-loss-on-exchangefreqtrade)). !!! Note "Use of dates" All time-based calculations should be done based on `current_time` - using `datetime.now()` or `datetime.utcnow()` is discouraged, as this will break backtesting support. @@ -450,7 +450,7 @@ Stoploss values returned from `custom_stoploss()` must specify a percentage rela ``` - Full examples can be found in the [Custom stoploss](strategy-advanced.md#custom-stoploss) section of the Documentation. + Full examples can be found in the [Custom stoploss](strategy-callbacks.md#custom-stoploss) section of the Documentation. !!! Note Providing invalid input to `stoploss_from_open()` may produce "CustomStoploss function did not return valid stoploss" warnings. diff --git a/docs/strategy-customization.md b/docs/strategy-customization.md index 59efc0e02..48f629df5 100644 --- a/docs/strategy-customization.md +++ b/docs/strategy-customization.md @@ -405,7 +405,7 @@ The metadata-dict (available for `populate_entry_trend`, `populate_exit_trend`, Currently this is `pair`, which can be accessed using `metadata['pair']` - and will return a pair in the format `XRP/BTC`. The Metadata-dict should not be modified and does not persist information across multiple calls. -Instead, have a look at the [Storing information](strategy-advanced.md#Storing-information) section. +Instead, have a look at the [Storing information](strategy-advanced.md#storing-information-persistent) section. ## Strategy file loading diff --git a/docs/telegram-usage.md b/docs/telegram-usage.md index f878fc2b3..377479a90 100644 --- a/docs/telegram-usage.md +++ b/docs/telegram-usage.md @@ -53,7 +53,7 @@ You can use bots in telegram groups by just adding them to the group. You can fi } ``` -For the Freqtrade configuration, you can then use the the full value (including `-` if it's there) as string: +For the Freqtrade configuration, you can then use the full value (including `-` if it's there) as string: ```json "chat_id": "-1001332619709" diff --git a/docs/windows_installation.md b/docs/windows_installation.md index cd9007d98..d513c0af5 100644 --- a/docs/windows_installation.md +++ b/docs/windows_installation.md @@ -24,7 +24,7 @@ git clone https://github.com/freqtrade/freqtrade.git Install ta-lib according to the [ta-lib documentation](https://github.com/TA-Lib/ta-lib-python#windows). 
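To make the `custom_stoploss` guidance in the strategy-callbacks.md changes above more concrete, here is a short sketch of such a callback. It is not part of this diff: the class name and the 5% / 2% thresholds are invented for illustration, and the `**kwargs`-based signature plus the `stoploss_from_open()` helper follow the patterns described in the documentation above.

```python
# Illustrative sketch only (not part of this diff): keep the initial stoploss
# until the trade is 5% in profit, then lock the stop 2% above the open rate.
from datetime import datetime
from typing import Optional

from pandas import DataFrame

from freqtrade.persistence import Trade
from freqtrade.strategy import IStrategy, stoploss_from_open


class ExampleTrailingStoploss(IStrategy):
    timeframe = "5m"
    stoploss = -0.10  # the initial stoploss is still required
    use_custom_stoploss = True

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe  # no indicators needed for this illustration

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[:, "enter_long"] = 0  # entry conditions omitted for brevity
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime,
                        current_rate: float, current_profit: float,
                        **kwargs) -> Optional[float]:
        if current_profit > 0.05:
            # Desired stop is 2% above the open rate; stoploss_from_open()
            # converts that into the price-relative value freqtrade expects.
            return stoploss_from_open(0.02, current_profit, is_short=trade.is_short)
        # None means "keep the current stoploss" - the only safe way to
        # signal "no change", as noted in the callback documentation above.
        return None
```

Because the returned value is interpreted relative to `current_rate`, the helper converts the "2% above open" target into the ratio freqtrade expects; returning `None` on the other path leaves the previously set stoploss in place.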
-As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest 3 Python versions (3.9, 3.10 and 3.11) and for 64bit Windows. +As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest 4 Python versions (3.9, 3.10, 3.11 and 3.12) and for 64bit Windows. These Wheels are also used by CI running on windows, and are therefore tested together with freqtrade. Other versions must be downloaded from the above link. diff --git a/freqtrade/__init__.py b/freqtrade/__init__.py index 5c1f5a343..932f9b701 100644 --- a/freqtrade/__init__.py +++ b/freqtrade/__init__.py @@ -1,21 +1,33 @@ -""" Freqtrade bot """ -__version__ = '2024.4-dev' +"""Freqtrade bot""" -if 'dev' in __version__: +__version__ = "2024.6-dev" + +if "dev" in __version__: from pathlib import Path + try: import subprocess + freqtrade_basedir = Path(__file__).parent - __version__ = __version__ + '-' + subprocess.check_output( - ['git', 'log', '--format="%h"', '-n 1'], - stderr=subprocess.DEVNULL, cwd=freqtrade_basedir).decode("utf-8").rstrip().strip('"') + __version__ = ( + __version__ + + "-" + + subprocess.check_output( + ["git", "log", '--format="%h"', "-n 1"], + stderr=subprocess.DEVNULL, + cwd=freqtrade_basedir, + ) + .decode("utf-8") + .rstrip() + .strip('"') + ) except Exception: # pragma: no cover # git not available, ignore try: # Try Fallback to freqtrade_commit file (created by CI while building docker image) - versionfile = Path('./freqtrade_commit') + versionfile = Path("./freqtrade_commit") if versionfile.is_file(): __version__ = f"docker-{__version__}-{versionfile.read_text()[:8]}" except Exception: diff --git a/freqtrade/__main__.py b/freqtrade/__main__.py index ed950fa01..f39321c83 100755 --- a/freqtrade/__main__.py +++ b/freqtrade/__main__.py @@ -9,5 +9,5 @@ To launch Freqtrade as a module from freqtrade import main -if __name__ == '__main__': +if __name__ == "__main__": main.main() diff --git a/freqtrade/commands/__init__.py b/freqtrade/commands/__init__.py index e0fa0fd51..48ee18e93 100644 --- a/freqtrade/commands/__init__.py +++ b/freqtrade/commands/__init__.py @@ -6,22 +6,39 @@ Contains all start-commands, subcommands and CLI Interface creation. Note: Be careful with file-scoped imports in these subfiles. as they are parsed on startup, nothing containing optional modules should be loaded.
""" + from freqtrade.commands.analyze_commands import start_analysis_entries_exits from freqtrade.commands.arguments import Arguments from freqtrade.commands.build_config_commands import start_new_config, start_show_config -from freqtrade.commands.data_commands import (start_convert_data, start_convert_trades, - start_download_data, start_list_data) +from freqtrade.commands.data_commands import ( + start_convert_data, + start_convert_trades, + start_download_data, + start_list_data, +) from freqtrade.commands.db_commands import start_convert_db -from freqtrade.commands.deploy_commands import (start_create_userdir, start_install_ui, - start_new_strategy) +from freqtrade.commands.deploy_commands import ( + start_create_userdir, + start_install_ui, + start_new_strategy, +) from freqtrade.commands.hyperopt_commands import start_hyperopt_list, start_hyperopt_show -from freqtrade.commands.list_commands import (start_list_exchanges, start_list_freqAI_models, - start_list_markets, start_list_strategies, - start_list_timeframes, start_show_trades) -from freqtrade.commands.optimize_commands import (start_backtesting, start_backtesting_show, - start_edge, start_hyperopt, - start_lookahead_analysis, - start_recursive_analysis) +from freqtrade.commands.list_commands import ( + start_list_exchanges, + start_list_freqAI_models, + start_list_markets, + start_list_strategies, + start_list_timeframes, + start_show_trades, +) +from freqtrade.commands.optimize_commands import ( + start_backtesting, + start_backtesting_show, + start_edge, + start_hyperopt, + start_lookahead_analysis, + start_recursive_analysis, +) from freqtrade.commands.pairlist_commands import start_test_pairlist from freqtrade.commands.plot_commands import start_plot_dataframe, start_plot_profit from freqtrade.commands.strategy_utils_commands import start_strategy_update diff --git a/freqtrade/commands/analyze_commands.py b/freqtrade/commands/analyze_commands.py index d271a82e3..7d605a228 100644 --- a/freqtrade/commands/analyze_commands.py +++ b/freqtrade/commands/analyze_commands.py @@ -20,25 +20,25 @@ def setup_analyze_configuration(args: Dict[str, Any], method: RunMode) -> Dict[s config = setup_utils_configuration(args, method) no_unlimited_runmodes = { - RunMode.BACKTEST: 'backtesting', + RunMode.BACKTEST: "backtesting", } if method in no_unlimited_runmodes.keys(): from freqtrade.data.btanalysis import get_latest_backtest_filename - if 'exportfilename' in config: - if config['exportfilename'].is_dir(): - btfile = Path(get_latest_backtest_filename(config['exportfilename'])) + if "exportfilename" in config: + if config["exportfilename"].is_dir(): + btfile = Path(get_latest_backtest_filename(config["exportfilename"])) signals_file = f"{config['exportfilename']}/{btfile.stem}_signals.pkl" else: - if config['exportfilename'].exists(): - btfile = Path(config['exportfilename']) + if config["exportfilename"].exists(): + btfile = Path(config["exportfilename"]) signals_file = f"{btfile.parent}/{btfile.stem}_signals.pkl" else: raise ConfigurationError(f"{config['exportfilename']} does not exist.") else: - raise ConfigurationError('exportfilename not in config.') + raise ConfigurationError("exportfilename not in config.") - if (not Path(signals_file).exists()): + if not Path(signals_file).exists(): raise OperationalException( f"Cannot find latest backtest signals file: {signals_file}." "Run backtesting with `--export signals`." 
@@ -58,6 +58,6 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None: # Initialize configuration config = setup_analyze_configuration(args, RunMode.BACKTEST) - logger.info('Starting freqtrade in analysis mode') + logger.info("Starting freqtrade in analysis mode") process_entry_exit_reasons(config) diff --git a/freqtrade/commands/arguments.py b/freqtrade/commands/arguments.py index 55c3aa586..c527a80d6 100755 --- a/freqtrade/commands/arguments.py +++ b/freqtrade/commands/arguments.py @@ -1,6 +1,7 @@ """ This module contains the argument manager class """ + import argparse from functools import partial from pathlib import Path @@ -12,35 +13,72 @@ from freqtrade.constants import DEFAULT_CONFIG ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"] -ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search", "freqaimodel", - "freqaimodel_path"] +ARGS_STRATEGY = [ + "strategy", + "strategy_path", + "recursive_strategy_search", + "freqaimodel", + "freqaimodel_path", +] ARGS_TRADE = ["db_url", "sd_notify", "dry_run", "dry_run_wallet", "fee"] ARGS_WEBSERVER: List[str] = [] -ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", "dataformat_ohlcv", - "max_open_trades", "stake_amount", "fee", "pairs"] +ARGS_COMMON_OPTIMIZE = [ + "timeframe", + "timerange", + "dataformat_ohlcv", + "max_open_trades", + "stake_amount", + "fee", + "pairs", +] -ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions", - "enable_protections", "dry_run_wallet", "timeframe_detail", - "strategy_list", "export", "exportfilename", - "backtest_breakdown", "backtest_cache", - "freqai_backtest_live_models"] +ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [ + "position_stacking", + "use_max_market_positions", + "enable_protections", + "dry_run_wallet", + "timeframe_detail", + "strategy_list", + "export", + "exportfilename", + "backtest_breakdown", + "backtest_cache", + "freqai_backtest_live_models", +] -ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path", - "position_stacking", "use_max_market_positions", - "enable_protections", "dry_run_wallet", "timeframe_detail", - "epochs", "spaces", "print_all", - "print_colorized", "print_json", "hyperopt_jobs", - "hyperopt_random_state", "hyperopt_min_trades", - "hyperopt_loss", "disableparamexport", - "hyperopt_ignore_missing_space", "analyze_per_epoch"] +ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [ + "hyperopt", + "hyperopt_path", + "position_stacking", + "use_max_market_positions", + "enable_protections", + "dry_run_wallet", + "timeframe_detail", + "epochs", + "spaces", + "print_all", + "print_colorized", + "print_json", + "hyperopt_jobs", + "hyperopt_random_state", + "hyperopt_min_trades", + "hyperopt_loss", + "disableparamexport", + "hyperopt_ignore_missing_space", + "analyze_per_epoch", +] ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"] -ARGS_LIST_STRATEGIES = ["strategy_path", "print_one_column", "print_colorized", - "recursive_strategy_search"] +ARGS_LIST_STRATEGIES = [ + "strategy_path", + "print_one_column", + "print_colorized", + "recursive_strategy_search", +] ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column", "print_colorized"] @@ -52,12 +90,27 @@ ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"] ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"] -ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column", - "print_csv", "base_currencies", "quote_currencies", "list_pairs_all", - "trading_mode"] +ARGS_LIST_PAIRS = [ + 
"exchange", + "print_list", + "list_pairs_print_json", + "print_one_column", + "print_csv", + "base_currencies", + "quote_currencies", + "list_pairs_all", + "trading_mode", +] -ARGS_TEST_PAIRLIST = ["user_data_dir", "verbosity", "config", "quote_currencies", - "print_one_column", "list_pairs_print_json", "exchange"] +ARGS_TEST_PAIRLIST = [ + "user_data_dir", + "verbosity", + "config", + "quote_currencies", + "print_one_column", + "list_pairs_print_json", + "exchange", +] ARGS_CREATE_USERDIR = ["user_data_dir", "reset"] @@ -70,22 +123,59 @@ ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase", ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"] ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"] -ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades", - "trading_mode"] +ARGS_CONVERT_TRADES = [ + "pairs", + "timeframes", + "exchange", + "dataformat_ohlcv", + "dataformat_trades", + "trading_mode", +] ARGS_LIST_DATA = ["exchange", "dataformat_ohlcv", "pairs", "trading_mode", "show_timerange"] -ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "new_pairs_days", "include_inactive", - "timerange", "download_trades", "exchange", "timeframes", - "erase", "dataformat_ohlcv", "dataformat_trades", "trading_mode", - "prepend_data"] +ARGS_DOWNLOAD_DATA = [ + "pairs", + "pairs_file", + "days", + "new_pairs_days", + "include_inactive", + "timerange", + "download_trades", + "convert_trades", + "exchange", + "timeframes", + "erase", + "dataformat_ohlcv", + "dataformat_trades", + "trading_mode", + "prepend_data", +] -ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit", - "db_url", "trade_source", "export", "exportfilename", - "timerange", "timeframe", "no_trades"] +ARGS_PLOT_DATAFRAME = [ + "pairs", + "indicators1", + "indicators2", + "plot_limit", + "db_url", + "trade_source", + "export", + "exportfilename", + "timerange", + "timeframe", + "no_trades", +] -ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url", - "trade_source", "timeframe", "plot_auto_open", ] +ARGS_PLOT_PROFIT = [ + "pairs", + "timerange", + "export", + "exportfilename", + "db_url", + "trade_source", + "timeframe", + "plot_auto_open", +] ARGS_CONVERT_DB = ["db_url", "db_url_from"] @@ -93,36 +183,76 @@ ARGS_INSTALL_UI = ["erase_ui_only", "ui_version"] ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"] -ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable", - "hyperopt_list_min_trades", "hyperopt_list_max_trades", - "hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time", - "hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit", - "hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit", - "hyperopt_list_min_objective", "hyperopt_list_max_objective", - "print_colorized", "print_json", "hyperopt_list_no_details", - "hyperoptexportfilename", "export_csv"] +ARGS_HYPEROPT_LIST = [ + "hyperopt_list_best", + "hyperopt_list_profitable", + "hyperopt_list_min_trades", + "hyperopt_list_max_trades", + "hyperopt_list_min_avg_time", + "hyperopt_list_max_avg_time", + "hyperopt_list_min_avg_profit", + "hyperopt_list_max_avg_profit", + "hyperopt_list_min_total_profit", + "hyperopt_list_max_total_profit", + "hyperopt_list_min_objective", + "hyperopt_list_max_objective", + "print_colorized", + "print_json", + "hyperopt_list_no_details", + "hyperoptexportfilename", + "export_csv", +] -ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", 
"hyperopt_list_profitable", "hyperopt_show_index", - "print_json", "hyperoptexportfilename", "hyperopt_show_no_header", - "disableparamexport", "backtest_breakdown"] +ARGS_HYPEROPT_SHOW = [ + "hyperopt_list_best", + "hyperopt_list_profitable", + "hyperopt_show_index", + "print_json", + "hyperoptexportfilename", + "hyperopt_show_no_header", + "disableparamexport", + "backtest_breakdown", +] -ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list", - "exit_reason_list", "indicator_list", "timerange", - "analysis_rejected", "analysis_to_csv", "analysis_csv_path"] +ARGS_ANALYZE_ENTRIES_EXITS = [ + "exportfilename", + "analysis_groups", + "enter_reason_list", + "exit_reason_list", + "indicator_list", + "timerange", + "analysis_rejected", + "analysis_to_csv", + "analysis_csv_path", +] -NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes", - "list-markets", "list-pairs", "list-strategies", "list-freqaimodels", - "list-data", "hyperopt-list", "hyperopt-show", "backtest-filter", - "plot-dataframe", "plot-profit", "show-trades", "trades-to-ohlcv", - "strategy-updater"] +NO_CONF_REQURIED = [ + "convert-data", + "convert-trade-data", + "download-data", + "list-timeframes", + "list-markets", + "list-pairs", + "list-strategies", + "list-freqaimodels", + "list-data", + "hyperopt-list", + "hyperopt-show", + "backtest-filter", + "plot-dataframe", + "plot-profit", + "show-trades", + "trades-to-ohlcv", + "strategy-updater", +] NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"] ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"] ARGS_LOOKAHEAD_ANALYSIS = [ - a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache') - ] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"] + a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", "cache") +] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"] ARGS_RECURSIVE_ANALYSIS = ["timeframe", "timerange", "dataformat_ohlcv", "pairs", "startup_candle"] @@ -156,14 +286,14 @@ class Arguments: # Workaround issue in argparse with action='append' and default value # (see https://bugs.python.org/issue16399) # Allow no-config for certain commands (like downloading / plotting) - if ('config' in parsed_arg and parsed_arg.config is None): - conf_required = ('command' in parsed_arg and parsed_arg.command in NO_CONF_REQURIED) + if "config" in parsed_arg and parsed_arg.config is None: + conf_required = "command" in parsed_arg and parsed_arg.command in NO_CONF_REQURIED - if 'user_data_dir' in parsed_arg and parsed_arg.user_data_dir is not None: + if "user_data_dir" in parsed_arg and parsed_arg.user_data_dir is not None: user_dir = parsed_arg.user_data_dir else: # Default case - user_dir = 'user_data' + user_dir = "user_data" # Try loading from "user_data/config.json" cfgfile = Path(user_dir) / DEFAULT_CONFIG if cfgfile.is_file(): @@ -177,7 +307,6 @@ class Arguments: return parsed_arg def _build_args(self, optionlist, parser): - for val in optionlist: opt = AVAILABLE_CLI_OPTIONS[val] parser.add_argument(*opt.cli, dest=val, **opt.kwargs) @@ -197,41 +326,62 @@ class Arguments: self._build_args(optionlist=ARGS_STRATEGY, parser=strategy_group) # Build main command - self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot') - self._build_args(optionlist=['version'], parser=self.parser) + 
self.parser = argparse.ArgumentParser( + prog="freqtrade", description="Free, open source crypto trading bot" + ) + self._build_args(optionlist=["version"], parser=self.parser) - from freqtrade.commands import (start_analysis_entries_exits, start_backtesting, - start_backtesting_show, start_convert_data, - start_convert_db, start_convert_trades, - start_create_userdir, start_download_data, start_edge, - start_hyperopt, start_hyperopt_list, start_hyperopt_show, - start_install_ui, start_list_data, start_list_exchanges, - start_list_freqAI_models, start_list_markets, - start_list_strategies, start_list_timeframes, - start_lookahead_analysis, start_new_config, - start_new_strategy, start_plot_dataframe, start_plot_profit, - start_recursive_analysis, start_show_config, - start_show_trades, start_strategy_update, - start_test_pairlist, start_trading, start_webserver) + from freqtrade.commands import ( + start_analysis_entries_exits, + start_backtesting, + start_backtesting_show, + start_convert_data, + start_convert_db, + start_convert_trades, + start_create_userdir, + start_download_data, + start_edge, + start_hyperopt, + start_hyperopt_list, + start_hyperopt_show, + start_install_ui, + start_list_data, + start_list_exchanges, + start_list_freqAI_models, + start_list_markets, + start_list_strategies, + start_list_timeframes, + start_lookahead_analysis, + start_new_config, + start_new_strategy, + start_plot_dataframe, + start_plot_profit, + start_recursive_analysis, + start_show_config, + start_show_trades, + start_strategy_update, + start_test_pairlist, + start_trading, + start_webserver, + ) - subparsers = self.parser.add_subparsers(dest='command', - # Use custom message when no subhandler is added - # shown from `main.py` - # required=True - ) + subparsers = self.parser.add_subparsers( + dest="command", + # Use custom message when no subhandler is added + # shown from `main.py` + # required=True + ) # Add trade subcommand trade_cmd = subparsers.add_parser( - 'trade', - help='Trade module.', - parents=[_common_parser, _strategy_parser] + "trade", help="Trade module.", parents=[_common_parser, _strategy_parser] ) trade_cmd.set_defaults(func=start_trading) self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd) # add create-userdir subcommand create_userdir_cmd = subparsers.add_parser( - 'create-userdir', + "create-userdir", help="Create user-data directory.", ) create_userdir_cmd.set_defaults(func=start_create_userdir) @@ -239,7 +389,7 @@ class Arguments: # add new-config subcommand build_config_cmd = subparsers.add_parser( - 'new-config', + "new-config", help="Create new config", ) build_config_cmd.set_defaults(func=start_new_config) @@ -247,7 +397,7 @@ class Arguments: # add show-config subcommand show_config_cmd = subparsers.add_parser( - 'show-config', + "show-config", help="Show resolved config", ) show_config_cmd.set_defaults(func=start_show_config) @@ -255,7 +405,7 @@ class Arguments: # add new-strategy subcommand build_strategy_cmd = subparsers.add_parser( - 'new-strategy', + "new-strategy", help="Create new strategy", ) build_strategy_cmd.set_defaults(func=start_new_strategy) @@ -263,8 +413,8 @@ class Arguments: # Add download-data subcommand download_data_cmd = subparsers.add_parser( - 'download-data', - help='Download backtesting data.', + "download-data", + help="Download backtesting data.", parents=[_common_parser], ) download_data_cmd.set_defaults(func=start_download_data) @@ -272,8 +422,8 @@ class Arguments: # Add convert-data subcommand convert_data_cmd = 
subparsers.add_parser( - 'convert-data', - help='Convert candle (OHLCV) data from one format to another.', + "convert-data", + help="Convert candle (OHLCV) data from one format to another.", parents=[_common_parser], ) convert_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=True)) @@ -281,8 +431,8 @@ class Arguments: # Add convert-trade-data subcommand convert_trade_data_cmd = subparsers.add_parser( - 'convert-trade-data', - help='Convert trade data from one format to another.', + "convert-trade-data", + help="Convert trade data from one format to another.", parents=[_common_parser], ) convert_trade_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=False)) @@ -290,8 +440,8 @@ class Arguments: # Add trades-to-ohlcv subcommand convert_trade_data_cmd = subparsers.add_parser( - 'trades-to-ohlcv', - help='Convert trade data to OHLCV data.', + "trades-to-ohlcv", + help="Convert trade data to OHLCV data.", parents=[_common_parser], ) convert_trade_data_cmd.set_defaults(func=start_convert_trades) @@ -299,8 +449,8 @@ class Arguments: # Add list-data subcommand list_data_cmd = subparsers.add_parser( - 'list-data', - help='List downloaded data.', + "list-data", + help="List downloaded data.", parents=[_common_parser], ) list_data_cmd.set_defaults(func=start_list_data) @@ -308,17 +458,15 @@ class Arguments: # Add backtesting subcommand backtesting_cmd = subparsers.add_parser( - 'backtesting', - help='Backtesting module.', - parents=[_common_parser, _strategy_parser] + "backtesting", help="Backtesting module.", parents=[_common_parser, _strategy_parser] ) backtesting_cmd.set_defaults(func=start_backtesting) self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd) # Add backtesting-show subcommand backtesting_show_cmd = subparsers.add_parser( - 'backtesting-show', - help='Show past Backtest results', + "backtesting-show", + help="Show past Backtest results", parents=[_common_parser], ) backtesting_show_cmd.set_defaults(func=start_backtesting_show) @@ -326,26 +474,22 @@ class Arguments: # Add backtesting analysis subcommand analysis_cmd = subparsers.add_parser( - 'backtesting-analysis', - help='Backtest Analysis module.', - parents=[_common_parser] + "backtesting-analysis", help="Backtest Analysis module.", parents=[_common_parser] ) analysis_cmd.set_defaults(func=start_analysis_entries_exits) self._build_args(optionlist=ARGS_ANALYZE_ENTRIES_EXITS, parser=analysis_cmd) # Add edge subcommand edge_cmd = subparsers.add_parser( - 'edge', - help='Edge module.', - parents=[_common_parser, _strategy_parser] + "edge", help="Edge module.", parents=[_common_parser, _strategy_parser] ) edge_cmd.set_defaults(func=start_edge) self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd) # Add hyperopt subcommand hyperopt_cmd = subparsers.add_parser( - 'hyperopt', - help='Hyperopt module.', + "hyperopt", + help="Hyperopt module.", parents=[_common_parser, _strategy_parser], ) hyperopt_cmd.set_defaults(func=start_hyperopt) @@ -353,8 +497,8 @@ class Arguments: # Add hyperopt-list subcommand hyperopt_list_cmd = subparsers.add_parser( - 'hyperopt-list', - help='List Hyperopt results', + "hyperopt-list", + help="List Hyperopt results", parents=[_common_parser], ) hyperopt_list_cmd.set_defaults(func=start_hyperopt_list) @@ -362,8 +506,8 @@ class Arguments: # Add hyperopt-show subcommand hyperopt_show_cmd = subparsers.add_parser( - 'hyperopt-show', - help='Show details of Hyperopt results', + "hyperopt-show", + help="Show details of Hyperopt results", parents=[_common_parser], ) 
hyperopt_show_cmd.set_defaults(func=start_hyperopt_show) @@ -371,8 +515,8 @@ class Arguments: # Add list-exchanges subcommand list_exchanges_cmd = subparsers.add_parser( - 'list-exchanges', - help='Print available exchanges.', + "list-exchanges", + help="Print available exchanges.", parents=[_common_parser], ) list_exchanges_cmd.set_defaults(func=start_list_exchanges) @@ -380,8 +524,8 @@ class Arguments: # Add list-markets subcommand list_markets_cmd = subparsers.add_parser( - 'list-markets', - help='Print markets on exchange.', + "list-markets", + help="Print markets on exchange.", parents=[_common_parser], ) list_markets_cmd.set_defaults(func=partial(start_list_markets, pairs_only=False)) @@ -389,8 +533,8 @@ class Arguments: # Add list-pairs subcommand list_pairs_cmd = subparsers.add_parser( - 'list-pairs', - help='Print pairs on exchange.', + "list-pairs", + help="Print pairs on exchange.", parents=[_common_parser], ) list_pairs_cmd.set_defaults(func=partial(start_list_markets, pairs_only=True)) @@ -398,8 +542,8 @@ class Arguments: # Add list-strategies subcommand list_strategies_cmd = subparsers.add_parser( - 'list-strategies', - help='Print available strategies.', + "list-strategies", + help="Print available strategies.", parents=[_common_parser], ) list_strategies_cmd.set_defaults(func=start_list_strategies) @@ -407,8 +551,8 @@ class Arguments: # Add list-freqAI Models subcommand list_freqaimodels_cmd = subparsers.add_parser( - 'list-freqaimodels', - help='Print available freqAI models.', + "list-freqaimodels", + help="Print available freqAI models.", parents=[_common_parser], ) list_freqaimodels_cmd.set_defaults(func=start_list_freqAI_models) @@ -416,8 +560,8 @@ class Arguments: # Add list-timeframes subcommand list_timeframes_cmd = subparsers.add_parser( - 'list-timeframes', - help='Print available timeframes for the exchange.', + "list-timeframes", + help="Print available timeframes for the exchange.", parents=[_common_parser], ) list_timeframes_cmd.set_defaults(func=start_list_timeframes) @@ -425,8 +569,8 @@ class Arguments: # Add show-trades subcommand show_trades = subparsers.add_parser( - 'show-trades', - help='Show trades.', + "show-trades", + help="Show trades.", parents=[_common_parser], ) show_trades.set_defaults(func=start_show_trades) @@ -434,8 +578,8 @@ class Arguments: # Add test-pairlist subcommand test_pairlist_cmd = subparsers.add_parser( - 'test-pairlist', - help='Test your pairlist configuration.', + "test-pairlist", + help="Test your pairlist configuration.", ) test_pairlist_cmd.set_defaults(func=start_test_pairlist) self._build_args(optionlist=ARGS_TEST_PAIRLIST, parser=test_pairlist_cmd) @@ -450,16 +594,16 @@ class Arguments: # Add install-ui subcommand install_ui_cmd = subparsers.add_parser( - 'install-ui', - help='Install FreqUI', + "install-ui", + help="Install FreqUI", ) install_ui_cmd.set_defaults(func=start_install_ui) self._build_args(optionlist=ARGS_INSTALL_UI, parser=install_ui_cmd) # Add Plotting subcommand plot_dataframe_cmd = subparsers.add_parser( - 'plot-dataframe', - help='Plot candles with indicators.', + "plot-dataframe", + help="Plot candles with indicators.", parents=[_common_parser, _strategy_parser], ) plot_dataframe_cmd.set_defaults(func=start_plot_dataframe) @@ -467,8 +611,8 @@ class Arguments: # Plot profit plot_profit_cmd = subparsers.add_parser( - 'plot-profit', - help='Generate plot showing profits.', + "plot-profit", + help="Generate plot showing profits.", parents=[_common_parser, _strategy_parser], ) 
plot_profit_cmd.set_defaults(func=start_plot_profit) @@ -476,40 +620,36 @@ class Arguments: # Add webserver subcommand webserver_cmd = subparsers.add_parser( - 'webserver', - help='Webserver module.', - parents=[_common_parser] + "webserver", help="Webserver module.", parents=[_common_parser] ) webserver_cmd.set_defaults(func=start_webserver) self._build_args(optionlist=ARGS_WEBSERVER, parser=webserver_cmd) # Add strategy_updater subcommand strategy_updater_cmd = subparsers.add_parser( - 'strategy-updater', - help='updates outdated strategy files to the current version', - parents=[_common_parser] + "strategy-updater", + help="updates outdated strategy files to the current version", + parents=[_common_parser], ) strategy_updater_cmd.set_defaults(func=start_strategy_update) self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd) # Add lookahead_analysis subcommand lookahead_analayis_cmd = subparsers.add_parser( - 'lookahead-analysis', + "lookahead-analysis", help="Check for potential look ahead bias.", - parents=[_common_parser, _strategy_parser] + parents=[_common_parser, _strategy_parser], ) lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis) - self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS, - parser=lookahead_analayis_cmd) + self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS, parser=lookahead_analayis_cmd) # Add recursive_analysis subcommand recursive_analayis_cmd = subparsers.add_parser( - 'recursive-analysis', + "recursive-analysis", help="Check for potential recursive formula issue.", - parents=[_common_parser, _strategy_parser] + parents=[_common_parser, _strategy_parser], ) recursive_analayis_cmd.set_defaults(func=start_recursive_analysis) - self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS, - parser=recursive_analayis_cmd) + self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS, parser=recursive_analayis_cmd) diff --git a/freqtrade/commands/build_config_commands.py b/freqtrade/commands/build_config_commands.py index 63862c3bf..1e771a372 100644 --- a/freqtrade/commands/build_config_commands.py +++ b/freqtrade/commands/build_config_commands.py @@ -45,7 +45,7 @@ def ask_user_overwrite(config_path: Path) -> bool: }, ] answers = prompt(questions) - return answers['overwrite'] + return answers["overwrite"] def ask_user_config() -> Dict[str, Any]: @@ -65,7 +65,7 @@ def ask_user_config() -> Dict[str, Any]: "type": "text", "name": "stake_currency", "message": "Please insert your stake currency:", - "default": 'USDT', + "default": "USDT", }, { "type": "text", @@ -73,36 +73,38 @@ def ask_user_config() -> Dict[str, Any]: "message": f"Please insert your stake amount (Number or '{UNLIMITED_STAKE_AMOUNT}'):", "default": "unlimited", "validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val), - "filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"' - if val == UNLIMITED_STAKE_AMOUNT - else val + "filter": lambda val: ( + '"' + UNLIMITED_STAKE_AMOUNT + '"' if val == UNLIMITED_STAKE_AMOUNT else val + ), }, { "type": "text", "name": "max_open_trades", "message": "Please insert max_open_trades (Integer or -1 for unlimited open trades):", "default": "3", - "validate": lambda val: validate_is_int(val) + "validate": lambda val: validate_is_int(val), }, { "type": "select", "name": "timeframe_in_config", "message": "Time", - "choices": ["Have the strategy define timeframe.", "Override in configuration."] + "choices": ["Have the strategy define timeframe.", "Override in configuration."], }, { "type": "text", "name": "timeframe", "message": 
"Please insert your desired timeframe (e.g. 5m):", "default": "5m", - "when": lambda x: x["timeframe_in_config"] == 'Override in configuration.' - + "when": lambda x: x["timeframe_in_config"] == "Override in configuration.", }, { "type": "text", "name": "fiat_display_currency", - "message": "Please insert your display Currency (for reporting):", - "default": 'USD', + "message": ( + "Please insert your display Currency for reporting " + "(leave empty to disable FIAT conversion):" + ), + "default": "USD", }, { "type": "select", @@ -111,6 +113,7 @@ def ask_user_config() -> Dict[str, Any]: "choices": [ "binance", "binanceus", + "bingx", "gate", "htx", "kraken", @@ -125,33 +128,33 @@ def ask_user_config() -> Dict[str, Any]: "name": "trading_mode", "message": "Do you want to trade Perpetual Swaps (perpetual futures)?", "default": False, - "filter": lambda val: 'futures' if val else 'spot', - "when": lambda x: x["exchange_name"] in ['binance', 'gate', 'okx'], + "filter": lambda val: "futures" if val else "spot", + "when": lambda x: x["exchange_name"] in ["binance", "gate", "okx", "bybit"], }, { "type": "autocomplete", "name": "exchange_name", "message": "Type your exchange name (Must be supported by ccxt)", "choices": available_exchanges(), - "when": lambda x: x["exchange_name"] == 'other' + "when": lambda x: x["exchange_name"] == "other", }, { "type": "password", "name": "exchange_key", "message": "Insert Exchange Key", - "when": lambda x: not x['dry_run'] + "when": lambda x: not x["dry_run"], }, { "type": "password", "name": "exchange_secret", "message": "Insert Exchange Secret", - "when": lambda x: not x['dry_run'] + "when": lambda x: not x["dry_run"], }, { "type": "password", "name": "exchange_key_password", "message": "Insert Exchange API Key password", - "when": lambda x: not x['dry_run'] and x['exchange_name'] in ('kucoin', 'okx') + "when": lambda x: not x["dry_run"] and x["exchange_name"] in ("kucoin", "okx"), }, { "type": "confirm", @@ -163,13 +166,13 @@ def ask_user_config() -> Dict[str, Any]: "type": "password", "name": "telegram_token", "message": "Insert Telegram token", - "when": lambda x: x['telegram'] + "when": lambda x: x["telegram"], }, { "type": "password", "name": "telegram_chat_id", "message": "Insert Telegram chat id", - "when": lambda x: x['telegram'] + "when": lambda x: x["telegram"], }, { "type": "confirm", @@ -180,23 +183,25 @@ def ask_user_config() -> Dict[str, Any]: { "type": "text", "name": "api_server_listen_addr", - "message": ("Insert Api server Listen Address (0.0.0.0 for docker, " - "otherwise best left untouched)"), + "message": ( + "Insert Api server Listen Address (0.0.0.0 for docker, " + "otherwise best left untouched)" + ), "default": "127.0.0.1" if not running_in_docker() else "0.0.0.0", - "when": lambda x: x['api_server'] + "when": lambda x: x["api_server"], }, { "type": "text", "name": "api_server_username", "message": "Insert api-server username", "default": "freqtrader", - "when": lambda x: x['api_server'] + "when": lambda x: x["api_server"], }, { "type": "password", "name": "api_server_password", "message": "Insert api-server password", - "when": lambda x: x['api_server'] + "when": lambda x: x["api_server"], }, ] answers = prompt(questions) @@ -205,15 +210,11 @@ def ask_user_config() -> Dict[str, Any]: # Interrupted questionary sessions return an empty dict. 
raise OperationalException("User interrupted interactive questions.") # Ensure default is set for non-futures exchanges - answers['trading_mode'] = answers.get('trading_mode', "spot") - answers['margin_mode'] = ( - 'isolated' - if answers.get('trading_mode') == 'futures' - else '' - ) + answers["trading_mode"] = answers.get("trading_mode", "spot") + answers["margin_mode"] = "isolated" if answers.get("trading_mode") == "futures" else "" # Force JWT token to be a random string - answers['api_server_jwt_key'] = secrets.token_hex() - answers['api_server_ws_token'] = secrets.token_urlsafe(25) + answers["api_server_jwt_key"] = secrets.token_hex() + answers["api_server_ws_token"] = secrets.token_urlsafe(25) return answers @@ -225,26 +226,26 @@ def deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None: :param selections: Dict containing selections taken by the user. """ from jinja2.exceptions import TemplateNotFound + try: exchange_template = MAP_EXCHANGE_CHILDCLASS.get( - selections['exchange_name'], selections['exchange_name']) + selections["exchange_name"], selections["exchange_name"] + ) - selections['exchange'] = render_template( - templatefile=f"subtemplates/exchange_{exchange_template}.j2", - arguments=selections + selections["exchange"] = render_template( + templatefile=f"subtemplates/exchange_{exchange_template}.j2", arguments=selections ) except TemplateNotFound: - selections['exchange'] = render_template( - templatefile="subtemplates/exchange_generic.j2", - arguments=selections + selections["exchange"] = render_template( + templatefile="subtemplates/exchange_generic.j2", arguments=selections ) - config_text = render_template(templatefile='base_config.json.j2', - arguments=selections) + config_text = render_template(templatefile="base_config.json.j2", arguments=selections) logger.info(f"Writing config to `{config_path}`.") logger.info( - "Please make sure to check the configuration contents and adjust settings to your needs.") + "Please make sure to check the configuration contents and adjust settings to your needs." + ) config_path.write_text(config_text) @@ -255,7 +256,7 @@ def start_new_config(args: Dict[str, Any]) -> None: Asking the user questions to fill out the template accordingly. """ - config_path = Path(args['config'][0]) + config_path = Path(args["config"][0]) chown_user_directory(config_path.parent) if config_path.exists(): overwrite = ask_user_overwrite(config_path) @@ -264,22 +265,22 @@ def start_new_config(args: Dict[str, Any]) -> None: else: raise OperationalException( f"Configuration file `{config_path}` already exists. " - "Please delete it or use a different configuration file name.") + "Please delete it or use a different configuration file name." 
+ ) selections = ask_user_config() deploy_new_config(config_path, selections) def start_show_config(args: Dict[str, Any]) -> None: - config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE, set_dry=False) # TODO: Sanitize from sensitive info before printing print("Your combined configuration is:") config_sanitized = sanitize_config( - config['original_config'], - show_sensitive=args.get('show_sensitive', False) + config["original_config"], show_sensitive=args.get("show_sensitive", False) ) from rich import print_json + print_json(data=config_sanitized) diff --git a/freqtrade/commands/cli_options.py b/freqtrade/commands/cli_options.py index 287ab2594..b9236a0ab 100755 --- a/freqtrade/commands/cli_options.py +++ b/freqtrade/commands/cli_options.py @@ -1,6 +1,7 @@ """ Definition of cli arguments used in arguments.py """ + from argparse import SUPPRESS, ArgumentTypeError from freqtrade import __version__, constants @@ -43,152 +44,165 @@ class Arg: AVAILABLE_CLI_OPTIONS = { # Common options "verbosity": Arg( - '-v', '--verbose', - help='Verbose mode (-vv for more, -vvv to get all messages).', - action='count', + "-v", + "--verbose", + help="Verbose mode (-vv for more, -vvv to get all messages).", + action="count", default=0, ), "logfile": Arg( - '--logfile', '--log-file', + "--logfile", + "--log-file", help="Log to the file specified. Special values are: 'syslog', 'journald'. " - "See the documentation for more details.", - metavar='FILE', + "See the documentation for more details.", + metavar="FILE", ), "version": Arg( - '-V', '--version', - action='version', - version=f'%(prog)s {__version__}', + "-V", + "--version", + action="version", + version=f"%(prog)s {__version__}", ), "config": Arg( - '-c', '--config', - help=f'Specify configuration file (default: `userdir/{constants.DEFAULT_CONFIG}` ' - f'or `config.json` whichever exists). ' - f'Multiple --config options may be used. ' - f'Can be set to `-` to read config from stdin.', - action='append', - metavar='PATH', + "-c", + "--config", + help=f"Specify configuration file (default: `userdir/{constants.DEFAULT_CONFIG}` " + f"or `config.json` whichever exists). " + f"Multiple --config options may be used. 
" + f"Can be set to `-` to read config from stdin.", + action="append", + metavar="PATH", ), "datadir": Arg( - '-d', '--datadir', '--data-dir', - help='Path to directory with historical backtesting data.', - metavar='PATH', + "-d", + "--datadir", + "--data-dir", + help="Path to directory with historical backtesting data.", + metavar="PATH", ), "user_data_dir": Arg( - '--userdir', '--user-data-dir', - help='Path to userdata directory.', - metavar='PATH', + "--userdir", + "--user-data-dir", + help="Path to userdata directory.", + metavar="PATH", ), "reset": Arg( - '--reset', - help='Reset sample files to their original state.', - action='store_true', + "--reset", + help="Reset sample files to their original state.", + action="store_true", ), "recursive_strategy_search": Arg( - '--recursive-strategy-search', - help='Recursively search for a strategy in the strategies folder.', - action='store_true', + "--recursive-strategy-search", + help="Recursively search for a strategy in the strategies folder.", + action="store_true", ), # Main options "strategy": Arg( - '-s', '--strategy', - help='Specify strategy class name which will be used by the bot.', - metavar='NAME', + "-s", + "--strategy", + help="Specify strategy class name which will be used by the bot.", + metavar="NAME", ), "strategy_path": Arg( - '--strategy-path', - help='Specify additional strategy lookup path.', - metavar='PATH', + "--strategy-path", + help="Specify additional strategy lookup path.", + metavar="PATH", ), "db_url": Arg( - '--db-url', - help=f'Override trades database URL, this is useful in custom deployments ' - f'(default: `{constants.DEFAULT_DB_PROD_URL}` for Live Run mode, ' - f'`{constants.DEFAULT_DB_DRYRUN_URL}` for Dry Run).', - metavar='PATH', + "--db-url", + help=f"Override trades database URL, this is useful in custom deployments " + f"(default: `{constants.DEFAULT_DB_PROD_URL}` for Live Run mode, " + f"`{constants.DEFAULT_DB_DRYRUN_URL}` for Dry Run).", + metavar="PATH", ), "db_url_from": Arg( - '--db-url-from', - help='Source db url to use when migrating a database.', - metavar='PATH', + "--db-url-from", + help="Source db url to use when migrating a database.", + metavar="PATH", ), "sd_notify": Arg( - '--sd-notify', - help='Notify systemd service manager.', - action='store_true', + "--sd-notify", + help="Notify systemd service manager.", + action="store_true", ), "dry_run": Arg( - '--dry-run', - help='Enforce dry-run for trading (removes Exchange secrets and simulates trades).', - action='store_true', + "--dry-run", + help="Enforce dry-run for trading (removes Exchange secrets and simulates trades).", + action="store_true", ), "dry_run_wallet": Arg( - '--dry-run-wallet', '--starting-balance', - help='Starting balance, used for backtesting / hyperopt and dry-runs.', + "--dry-run-wallet", + "--starting-balance", + help="Starting balance, used for backtesting / hyperopt and dry-runs.", type=float, ), # Optimize common "timeframe": Arg( - '-i', '--timeframe', - help='Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).', + "-i", + "--timeframe", + help="Specify timeframe (`1m`, `5m`, `30m`, `1h`, `1d`).", ), "timerange": Arg( - '--timerange', - help='Specify what timerange of data to use.', + "--timerange", + help="Specify what timerange of data to use.", ), "max_open_trades": Arg( - '--max-open-trades', - help='Override the value of the `max_open_trades` configuration setting.', + "--max-open-trades", + help="Override the value of the `max_open_trades` configuration setting.", type=int, - metavar='INT', + 
metavar="INT", ), "stake_amount": Arg( - '--stake-amount', - help='Override the value of the `stake_amount` configuration setting.', + "--stake-amount", + help="Override the value of the `stake_amount` configuration setting.", ), # Backtesting "timeframe_detail": Arg( - '--timeframe-detail', - help='Specify detail timeframe for backtesting (`1m`, `5m`, `30m`, `1h`, `1d`).', + "--timeframe-detail", + help="Specify detail timeframe for backtesting (`1m`, `5m`, `30m`, `1h`, `1d`).", ), "position_stacking": Arg( - '--eps', '--enable-position-stacking', - help='Allow buying the same pair multiple times (position stacking).', - action='store_true', + "--eps", + "--enable-position-stacking", + help="Allow buying the same pair multiple times (position stacking).", + action="store_true", default=False, ), "use_max_market_positions": Arg( - '--dmmp', '--disable-max-market-positions', - help='Disable applying `max_open_trades` during backtest ' - '(same as setting `max_open_trades` to a very high number).', - action='store_false', + "--dmmp", + "--disable-max-market-positions", + help="Disable applying `max_open_trades` during backtest " + "(same as setting `max_open_trades` to a very high number).", + action="store_false", default=True, ), "backtest_show_pair_list": Arg( - '--show-pair-list', - help='Show backtesting pairlist sorted by profit.', - action='store_true', + "--show-pair-list", + help="Show backtesting pairlist sorted by profit.", + action="store_true", default=False, ), "enable_protections": Arg( - '--enable-protections', '--enableprotections', - help='Enable protections for backtesting.' - 'Will slow backtesting down by a considerable amount, but will include ' - 'configured protections', - action='store_true', + "--enable-protections", + "--enableprotections", + help="Enable protections for backtesting." + "Will slow backtesting down by a considerable amount, but will include " + "configured protections", + action="store_true", default=False, ), "strategy_list": Arg( - '--strategy-list', - help='Provide a space-separated list of strategies to backtest. ' - 'Please note that timeframe needs to be set either in config ' - 'or via command line. When using this together with `--export trades`, ' - 'the strategy-name is injected into the filename ' - '(so `backtest-data.json` becomes `backtest-data-SampleStrategy.json`', - nargs='+', + "--strategy-list", + help="Provide a space-separated list of strategies to backtest. " + "Please note that timeframe needs to be set either in config " + "or via command line. When using this together with `--export trades`, " + "the strategy-name is injected into the filename " + "(so `backtest-data.json` becomes `backtest-data-SampleStrategy.json`", + nargs="+", ), "export": Arg( - '--export', - help='Export backtest results (default: trades).', + "--export", + help="Export backtest results (default: trades).", choices=constants.EXPORT_OPTIONS, ), "exportfilename": Arg( @@ -200,526 +214,563 @@ AVAILABLE_CLI_OPTIONS = { metavar="PATH", ), "disableparamexport": Arg( - '--disable-param-export', + "--disable-param-export", help="Disable automatic hyperopt parameter export.", - action='store_true', + action="store_true", ), "fee": Arg( - '--fee', - help='Specify fee ratio. Will be applied twice (on trade entry and exit).', + "--fee", + help="Specify fee ratio. 
Will be applied twice (on trade entry and exit).", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "backtest_breakdown": Arg( - '--breakdown', - help='Show backtesting breakdown per [day, week, month].', - nargs='+', - choices=constants.BACKTEST_BREAKDOWNS + "--breakdown", + help="Show backtesting breakdown per [day, week, month].", + nargs="+", + choices=constants.BACKTEST_BREAKDOWNS, ), "backtest_cache": Arg( - '--cache', - help='Load a cached backtest result no older than specified age (default: %(default)s).', + "--cache", + help="Load a cached backtest result no older than specified age (default: %(default)s).", default=constants.BACKTEST_CACHE_DEFAULT, choices=constants.BACKTEST_CACHE_AGE, ), # Edge "stoploss_range": Arg( - '--stoplosses', - help='Defines a range of stoploss values against which edge will assess the strategy. ' + "--stoplosses", + help="Defines a range of stoploss values against which edge will assess the strategy. " 'The format is "min,max,step" (without any space). ' - 'Example: `--stoplosses=-0.01,-0.1,-0.001`', + "Example: `--stoplosses=-0.01,-0.1,-0.001`", ), # Hyperopt "hyperopt": Arg( - '--hyperopt', + "--hyperopt", help=SUPPRESS, - metavar='NAME', + metavar="NAME", required=False, ), "hyperopt_path": Arg( - '--hyperopt-path', - help='Specify additional lookup path for Hyperopt Loss functions.', - metavar='PATH', + "--hyperopt-path", + help="Specify additional lookup path for Hyperopt Loss functions.", + metavar="PATH", ), "epochs": Arg( - '-e', '--epochs', - help='Specify number of epochs (default: %(default)d).', + "-e", + "--epochs", + help="Specify number of epochs (default: %(default)d).", type=check_int_positive, - metavar='INT', + metavar="INT", default=constants.HYPEROPT_EPOCH, ), "spaces": Arg( - '--spaces', - help='Specify which parameters to hyperopt. Space-separated list.', - choices=['all', 'buy', 'sell', 'roi', 'stoploss', - 'trailing', 'protection', 'trades', 'default'], - nargs='+', - default='default', + "--spaces", + help="Specify which parameters to hyperopt. Space-separated list.", + choices=[ + "all", + "buy", + "sell", + "roi", + "stoploss", + "trailing", + "protection", + "trades", + "default", + ], + nargs="+", + default="default", ), "analyze_per_epoch": Arg( - '--analyze-per-epoch', - help='Run populate_indicators once per epoch.', - action='store_true', + "--analyze-per-epoch", + help="Run populate_indicators once per epoch.", + action="store_true", default=False, ), - "print_all": Arg( - '--print-all', - help='Print all results, not only the best ones.', - action='store_true', + "--print-all", + help="Print all results, not only the best ones.", + action="store_true", default=False, ), "print_colorized": Arg( - '--no-color', - help='Disable colorization of hyperopt results. May be useful if you are ' - 'redirecting output to a file.', - action='store_false', + "--no-color", + help="Disable colorization of hyperopt results. May be useful if you are " + "redirecting output to a file.", + action="store_false", default=True, ), "print_json": Arg( - '--print-json', - help='Print output in JSON format.', - action='store_true', + "--print-json", + help="Print output in JSON format.", + action="store_true", default=False, ), "export_csv": Arg( - '--export-csv', - help='Export to CSV-File.' - ' This will disable table print.' - ' Example: --export-csv hyperopt.csv', - metavar='FILE', + "--export-csv", + help="Export to CSV-File." + " This will disable table print." 
+ " Example: --export-csv hyperopt.csv", + metavar="FILE", ), "hyperopt_jobs": Arg( - '-j', '--job-workers', - help='The number of concurrently running jobs for hyperoptimization ' - '(hyperopt worker processes). ' - 'If -1 (default), all CPUs are used, for -2, all CPUs but one are used, etc. ' - 'If 1 is given, no parallel computing code is used at all.', + "-j", + "--job-workers", + help="The number of concurrently running jobs for hyperoptimization " + "(hyperopt worker processes). " + "If -1 (default), all CPUs are used, for -2, all CPUs but one are used, etc. " + "If 1 is given, no parallel computing code is used at all.", type=int, - metavar='JOBS', + metavar="JOBS", default=-1, ), "hyperopt_random_state": Arg( - '--random-state', - help='Set random state to some positive integer for reproducible hyperopt results.', + "--random-state", + help="Set random state to some positive integer for reproducible hyperopt results.", type=check_int_positive, - metavar='INT', + metavar="INT", ), "hyperopt_min_trades": Arg( - '--min-trades', + "--min-trades", help="Set minimal desired number of trades for evaluations in the hyperopt " "optimization path (default: 1).", type=check_int_positive, - metavar='INT', + metavar="INT", default=1, ), "hyperopt_loss": Arg( - '--hyperopt-loss', '--hyperoptloss', - help='Specify the class name of the hyperopt loss function class (IHyperOptLoss). ' - 'Different functions can generate completely different results, ' - 'since the target for optimization is different. Built-in Hyperopt-loss-functions are: ' + "--hyperopt-loss", + "--hyperoptloss", + help="Specify the class name of the hyperopt loss function class (IHyperOptLoss). " + "Different functions can generate completely different results, " + "since the target for optimization is different. Built-in Hyperopt-loss-functions are: " f'{", ".join(HYPEROPT_LOSS_BUILTIN)}', - metavar='NAME', + metavar="NAME", ), "hyperoptexportfilename": Arg( - '--hyperopt-filename', - help='Hyperopt result filename.' - 'Example: `--hyperopt-filename=hyperopt_results_2020-09-27_16-20-48.pickle`', - metavar='FILENAME', + "--hyperopt-filename", + help="Hyperopt result filename." + "Example: `--hyperopt-filename=hyperopt_results_2020-09-27_16-20-48.pickle`", + metavar="FILENAME", ), # List exchanges "print_one_column": Arg( - '-1', '--one-column', - help='Print output in one column.', - action='store_true', + "-1", + "--one-column", + help="Print output in one column.", + action="store_true", ), "list_exchanges_all": Arg( - '-a', '--all', - help='Print all exchanges known to the ccxt library.', - action='store_true', + "-a", + "--all", + help="Print all exchanges known to the ccxt library.", + action="store_true", ), # List pairs / markets "list_pairs_all": Arg( - '-a', '--all', - help='Print all pairs or market symbols. By default only active ' - 'ones are shown.', - action='store_true', + "-a", + "--all", + help="Print all pairs or market symbols. By default only active ones are shown.", + action="store_true", ), "print_list": Arg( - '--print-list', - help='Print list of pairs or market symbols. By default data is ' - 'printed in the tabular format.', - action='store_true', + "--print-list", + help="Print list of pairs or market symbols. 
By default data is " + "printed in the tabular format.", + action="store_true", ), "list_pairs_print_json": Arg( - '--print-json', - help='Print list of pairs or market symbols in JSON format.', - action='store_true', + "--print-json", + help="Print list of pairs or market symbols in JSON format.", + action="store_true", default=False, ), "print_csv": Arg( - '--print-csv', - help='Print exchange pair or market data in the csv format.', - action='store_true', + "--print-csv", + help="Print exchange pair or market data in the csv format.", + action="store_true", ), "quote_currencies": Arg( - '--quote', - help='Specify quote currency(-ies). Space-separated list.', - nargs='+', - metavar='QUOTE_CURRENCY', + "--quote", + help="Specify quote currency(-ies). Space-separated list.", + nargs="+", + metavar="QUOTE_CURRENCY", ), "base_currencies": Arg( - '--base', - help='Specify base currency(-ies). Space-separated list.', - nargs='+', - metavar='BASE_CURRENCY', + "--base", + help="Specify base currency(-ies). Space-separated list.", + nargs="+", + metavar="BASE_CURRENCY", ), "trading_mode": Arg( - '--trading-mode', '--tradingmode', - help='Select Trading mode', + "--trading-mode", + "--tradingmode", + help="Select Trading mode", choices=constants.TRADING_MODES, ), "candle_types": Arg( - '--candle-types', - help='Select candle type to convert. Defaults to all available types.', + "--candle-types", + help="Select candle type to convert. Defaults to all available types.", choices=[c.value for c in CandleType], - nargs='+', + nargs="+", ), # Script options "pairs": Arg( - '-p', '--pairs', - help='Limit command to these pairs. Pairs are space-separated.', - nargs='+', + "-p", + "--pairs", + help="Limit command to these pairs. Pairs are space-separated.", + nargs="+", ), # Download data "pairs_file": Arg( - '--pairs-file', - help='File containing a list of pairs. ' - 'Takes precedence over --pairs or pairs configured in the configuration.', - metavar='FILE', + "--pairs-file", + help="File containing a list of pairs. " + "Takes precedence over --pairs or pairs configured in the configuration.", + metavar="FILE", ), "days": Arg( - '--days', - help='Download data for given number of days.', + "--days", + help="Download data for given number of days.", type=check_int_positive, - metavar='INT', + metavar="INT", ), "include_inactive": Arg( - '--include-inactive-pairs', - help='Also download data from inactive pairs.', - action='store_true', + "--include-inactive-pairs", + help="Also download data from inactive pairs.", + action="store_true", ), "new_pairs_days": Arg( - '--new-pairs-days', - help='Download data of new pairs for given number of days. Default: `%(default)s`.', + "--new-pairs-days", + help="Download data of new pairs for given number of days. Default: `%(default)s`.", type=check_int_positive, - metavar='INT', + metavar="INT", ), "download_trades": Arg( - '--dl-trades', - help='Download trades instead of OHLCV data. The bot will resample trades to the ' - 'desired timeframe as specified as --timeframes/-t.', - action='store_true', + "--dl-trades", + help="Download trades instead of OHLCV data. The bot will resample trades to the " + "desired timeframe as specified as --timeframes/-t.", + action="store_true", + ), + "convert_trades": Arg( + "--convert", + help="Convert downloaded trades to OHLCV data. Only applicable in combination with " + "`--dl-trades`. " + "Will be automatic for exchanges which don't have historic OHLCV (e.g. Kraken). 
" + "If not provided, use `trades-to-ohlcv` to convert trades data to OHLCV data.", + action="store_true", ), "format_from_trades": Arg( - '--format-from', - help='Source format for data conversion.', - choices=constants.AVAILABLE_DATAHANDLERS + ['kraken_csv'], + "--format-from", + help="Source format for data conversion.", + choices=constants.AVAILABLE_DATAHANDLERS + ["kraken_csv"], required=True, ), "format_from": Arg( - '--format-from', - help='Source format for data conversion.', + "--format-from", + help="Source format for data conversion.", choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), "format_to": Arg( - '--format-to', - help='Destination format for data conversion.', + "--format-to", + help="Destination format for data conversion.", choices=constants.AVAILABLE_DATAHANDLERS, required=True, ), "dataformat_ohlcv": Arg( - '--data-format-ohlcv', - help='Storage format for downloaded candle (OHLCV) data. (default: `feather`).', + "--data-format-ohlcv", + help="Storage format for downloaded candle (OHLCV) data. (default: `feather`).", choices=constants.AVAILABLE_DATAHANDLERS, ), "dataformat_trades": Arg( - '--data-format-trades', - help='Storage format for downloaded trades data. (default: `feather`).', + "--data-format-trades", + help="Storage format for downloaded trades data. (default: `feather`).", choices=constants.AVAILABLE_DATAHANDLERS, ), "show_timerange": Arg( - '--show-timerange', - help='Show timerange available for available data. (May take a while to calculate).', - action='store_true', + "--show-timerange", + help="Show timerange available for available data. (May take a while to calculate).", + action="store_true", ), "exchange": Arg( - '--exchange', - help='Exchange name. Only valid if no config is provided.', + "--exchange", + help="Exchange name. Only valid if no config is provided.", ), "timeframes": Arg( - '-t', '--timeframes', - help='Specify which tickers to download. Space-separated list. ' - 'Default: `1m 5m`.', - nargs='+', + "-t", + "--timeframes", + help="Specify which tickers to download. Space-separated list. Default: `1m 5m`.", + nargs="+", ), "prepend_data": Arg( - '--prepend', - help='Allow data prepending. (Data-appending is disabled)', - action='store_true', + "--prepend", + help="Allow data prepending. (Data-appending is disabled)", + action="store_true", ), "erase": Arg( - '--erase', - help='Clean all existing data for the selected exchange/pairs/timeframes.', - action='store_true', + "--erase", + help="Clean all existing data for the selected exchange/pairs/timeframes.", + action="store_true", ), "erase_ui_only": Arg( - '--erase', + "--erase", help="Clean UI folder, don't download new version.", - action='store_true', + action="store_true", default=False, ), "ui_version": Arg( - '--ui-version', - help=('Specify a specific version of FreqUI to install. ' - 'Not specifying this installs the latest version.'), + "--ui-version", + help=( + "Specify a specific version of FreqUI to install. " + "Not specifying this installs the latest version." + ), type=str, ), # Templating options "template": Arg( - '--template', - help='Use a template which is either `minimal`, ' - '`full` (containing multiple sample indicators) or `advanced`. Default: `%(default)s`.', - choices=['full', 'minimal', 'advanced'], - default='full', + "--template", + help="Use a template which is either `minimal`, " + "`full` (containing multiple sample indicators) or `advanced`. 
Default: `%(default)s`.", + choices=["full", "minimal", "advanced"], + default="full", ), # Plot dataframe "indicators1": Arg( - '--indicators1', - help='Set indicators from your strategy you want in the first row of the graph. ' + "--indicators1", + help="Set indicators from your strategy you want in the first row of the graph. " "Space-separated list. Example: `ema3 ema5`. Default: `['sma', 'ema3', 'ema5']`.", - nargs='+', + nargs="+", ), "indicators2": Arg( - '--indicators2', - help='Set indicators from your strategy you want in the third row of the graph. ' + "--indicators2", + help="Set indicators from your strategy you want in the third row of the graph. " "Space-separated list. Example: `fastd fastk`. Default: `['macd', 'macdsignal']`.", - nargs='+', + nargs="+", ), "plot_limit": Arg( - '--plot-limit', - help='Specify tick limit for plotting. Notice: too high values cause huge files. ' - 'Default: %(default)s.', + "--plot-limit", + help="Specify tick limit for plotting. Notice: too high values cause huge files. " + "Default: %(default)s.", type=check_int_positive, - metavar='INT', + metavar="INT", default=750, ), "plot_auto_open": Arg( - '--auto-open', - help='Automatically open generated plot.', - action='store_true', + "--auto-open", + help="Automatically open generated plot.", + action="store_true", ), "no_trades": Arg( - '--no-trades', - help='Skip using trades from backtesting file and DB.', - action='store_true', + "--no-trades", + help="Skip using trades from backtesting file and DB.", + action="store_true", ), "trade_source": Arg( - '--trade-source', - help='Specify the source for trades (Can be DB or file (backtest file)) ' - 'Default: %(default)s', + "--trade-source", + help="Specify the source for trades (Can be DB or file (backtest file)) " + "Default: %(default)s", choices=["DB", "file"], default="file", ), "trade_ids": Arg( - '--trade-ids', - help='Specify the list of trade ids.', - nargs='+', + "--trade-ids", + help="Specify the list of trade ids.", + nargs="+", ), # hyperopt-list, hyperopt-show "hyperopt_list_profitable": Arg( - '--profitable', - help='Select only profitable epochs.', - action='store_true', + "--profitable", + help="Select only profitable epochs.", + action="store_true", ), "hyperopt_list_best": Arg( - '--best', - help='Select only best epochs.', - action='store_true', + "--best", + help="Select only best epochs.", + action="store_true", ), "hyperopt_list_min_trades": Arg( - '--min-trades', - help='Select epochs with more than INT trades.', + "--min-trades", + help="Select epochs with more than INT trades.", type=check_int_positive, - metavar='INT', + metavar="INT", ), "hyperopt_list_max_trades": Arg( - '--max-trades', - help='Select epochs with less than INT trades.', + "--max-trades", + help="Select epochs with less than INT trades.", type=check_int_positive, - metavar='INT', + metavar="INT", ), "hyperopt_list_min_avg_time": Arg( - '--min-avg-time', - help='Select epochs above average time.', + "--min-avg-time", + help="Select epochs above average time.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_max_avg_time": Arg( - '--max-avg-time', - help='Select epochs below average time.', + "--max-avg-time", + help="Select epochs below average time.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_min_avg_profit": Arg( - '--min-avg-profit', - help='Select epochs above average profit.', + "--min-avg-profit", + help="Select epochs above average profit.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), 
"hyperopt_list_max_avg_profit": Arg( - '--max-avg-profit', - help='Select epochs below average profit.', + "--max-avg-profit", + help="Select epochs below average profit.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_min_total_profit": Arg( - '--min-total-profit', - help='Select epochs above total profit.', + "--min-total-profit", + help="Select epochs above total profit.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_max_total_profit": Arg( - '--max-total-profit', - help='Select epochs below total profit.', + "--max-total-profit", + help="Select epochs below total profit.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_min_objective": Arg( - '--min-objective', - help='Select epochs above objective.', + "--min-objective", + help="Select epochs above objective.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_max_objective": Arg( - '--max-objective', - help='Select epochs below objective.', + "--max-objective", + help="Select epochs below objective.", type=float, - metavar='FLOAT', + metavar="FLOAT", ), "hyperopt_list_no_details": Arg( - '--no-details', - help='Do not print best epoch details.', - action='store_true', + "--no-details", + help="Do not print best epoch details.", + action="store_true", ), "hyperopt_show_index": Arg( - '-n', '--index', - help='Specify the index of the epoch to print details for.', + "-n", + "--index", + help="Specify the index of the epoch to print details for.", type=check_int_nonzero, - metavar='INT', + metavar="INT", ), "hyperopt_show_no_header": Arg( - '--no-header', - help='Do not print epoch details header.', - action='store_true', + "--no-header", + help="Do not print epoch details header.", + action="store_true", ), "hyperopt_ignore_missing_space": Arg( - "--ignore-missing-spaces", "--ignore-unparameterized-spaces", - help=("Suppress errors for any requested Hyperopt spaces " - "that do not contain any parameters."), + "--ignore-missing-spaces", + "--ignore-unparameterized-spaces", + help=( + "Suppress errors for any requested Hyperopt spaces " + "that do not contain any parameters." + ), action="store_true", ), "analysis_groups": Arg( "--analysis-groups", - help=("grouping output - " - "0: simple wins/losses by enter tag, " - "1: by enter_tag, " - "2: by enter_tag and exit_tag, " - "3: by pair and enter_tag, " - "4: by pair, enter_ and exit_tag (this can get quite large), " - "5: by exit_tag"), - nargs='+', + help=( + "grouping output - " + "0: simple wins/losses by enter tag, " + "1: by enter_tag, " + "2: by enter_tag and exit_tag, " + "3: by pair and enter_tag, " + "4: by pair, enter_ and exit_tag (this can get quite large), " + "5: by exit_tag" + ), + nargs="+", default=[], - choices=['0', '1', '2', '3', '4', '5'], + choices=["0", "1", "2", "3", "4", "5"], ), "enter_reason_list": Arg( "--enter-reason-list", - help=("Space separated list of entry signals to analyse. Default: all. " - "e.g. 'entry_tag_a entry_tag_b'"), - nargs='+', - default=['all'], + help=( + "Space separated list of entry signals to analyse. Default: all. " + "e.g. 'entry_tag_a entry_tag_b'" + ), + nargs="+", + default=["all"], ), "exit_reason_list": Arg( "--exit-reason-list", - help=("Space separated list of exit signals to analyse. Default: all. " - "e.g. 'exit_tag_a roi stop_loss trailing_stop_loss'"), - nargs='+', - default=['all'], + help=( + "Space separated list of exit signals to analyse. Default: all. " + "e.g. 
'exit_tag_a roi stop_loss trailing_stop_loss'" + ), + nargs="+", + default=["all"], ), "indicator_list": Arg( "--indicator-list", - help=("Space separated list of indicators to analyse. " - "e.g. 'close rsi bb_lowerband profit_abs'"), - nargs='+', + help=( + "Space separated list of indicators to analyse. " + "e.g. 'close rsi bb_lowerband profit_abs'" + ), + nargs="+", default=[], ), "analysis_rejected": Arg( - '--rejected-signals', - help='Analyse rejected signals', - action='store_true', + "--rejected-signals", + help="Analyse rejected signals", + action="store_true", ), "analysis_to_csv": Arg( - '--analysis-to-csv', - help='Save selected analysis tables to individual CSVs', - action='store_true', + "--analysis-to-csv", + help="Save selected analysis tables to individual CSVs", + action="store_true", ), "analysis_csv_path": Arg( - '--analysis-csv-path', - help=("Specify a path to save the analysis CSVs " - "if --analysis-to-csv is enabled. Default: user_data/basktesting_results/"), + "--analysis-csv-path", + help=( + "Specify a path to save the analysis CSVs " + "if --analysis-to-csv is enabled. Default: user_data/basktesting_results/" + ), ), "freqaimodel": Arg( - '--freqaimodel', - help='Specify a custom freqaimodels.', - metavar='NAME', + "--freqaimodel", + help="Specify a custom freqaimodels.", + metavar="NAME", ), "freqaimodel_path": Arg( - '--freqaimodel-path', - help='Specify additional lookup path for freqaimodels.', - metavar='PATH', + "--freqaimodel-path", + help="Specify additional lookup path for freqaimodels.", + metavar="PATH", ), "freqai_backtest_live_models": Arg( - '--freqai-backtest-live-models', - help='Run backtest with ready models.', - action='store_true' + "--freqai-backtest-live-models", help="Run backtest with ready models.", action="store_true" ), "minimum_trade_amount": Arg( - '--minimum-trade-amount', - help='Minimum trade amount for lookahead-analysis', + "--minimum-trade-amount", + help="Minimum trade amount for lookahead-analysis", type=check_int_positive, - metavar='INT', + metavar="INT", ), "targeted_trade_amount": Arg( - '--targeted-trade-amount', - help='Targeted trade amount for lookahead analysis', + "--targeted-trade-amount", + help="Targeted trade amount for lookahead analysis", type=check_int_positive, - metavar='INT', + metavar="INT", ), "lookahead_analysis_exportfilename": Arg( - '--lookahead-analysis-exportfilename', + "--lookahead-analysis-exportfilename", help="Use this csv-filename to store lookahead-analysis-results", - type=str + type=str, ), "startup_candle": Arg( - '--startup-candle', - help='Specify startup candles to be checked (`199`, `499`, `999`, `1999`).', - nargs='+', + "--startup-candle", + help="Specify startup candles to be checked (`199`, `499`, `999`, `1999`).", + nargs="+", ), "show_sensitive": Arg( - '--show-sensitive', - help='Show secrets in the output.', - action='store_true', + "--show-sensitive", + help="Show secrets in the output.", + action="store_true", default=False, ), } diff --git a/freqtrade/commands/data_commands.py b/freqtrade/commands/data_commands.py index 6762a83da..92e60daa4 100644 --- a/freqtrade/commands/data_commands.py +++ b/freqtrade/commands/data_commands.py @@ -5,8 +5,11 @@ from typing import Any, Dict from freqtrade.configuration import TimeRange, setup_utils_configuration from freqtrade.constants import DATETIME_PRINT_FORMAT, DL_DATA_TIMEFRAMES, Config -from freqtrade.data.converter import (convert_ohlcv_format, convert_trades_format, - convert_trades_to_ohlcv) +from freqtrade.data.converter import 
( + convert_ohlcv_format, + convert_trades_format, + convert_trades_to_ohlcv, +) from freqtrade.data.history import download_data_main from freqtrade.enums import CandleType, RunMode, TradingMode from freqtrade.exceptions import ConfigurationError @@ -20,14 +23,17 @@ logger = logging.getLogger(__name__) def _check_data_config_download_sanity(config: Config) -> None: - if 'days' in config and 'timerange' in config: - raise ConfigurationError("--days and --timerange are mutually exclusive. " - "You can only specify one or the other.") + if "days" in config and "timerange" in config: + raise ConfigurationError( + "--days and --timerange are mutually exclusive. " + "You can only specify one or the other." + ) - if 'pairs' not in config: + if "pairs" not in config: raise ConfigurationError( "Downloading data requires a list of pairs. " - "Please check the documentation on how to configure this.") + "Please check the documentation on how to configure this." + ) def start_download_data(args: Dict[str, Any]) -> None: @@ -46,38 +52,41 @@ def start_download_data(args: Dict[str, Any]) -> None: def start_convert_trades(args: Dict[str, Any]) -> None: - config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) timerange = TimeRange() # Remove stake-currency to skip checks which are not relevant for datadownload - config['stake_currency'] = '' + config["stake_currency"] = "" - if 'timeframes' not in config: - config['timeframes'] = DL_DATA_TIMEFRAMES + if "timeframes" not in config: + config["timeframes"] = DL_DATA_TIMEFRAMES # Init exchange exchange = ExchangeResolver.load_exchange(config, validate=False) # Manual validations of relevant settings - for timeframe in config['timeframes']: + for timeframe in config["timeframes"]: exchange.validate_timeframes(timeframe) available_pairs = [ - p for p in exchange.get_markets( - tradable_only=True, active_only=not config.get('include_inactive') - ).keys() + p + for p in exchange.get_markets( + tradable_only=True, active_only=not config.get("include_inactive") + ).keys() ] expanded_pairs = dynamic_expand_pairlist(config, available_pairs) # Convert downloaded trade data to different timeframes convert_trades_to_ohlcv( - pairs=expanded_pairs, timeframes=config['timeframes'], - datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')), - data_format_ohlcv=config['dataformat_ohlcv'], - data_format_trades=config['dataformat_trades'], - candle_type=config.get('candle_type_def', CandleType.SPOT) + pairs=expanded_pairs, + timeframes=config["timeframes"], + datadir=config["datadir"], + timerange=timerange, + erase=bool(config.get("erase")), + data_format_ohlcv=config["dataformat_ohlcv"], + data_format_trades=config["dataformat_trades"], + candle_type=config.get("candle_type_def", CandleType.SPOT), ) @@ -88,14 +97,19 @@ def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) if ohlcv: migrate_data(config) - convert_ohlcv_format(config, - convert_from=args['format_from'], - convert_to=args['format_to'], - erase=args['erase']) + convert_ohlcv_format( + config, + convert_from=args["format_from"], + convert_to=args["format_to"], + erase=args["erase"], + ) else: - convert_trades_format(config, - convert_from=args['format_from_trades'], convert_to=args['format_to'], - erase=args['erase']) + convert_trades_format( + config, + convert_from=args["format_from_trades"], + convert_to=args["format_to"], + erase=args["erase"], + ) def start_list_data(args: Dict[str, Any]) 
-> None: @@ -108,45 +122,59 @@ def start_list_data(args: Dict[str, Any]) -> None: from tabulate import tabulate from freqtrade.data.history import get_datahandler - dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv']) + + dhc = get_datahandler(config["datadir"], config["dataformat_ohlcv"]) paircombs = dhc.ohlcv_get_available_data( - config['datadir'], - config.get('trading_mode', TradingMode.SPOT) - ) + config["datadir"], config.get("trading_mode", TradingMode.SPOT) + ) - if args['pairs']: - paircombs = [comb for comb in paircombs if comb[0] in args['pairs']] + if args["pairs"]: + paircombs = [comb for comb in paircombs if comb[0] in args["pairs"]] print(f"Found {len(paircombs)} pair / timeframe combinations.") - if not config.get('show_timerange'): + if not config.get("show_timerange"): groupedpair = defaultdict(list) for pair, timeframe, candle_type in sorted( - paircombs, - key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]) + paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]) ): groupedpair[(pair, candle_type)].append(timeframe) if groupedpair: - print(tabulate([ - (pair, ', '.join(timeframes), candle_type) - for (pair, candle_type), timeframes in groupedpair.items() - ], - headers=("Pair", "Timeframe", "Type"), - tablefmt='psql', stralign='right')) + print( + tabulate( + [ + (pair, ", ".join(timeframes), candle_type) + for (pair, candle_type), timeframes in groupedpair.items() + ], + headers=("Pair", "Timeframe", "Type"), + tablefmt="psql", + stralign="right", + ) + ) else: - paircombs1 = [( - pair, timeframe, candle_type, - *dhc.ohlcv_data_min_max(pair, timeframe, candle_type) - ) for pair, timeframe, candle_type in paircombs] + paircombs1 = [ + (pair, timeframe, candle_type, *dhc.ohlcv_data_min_max(pair, timeframe, candle_type)) + for pair, timeframe, candle_type in paircombs + ] - print(tabulate([ - (pair, timeframe, candle_type, - start.strftime(DATETIME_PRINT_FORMAT), - end.strftime(DATETIME_PRINT_FORMAT), length) - for pair, timeframe, candle_type, start, end, length in sorted( - paircombs1, - key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])) - ], - headers=("Pair", "Timeframe", "Type", 'From', 'To', 'Candles'), - tablefmt='psql', stralign='right')) + print( + tabulate( + [ + ( + pair, + timeframe, + candle_type, + start.strftime(DATETIME_PRINT_FORMAT), + end.strftime(DATETIME_PRINT_FORMAT), + length, + ) + for pair, timeframe, candle_type, start, end, length in sorted( + paircombs1, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]) + ) + ], + headers=("Pair", "Timeframe", "Type", "From", "To", "Candles"), + tablefmt="psql", + stralign="right", + ) + ) diff --git a/freqtrade/commands/db_commands.py b/freqtrade/commands/db_commands.py index d83605c6f..98af38ca4 100644 --- a/freqtrade/commands/db_commands.py +++ b/freqtrade/commands/db_commands.py @@ -19,9 +19,9 @@ def start_convert_db(args: Dict[str, Any]) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) - init_db(config['db_url']) + init_db(config["db_url"]) session_target = Trade.session - init_db(config['db_url_from']) + init_db(config["db_url_from"]) logger.info("Starting db migration.") trade_count = 0 @@ -47,9 +47,11 @@ def start_convert_db(args: Dict[str, Any]) -> None: max_order_id = session_target.scalar(select(func.max(Order.id))) max_pairlock_id = session_target.scalar(select(func.max(PairLock.id))) - set_sequence_ids(session_target.get_bind(), - trade_id=max_trade_id, - order_id=max_order_id, - pairlock_id=max_pairlock_id) + set_sequence_ids( + 
session_target.get_bind(), + trade_id=max_trade_id, + order_id=max_order_id, + pairlock_id=max_pairlock_id, + ) logger.info(f"Migrated {trade_count} Trades, and {pairlock_count} Pairlocks.") diff --git a/freqtrade/commands/deploy_commands.py b/freqtrade/commands/deploy_commands.py index 8de600c9e..3a784bda9 100644 --- a/freqtrade/commands/deploy_commands.py +++ b/freqtrade/commands/deploy_commands.py @@ -38,7 +38,7 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st """ Deploy new strategy from template to strategy_path """ - fallback = 'full' + fallback = "full" attributes = render_template_with_fallback( templatefile=f"strategy_subtemplates/strategy_attributes_{subtemplate}.j2", templatefallbackfile=f"strategy_subtemplates/strategy_attributes_{fallback}.j2", @@ -64,33 +64,35 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st templatefallbackfile="strategy_subtemplates/strategy_methods_empty.j2", ) - strategy_text = render_template(templatefile='base_strategy.py.j2', - arguments={"strategy": strategy_name, - "attributes": attributes, - "indicators": indicators, - "buy_trend": buy_trend, - "sell_trend": sell_trend, - "plot_config": plot_config, - "additional_methods": additional_methods, - }) + strategy_text = render_template( + templatefile="base_strategy.py.j2", + arguments={ + "strategy": strategy_name, + "attributes": attributes, + "indicators": indicators, + "buy_trend": buy_trend, + "sell_trend": sell_trend, + "plot_config": plot_config, + "additional_methods": additional_methods, + }, + ) logger.info(f"Writing strategy to `{strategy_path}`.") strategy_path.write_text(strategy_text) def start_new_strategy(args: Dict[str, Any]) -> None: - config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) if "strategy" in args and args["strategy"]: - - new_path = config['user_data_dir'] / USERPATH_STRATEGIES / (args['strategy'] + '.py') + new_path = config["user_data_dir"] / USERPATH_STRATEGIES / (args["strategy"] + ".py") if new_path.exists(): - raise OperationalException(f"`{new_path}` already exists. " - "Please choose another Strategy Name.") + raise OperationalException( + f"`{new_path}` already exists. Please choose another Strategy Name." 
+ ) - deploy_new_strategy(args['strategy'], new_path, args['template']) + deploy_new_strategy(args["strategy"], new_path, args["template"]) else: raise ConfigurationError("`new-strategy` requires --strategy to be set.") @@ -100,8 +102,8 @@ def clean_ui_subdir(directory: Path): if directory.is_dir(): logger.info("Removing UI directory content.") - for p in reversed(list(directory.glob('**/*'))): # iterate contents from leaves to root - if p.name in ('.gitkeep', 'fallback_file.html'): + for p in reversed(list(directory.glob("**/*"))): # iterate contents from leaves to root + if p.name in (".gitkeep", "fallback_file.html"): continue if p.is_file(): p.unlink() @@ -110,11 +112,11 @@ def clean_ui_subdir(directory: Path): def read_ui_version(dest_folder: Path) -> Optional[str]: - file = dest_folder / '.uiversion' + file = dest_folder / ".uiversion" if not file.is_file(): return None - with file.open('r') as f: + with file.open("r") as f: return f.read() @@ -133,12 +135,12 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str): destfile.mkdir(exist_ok=True) else: destfile.write_bytes(x.read()) - with (dest_folder / '.uiversion').open('w') as f: + with (dest_folder / ".uiversion").open("w") as f: f.write(version) def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]: - base_url = 'https://api.github.com/repos/freqtrade/frequi/' + base_url = "https://api.github.com/repos/freqtrade/frequi/" # Get base UI Repo path resp = requests.get(f"{base_url}releases", timeout=req_timeout) @@ -146,42 +148,41 @@ def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]: r = resp.json() if version: - tmp = [x for x in r if x['name'] == version] + tmp = [x for x in r if x["name"] == version] if tmp: - latest_version = tmp[0]['name'] - assets = tmp[0].get('assets', []) + latest_version = tmp[0]["name"] + assets = tmp[0].get("assets", []) else: raise ValueError("UI-Version not found.") else: - latest_version = r[0]['name'] - assets = r[0].get('assets', []) - dl_url = '' + latest_version = r[0]["name"] + assets = r[0].get("assets", []) + dl_url = "" if assets and len(assets) > 0: - dl_url = assets[0]['browser_download_url'] + dl_url = assets[0]["browser_download_url"] # URL not found - try assets url if not dl_url: - assets = r[0]['assets_url'] + assets = r[0]["assets_url"] resp = requests.get(assets, timeout=req_timeout) r = resp.json() - dl_url = r[0]['browser_download_url'] + dl_url = r[0]["browser_download_url"] return dl_url, latest_version def start_install_ui(args: Dict[str, Any]) -> None: - - dest_folder = Path(__file__).parents[1] / 'rpc/api_server/ui/installed/' + dest_folder = Path(__file__).parents[1] / "rpc/api_server/ui/installed/" # First make sure the assets are removed. - dl_url, latest_version = get_ui_download_url(args.get('ui_version')) + dl_url, latest_version = get_ui_download_url(args.get("ui_version")) curr_version = read_ui_version(dest_folder) - if curr_version == latest_version and not args.get('erase_ui_only'): + if curr_version == latest_version and not args.get("erase_ui_only"): logger.info(f"UI already up-to-date, FreqUI Version {curr_version}.") return clean_ui_subdir(dest_folder) - if args.get('erase_ui_only'): + if args.get("erase_ui_only"): logger.info("Erased UI directory content. 
Not downloading new version.") else: # Download a new version diff --git a/freqtrade/commands/hyperopt_commands.py b/freqtrade/commands/hyperopt_commands.py index 19e291ea7..ac0b8453f 100644 --- a/freqtrade/commands/hyperopt_commands.py +++ b/freqtrade/commands/hyperopt_commands.py @@ -22,15 +22,15 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) - print_colorized = config.get('print_colorized', False) - print_json = config.get('print_json', False) - export_csv = config.get('export_csv') - no_details = config.get('hyperopt_list_no_details', False) + print_colorized = config.get("print_colorized", False) + print_json = config.get("print_json", False) + export_csv = config.get("export_csv") + no_details = config.get("hyperopt_list_no_details", False) no_header = False results_file = get_latest_hyperopt_file( - config['user_data_dir'] / 'hyperopt_results', - config.get('hyperoptexportfilename')) + config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename") + ) # Previous evaluations epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config) @@ -40,21 +40,26 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None: if not export_csv: try: - print(HyperoptTools.get_result_table(config, epochs, total_epochs, - not config.get('hyperopt_list_best', False), - print_colorized, 0)) + print( + HyperoptTools.get_result_table( + config, + epochs, + total_epochs, + not config.get("hyperopt_list_best", False), + print_colorized, + 0, + ) + ) except KeyboardInterrupt: - print('User interrupted..') + print("User interrupted..") if epochs and not no_details: - sorted_epochs = sorted(epochs, key=itemgetter('loss')) + sorted_epochs = sorted(epochs, key=itemgetter("loss")) results = sorted_epochs[0] HyperoptTools.show_epoch_details(results, total_epochs, print_json, no_header) if epochs and export_csv: - HyperoptTools.export_csv_file( - config, epochs, export_csv - ) + HyperoptTools.export_csv_file(config, epochs, export_csv) def start_hyperopt_show(args: Dict[str, Any]) -> None: @@ -65,13 +70,13 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) - print_json = config.get('print_json', False) - no_header = config.get('hyperopt_show_no_header', False) + print_json = config.get("print_json", False) + no_header = config.get("hyperopt_show_no_header", False) results_file = get_latest_hyperopt_file( - config['user_data_dir'] / 'hyperopt_results', - config.get('hyperoptexportfilename')) + config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename") + ) - n = config.get('hyperopt_show_index', -1) + n = config.get("hyperopt_show_index", -1) # Previous evaluations epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config) @@ -80,10 +85,12 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None: if n > filtered_epochs: raise OperationalException( - f"The index of the epoch to show should be less than {filtered_epochs + 1}.") + f"The index of the epoch to show should be less than {filtered_epochs + 1}." + ) if n < -filtered_epochs: raise OperationalException( - f"The index of the epoch to show should be greater than {-filtered_epochs - 1}.") + f"The index of the epoch to show should be greater than {-filtered_epochs - 1}." 
+ ) # Translate epoch index from human-readable format to pythonic if n > 0: @@ -92,13 +99,18 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None: if epochs: val = epochs[n] - metrics = val['results_metrics'] - if 'strategy_name' in metrics: - strategy_name = metrics['strategy_name'] - show_backtest_result(strategy_name, metrics, - metrics['stake_currency'], config.get('backtest_breakdown', [])) + metrics = val["results_metrics"] + if "strategy_name" in metrics: + strategy_name = metrics["strategy_name"] + show_backtest_result( + strategy_name, + metrics, + metrics["stake_currency"], + config.get("backtest_breakdown", []), + ) HyperoptTools.try_export_params(config, strategy_name, val) - HyperoptTools.show_epoch_details(val, total_epochs, print_json, no_header, - header_str="Epoch details") + HyperoptTools.show_epoch_details( + val, total_epochs, print_json, no_header, header_str="Epoch details" + ) diff --git a/freqtrade/commands/list_commands.py b/freqtrade/commands/list_commands.py index 550c29f69..257166f9c 100644 --- a/freqtrade/commands/list_commands.py +++ b/freqtrade/commands/list_commands.py @@ -26,42 +26,47 @@ def start_list_exchanges(args: Dict[str, Any]) -> None: :param args: Cli args from Arguments() :return: None """ - exchanges = list_available_exchanges(args['list_exchanges_all']) + exchanges = list_available_exchanges(args["list_exchanges_all"]) - if args['print_one_column']: - print('\n'.join([e['name'] for e in exchanges])) + if args["print_one_column"]: + print("\n".join([e["name"] for e in exchanges])) else: headers = { - 'name': 'Exchange name', - 'supported': 'Supported', - 'trade_modes': 'Markets', - 'comment': 'Reason', - } - headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {}) + "name": "Exchange name", + "supported": "Supported", + "trade_modes": "Markets", + "comment": "Reason", + } + headers.update({"valid": "Valid"} if args["list_exchanges_all"] else {}) def build_entry(exchange: ValidExchangesType, valid: bool): - valid_entry = {'valid': exchange['valid']} if valid else {} + valid_entry = {"valid": exchange["valid"]} if valid else {} result: Dict[str, Union[str, bool]] = { - 'name': exchange['name'], + "name": exchange["name"], **valid_entry, - 'supported': 'Official' if exchange['supported'] else '', - 'trade_modes': ', '.join( - (f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode'] - for a in exchange['trade_modes'] + "supported": "Official" if exchange["supported"] else "", + "trade_modes": ", ".join( + (f"{a['margin_mode']} " if a["margin_mode"] else "") + a["trading_mode"] + for a in exchange["trade_modes"] ), - 'comment': exchange['comment'], + "comment": exchange["comment"], } return result - if args['list_exchanges_all']: + if args["list_exchanges_all"]: print("All exchanges supported by the ccxt library:") exchanges = [build_entry(e, True) for e in exchanges] else: print("Exchanges available for Freqtrade:") - exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False] + exchanges = [build_entry(e, False) for e in exchanges if e["valid"] is not False] - print(tabulate(exchanges, headers=headers, )) + print( + tabulate( + exchanges, + headers=headers, + ) + ) def _print_objs_tabular(objs: List, print_colorized: bool) -> None: @@ -71,26 +76,35 @@ def _print_objs_tabular(objs: List, print_colorized: bool) -> None: yellow = Fore.YELLOW reset = Style.RESET_ALL else: - red = '' - yellow = '' - reset = '' + red = "" + yellow = "" + reset = "" - names = [s['name'] for s in objs] - 
objs_to_print = [{ - 'name': s['name'] if s['name'] else "--", - 'location': s['location_rel'], - 'status': (red + "LOAD FAILED" + reset if s['class'] is None - else "OK" if names.count(s['name']) == 1 - else yellow + "DUPLICATE NAME" + reset) - } for s in objs] + names = [s["name"] for s in objs] + objs_to_print = [ + { + "name": s["name"] if s["name"] else "--", + "location": s["location_rel"], + "status": ( + red + "LOAD FAILED" + reset + if s["class"] is None + else "OK" + if names.count(s["name"]) == 1 + else yellow + "DUPLICATE NAME" + reset + ), + } + for s in objs + ] for idx, s in enumerate(objs): - if 'hyperoptable' in s: - objs_to_print[idx].update({ - 'hyperoptable': "Yes" if s['hyperoptable']['count'] > 0 else "No", - 'buy-Params': len(s['hyperoptable'].get('buy', [])), - 'sell-Params': len(s['hyperoptable'].get('sell', [])), - }) - print(tabulate(objs_to_print, headers='keys', tablefmt='psql', stralign='right')) + if "hyperoptable" in s: + objs_to_print[idx].update( + { + "hyperoptable": "Yes" if s["hyperoptable"]["count"] > 0 else "No", + "buy-Params": len(s["hyperoptable"].get("buy", [])), + "sell-Params": len(s["hyperoptable"].get("sell", [])), + } + ) + print(tabulate(objs_to_print, headers="keys", tablefmt="psql", stralign="right")) def start_list_strategies(args: Dict[str, Any]) -> None: @@ -100,19 +114,20 @@ def start_list_strategies(args: Dict[str, Any]) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) strategy_objs = StrategyResolver.search_all_objects( - config, not args['print_one_column'], config.get('recursive_strategy_search', False)) + config, not args["print_one_column"], config.get("recursive_strategy_search", False) + ) # Sort alphabetically - strategy_objs = sorted(strategy_objs, key=lambda x: x['name']) + strategy_objs = sorted(strategy_objs, key=lambda x: x["name"]) for obj in strategy_objs: - if obj['class']: - obj['hyperoptable'] = obj['class'].detect_all_parameters() + if obj["class"]: + obj["hyperoptable"] = obj["class"].detect_all_parameters() else: - obj['hyperoptable'] = {'count': 0} + obj["hyperoptable"] = {"count": 0} - if args['print_one_column']: - print('\n'.join([s['name'] for s in strategy_objs])) + if args["print_one_column"]: + print("\n".join([s["name"] for s in strategy_objs])) else: - _print_objs_tabular(strategy_objs, config.get('print_colorized', False)) + _print_objs_tabular(strategy_objs, config.get("print_colorized", False)) def start_list_freqAI_models(args: Dict[str, Any]) -> None: @@ -121,13 +136,14 @@ def start_list_freqAI_models(args: Dict[str, Any]) -> None: """ config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver - model_objs = FreqaiModelResolver.search_all_objects(config, not args['print_one_column']) + + model_objs = FreqaiModelResolver.search_all_objects(config, not args["print_one_column"]) # Sort alphabetically - model_objs = sorted(model_objs, key=lambda x: x['name']) - if args['print_one_column']: - print('\n'.join([s['name'] for s in model_objs])) + model_objs = sorted(model_objs, key=lambda x: x["name"]) + if args["print_one_column"]: + print("\n".join([s["name"] for s in model_objs])) else: - _print_objs_tabular(model_objs, config.get('print_colorized', False)) + _print_objs_tabular(model_objs, config.get("print_colorized", False)) def start_list_timeframes(args: Dict[str, Any]) -> None: @@ -136,16 +152,18 @@ def start_list_timeframes(args: Dict[str, Any]) -> None: """ config = 
setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) # Do not use timeframe set in the config - config['timeframe'] = None + config["timeframe"] = None # Init exchange exchange = ExchangeResolver.load_exchange(config, validate=False) - if args['print_one_column']: - print('\n'.join(exchange.timeframes)) + if args["print_one_column"]: + print("\n".join(exchange.timeframes)) else: - print(f"Timeframes available for the exchange `{exchange.name}`: " - f"{', '.join(exchange.timeframes)}") + print( + f"Timeframes available for the exchange `{exchange.name}`: " + f"{', '.join(exchange.timeframes)}" + ) def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None: @@ -161,51 +179,75 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None: exchange = ExchangeResolver.load_exchange(config, validate=False) # By default only active pairs/markets are to be shown - active_only = not args.get('list_pairs_all', False) + active_only = not args.get("list_pairs_all", False) - base_currencies = args.get('base_currencies', []) - quote_currencies = args.get('quote_currencies', []) + base_currencies = args.get("base_currencies", []) + quote_currencies = args.get("quote_currencies", []) try: - pairs = exchange.get_markets(base_currencies=base_currencies, - quote_currencies=quote_currencies, - tradable_only=pairs_only, - active_only=active_only) + pairs = exchange.get_markets( + base_currencies=base_currencies, + quote_currencies=quote_currencies, + tradable_only=pairs_only, + active_only=active_only, + ) # Sort the pairs/markets by symbol pairs = dict(sorted(pairs.items())) except Exception as e: raise OperationalException(f"Cannot get markets. Reason: {e}") from e else: - summary_str = ((f"Exchange {exchange.name} has {len(pairs)} ") + - ("active " if active_only else "") + - (plural(len(pairs), "pair" if pairs_only else "market")) + - (f" with {', '.join(base_currencies)} as base " - f"{plural(len(base_currencies), 'currency', 'currencies')}" - if base_currencies else "") + - (" and" if base_currencies and quote_currencies else "") + - (f" with {', '.join(quote_currencies)} as quote " - f"{plural(len(quote_currencies), 'currency', 'currencies')}" - if quote_currencies else "")) + summary_str = ( + (f"Exchange {exchange.name} has {len(pairs)} ") + + ("active " if active_only else "") + + (plural(len(pairs), "pair" if pairs_only else "market")) + + ( + f" with {', '.join(base_currencies)} as base " + f"{plural(len(base_currencies), 'currency', 'currencies')}" + if base_currencies + else "" + ) + + (" and" if base_currencies and quote_currencies else "") + + ( + f" with {', '.join(quote_currencies)} as quote " + f"{plural(len(quote_currencies), 'currency', 'currencies')}" + if quote_currencies + else "" + ) + ) - headers = ["Id", "Symbol", "Base", "Quote", "Active", - "Spot", "Margin", "Future", "Leverage"] + headers = [ + "Id", + "Symbol", + "Base", + "Quote", + "Active", + "Spot", + "Margin", + "Future", + "Leverage", + ] - tabular_data = [{ - 'Id': v['id'], - 'Symbol': v['symbol'], - 'Base': v['base'], - 'Quote': v['quote'], - 'Active': market_is_active(v), - 'Spot': 'Spot' if exchange.market_is_spot(v) else '', - 'Margin': 'Margin' if exchange.market_is_margin(v) else '', - 'Future': 'Future' if exchange.market_is_future(v) else '', - 'Leverage': exchange.get_max_leverage(v['symbol'], 20) - } for _, v in pairs.items()] + tabular_data = [ + { + "Id": v["id"], + "Symbol": v["symbol"], + "Base": v["base"], + "Quote": v["quote"], + "Active": market_is_active(v), + 
"Spot": "Spot" if exchange.market_is_spot(v) else "", + "Margin": "Margin" if exchange.market_is_margin(v) else "", + "Future": "Future" if exchange.market_is_future(v) else "", + "Leverage": exchange.get_max_leverage(v["symbol"], 20), + } + for _, v in pairs.items() + ] - if (args.get('print_one_column', False) or - args.get('list_pairs_print_json', False) or - args.get('print_csv', False)): + if ( + args.get("print_one_column", False) + or args.get("list_pairs_print_json", False) + or args.get("print_csv", False) + ): # Print summary string in the log in case of machine-readable # regular formats. logger.info(f"{summary_str}.") @@ -215,24 +257,26 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None: print() if pairs: - if args.get('print_list', False): + if args.get("print_list", False): # print data as a list, with human-readable summary print(f"{summary_str}: {', '.join(pairs.keys())}.") - elif args.get('print_one_column', False): - print('\n'.join(pairs.keys())) - elif args.get('list_pairs_print_json', False): + elif args.get("print_one_column", False): + print("\n".join(pairs.keys())) + elif args.get("list_pairs_print_json", False): print(rapidjson.dumps(list(pairs.keys()), default=str)) - elif args.get('print_csv', False): + elif args.get("print_csv", False): writer = csv.DictWriter(sys.stdout, fieldnames=headers) writer.writeheader() writer.writerows(tabular_data) else: # print data as a table, with the human-readable summary print(f"{summary_str}:") - print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right')) - elif not (args.get('print_one_column', False) or - args.get('list_pairs_print_json', False) or - args.get('print_csv', False)): + print(tabulate(tabular_data, headers="keys", tablefmt="psql", stralign="right")) + elif not ( + args.get("print_one_column", False) + or args.get("list_pairs_print_json", False) + or args.get("print_csv", False) + ): print(f"{summary_str}.") @@ -243,21 +287,22 @@ def start_show_trades(args: Dict[str, Any]) -> None: import json from freqtrade.persistence import Trade, init_db + config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) - if 'db_url' not in config: + if "db_url" not in config: raise ConfigurationError("--db-url is required for this command.") logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"') - init_db(config['db_url']) + init_db(config["db_url"]) tfilter = [] - if config.get('trade_ids'): - tfilter.append(Trade.id.in_(config['trade_ids'])) + if config.get("trade_ids"): + tfilter.append(Trade.id.in_(config["trade_ids"])) trades = Trade.get_trades(tfilter).all() logger.info(f"Printing {len(trades)} Trades: ") - if config.get('print_json', False): + if config.get("print_json", False): print(json.dumps([trade.to_json() for trade in trades], indent=4)) else: for trade in trades: diff --git a/freqtrade/commands/optimize_commands.py b/freqtrade/commands/optimize_commands.py index f010a3cee..aa055469a 100644 --- a/freqtrade/commands/optimize_commands.py +++ b/freqtrade/commands/optimize_commands.py @@ -21,20 +21,22 @@ def setup_optimize_configuration(args: Dict[str, Any], method: RunMode) -> Dict[ config = setup_utils_configuration(args, method) no_unlimited_runmodes = { - RunMode.BACKTEST: 'backtesting', - RunMode.HYPEROPT: 'hyperoptimization', + RunMode.BACKTEST: "backtesting", + RunMode.HYPEROPT: "hyperoptimization", } if method in no_unlimited_runmodes.keys(): - wallet_size = config['dry_run_wallet'] * config['tradable_balance_ratio'] + wallet_size = 
config["dry_run_wallet"] * config["tradable_balance_ratio"] # tradable_balance_ratio - if (config['stake_amount'] != constants.UNLIMITED_STAKE_AMOUNT - and config['stake_amount'] > wallet_size): - wallet = fmt_coin(wallet_size, config['stake_currency']) - stake = fmt_coin(config['stake_amount'], config['stake_currency']) + if ( + config["stake_amount"] != constants.UNLIMITED_STAKE_AMOUNT + and config["stake_amount"] > wallet_size + ): + wallet = fmt_coin(wallet_size, config["stake_currency"]) + stake = fmt_coin(config["stake_amount"], config["stake_currency"]) raise ConfigurationError( f"Starting balance ({wallet}) is smaller than stake_amount {stake}. " f"Wallet is calculated as `dry_run_wallet * tradable_balance_ratio`." - ) + ) return config @@ -51,7 +53,7 @@ def start_backtesting(args: Dict[str, Any]) -> None: # Initialize configuration config = setup_optimize_configuration(args, RunMode.BACKTEST) - logger.info('Starting freqtrade in Backtesting mode') + logger.info("Starting freqtrade in Backtesting mode") # Initialize backtesting object backtesting = Backtesting(config) @@ -68,7 +70,7 @@ def start_backtesting_show(args: Dict[str, Any]) -> None: from freqtrade.data.btanalysis import load_backtest_stats from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist - results = load_backtest_stats(config['exportfilename']) + results = load_backtest_stats(config["exportfilename"]) show_backtest_results(config, results) show_sorted_pairlist(config, results) @@ -87,20 +89,20 @@ def start_hyperopt(args: Dict[str, Any]) -> None: from freqtrade.optimize.hyperopt import Hyperopt except ImportError as e: raise OperationalException( - f"{e}. Please ensure that the hyperopt dependencies are installed.") from e + f"{e}. Please ensure that the hyperopt dependencies are installed." + ) from e # Initialize configuration config = setup_optimize_configuration(args, RunMode.HYPEROPT) - logger.info('Starting freqtrade in Hyperopt mode') + logger.info("Starting freqtrade in Hyperopt mode") lock = FileLock(Hyperopt.get_lock_filename(config)) try: with lock.acquire(timeout=1): - # Remove noisy log messages - logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING) - logging.getLogger('filelock').setLevel(logging.WARNING) + logging.getLogger("hyperopt.tpe").setLevel(logging.WARNING) + logging.getLogger("filelock").setLevel(logging.WARNING) # Initialize backtesting object hyperopt = Hyperopt(config) @@ -108,9 +110,11 @@ def start_hyperopt(args: Dict[str, Any]) -> None: except Timeout: logger.info("Another running instance of freqtrade Hyperopt detected.") - logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. " - "Hyperopt module is resource hungry. Please run your Hyperopt sequentially " - "or on separate machines.") + logger.info( + "Simultaneous execution of multiple Hyperopt commands is not supported. " + "Hyperopt module is resource hungry. Please run your Hyperopt sequentially " + "or on separate machines." + ) logger.info("Quitting now.") # TODO: return False here in order to help freqtrade to exit # with non-zero exit code... 
@@ -127,7 +131,7 @@ def start_edge(args: Dict[str, Any]) -> None: # Initialize configuration config = setup_optimize_configuration(args, RunMode.EDGE) - logger.info('Starting freqtrade in Edge mode') + logger.info("Starting freqtrade in Edge mode") # Initialize Edge object edge_cli = EdgeCli(config) diff --git a/freqtrade/commands/pairlist_commands.py b/freqtrade/commands/pairlist_commands.py index b1df7e98b..8b2963563 100644 --- a/freqtrade/commands/pairlist_commands.py +++ b/freqtrade/commands/pairlist_commands.py @@ -17,28 +17,29 @@ def start_test_pairlist(args: Dict[str, Any]) -> None: """ from freqtrade.persistence import FtNoDBContext from freqtrade.plugins.pairlistmanager import PairListManager + config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE) exchange = ExchangeResolver.load_exchange(config, validate=False) - quote_currencies = args.get('quote_currencies') + quote_currencies = args.get("quote_currencies") if not quote_currencies: - quote_currencies = [config.get('stake_currency')] + quote_currencies = [config.get("stake_currency")] results = {} with FtNoDBContext(): for curr in quote_currencies: - config['stake_currency'] = curr + config["stake_currency"] = curr pairlists = PairListManager(exchange, config) pairlists.refresh_pairlist() results[curr] = pairlists.whitelist for curr, pairlist in results.items(): - if not args.get('print_one_column', False) and not args.get('list_pairs_print_json', False): + if not args.get("print_one_column", False) and not args.get("list_pairs_print_json", False): print(f"Pairs for {curr}: ") - if args.get('print_one_column', False): - print('\n'.join(pairlist)) - elif args.get('list_pairs_print_json', False): + if args.get("print_one_column", False): + print("\n".join(pairlist)) + elif args.get("list_pairs_print_json", False): print(rapidjson.dumps(list(pairlist), default=str)) else: print(pairlist) diff --git a/freqtrade/commands/plot_commands.py b/freqtrade/commands/plot_commands.py index 95ad3cdce..4b939cc80 100644 --- a/freqtrade/commands/plot_commands.py +++ b/freqtrade/commands/plot_commands.py @@ -6,10 +6,11 @@ from freqtrade.exceptions import ConfigurationError def validate_plot_args(args: Dict[str, Any]) -> None: - if not args.get('datadir') and not args.get('config'): + if not args.get("datadir") and not args.get("config"): raise ConfigurationError( "You need to specify either `--datadir` or `--config` " - "for plot-profit and plot-dataframe.") + "for plot-profit and plot-dataframe." + ) def start_plot_dataframe(args: Dict[str, Any]) -> None: @@ -18,6 +19,7 @@ def start_plot_dataframe(args: Dict[str, Any]) -> None: """ # Import here to avoid errors if plot-dependencies are not installed. from freqtrade.plot.plotting import load_and_plot_trades + validate_plot_args(args) config = setup_utils_configuration(args, RunMode.PLOT) @@ -30,6 +32,7 @@ def start_plot_profit(args: Dict[str, Any]) -> None: """ # Import here to avoid errors if plot-dependencies are not installed. 
from freqtrade.plot.plotting import plot_profit + validate_plot_args(args) config = setup_utils_configuration(args, RunMode.PLOT) diff --git a/freqtrade/commands/strategy_utils_commands.py b/freqtrade/commands/strategy_utils_commands.py index e579ec475..761a7262c 100644 --- a/freqtrade/commands/strategy_utils_commands.py +++ b/freqtrade/commands/strategy_utils_commands.py @@ -26,13 +26,15 @@ def start_strategy_update(args: Dict[str, Any]) -> None: config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE) strategy_objs = StrategyResolver.search_all_objects( - config, enum_failed=False, recursive=config.get('recursive_strategy_search', False)) + config, enum_failed=False, recursive=config.get("recursive_strategy_search", False) + ) filtered_strategy_objs = [] - if args['strategy_list']: + if args["strategy_list"]: filtered_strategy_objs = [ - strategy_obj for strategy_obj in strategy_objs - if strategy_obj['name'] in args['strategy_list'] + strategy_obj + for strategy_obj in strategy_objs + if strategy_obj["name"] in args["strategy_list"] ] else: @@ -41,8 +43,8 @@ def start_strategy_update(args: Dict[str, Any]) -> None: processed_locations = set() for strategy_obj in filtered_strategy_objs: - if strategy_obj['location'] not in processed_locations: - processed_locations.add(strategy_obj['location']) + if strategy_obj["location"] not in processed_locations: + processed_locations.add(strategy_obj["location"]) start_conversion(strategy_obj, config) diff --git a/freqtrade/configuration/config_secrets.py b/freqtrade/configuration/config_secrets.py index 47ee741bf..5a694dddf 100644 --- a/freqtrade/configuration/config_secrets.py +++ b/freqtrade/configuration/config_secrets.py @@ -24,13 +24,13 @@ def sanitize_config(config: Config, *, show_sensitive: bool = False) -> Config: ] config = deepcopy(config) for key in keys_to_remove: - if '.' in key: - nested_keys = key.split('.') + if "." in key: + nested_keys = key.split(".") nested_config = config for nested_key in nested_keys[:-1]: nested_config = nested_config.get(nested_key, {}) - nested_config[nested_keys[-1]] = 'REDACTED' + nested_config[nested_keys[-1]] = "REDACTED" else: - config[key] = 'REDACTED' + config[key] = "REDACTED" return config diff --git a/freqtrade/configuration/config_setup.py b/freqtrade/configuration/config_setup.py index 154ba5175..1246b6ea6 100644 --- a/freqtrade/configuration/config_setup.py +++ b/freqtrade/configuration/config_setup.py @@ -11,7 +11,8 @@ logger = logging.getLogger(__name__) def setup_utils_configuration( - args: Dict[str, Any], method: RunMode, *, set_dry: bool = True) -> Dict[str, Any]: + args: Dict[str, Any], method: RunMode, *, set_dry: bool = True +) -> Dict[str, Any]: """ Prepare the configuration for utils subcommands :param args: Cli args from Arguments() @@ -23,7 +24,7 @@ def setup_utils_configuration( # Ensure these modes are using Dry-run if set_dry: - config['dry_run'] = True + config["dry_run"] = True validate_config_consistency(config, preliminary=True) return config diff --git a/freqtrade/configuration/config_validation.py b/freqtrade/configuration/config_validation.py index 419af8347..3f8e5c9ef 100644 --- a/freqtrade/configuration/config_validation.py +++ b/freqtrade/configuration/config_validation.py @@ -20,18 +20,16 @@ def _extend_validator(validator_class): Extended validator for the Freqtrade configuration JSON Schema. Currently it only handles defaults for subschemas. 
""" - validate_properties = validator_class.VALIDATORS['properties'] + validate_properties = validator_class.VALIDATORS["properties"] def set_defaults(validator, properties, instance, schema): for prop, subschema in properties.items(): - if 'default' in subschema: - instance.setdefault(prop, subschema['default']) + if "default" in subschema: + instance.setdefault(prop, subschema["default"]) yield from validate_properties(validator, properties, instance, schema) - return validators.extend( - validator_class, {'properties': set_defaults} - ) + return validators.extend(validator_class, {"properties": set_defaults}) FreqtradeValidator = _extend_validator(Draft4Validator) @@ -44,27 +42,23 @@ def validate_config_schema(conf: Dict[str, Any], preliminary: bool = False) -> D :return: Returns the config if valid, otherwise throw an exception """ conf_schema = deepcopy(constants.CONF_SCHEMA) - if conf.get('runmode', RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE): - conf_schema['required'] = constants.SCHEMA_TRADE_REQUIRED - elif conf.get('runmode', RunMode.OTHER) in (RunMode.BACKTEST, RunMode.HYPEROPT): + if conf.get("runmode", RunMode.OTHER) in (RunMode.DRY_RUN, RunMode.LIVE): + conf_schema["required"] = constants.SCHEMA_TRADE_REQUIRED + elif conf.get("runmode", RunMode.OTHER) in (RunMode.BACKTEST, RunMode.HYPEROPT): if preliminary: - conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED + conf_schema["required"] = constants.SCHEMA_BACKTEST_REQUIRED else: - conf_schema['required'] = constants.SCHEMA_BACKTEST_REQUIRED_FINAL - elif conf.get('runmode', RunMode.OTHER) == RunMode.WEBSERVER: - conf_schema['required'] = constants.SCHEMA_MINIMAL_WEBSERVER + conf_schema["required"] = constants.SCHEMA_BACKTEST_REQUIRED_FINAL + elif conf.get("runmode", RunMode.OTHER) == RunMode.WEBSERVER: + conf_schema["required"] = constants.SCHEMA_MINIMAL_WEBSERVER else: - conf_schema['required'] = constants.SCHEMA_MINIMAL_REQUIRED + conf_schema["required"] = constants.SCHEMA_MINIMAL_REQUIRED try: FreqtradeValidator(conf_schema).validate(conf) return conf except ValidationError as e: - logger.critical( - f"Invalid configuration. Reason: {e}" - ) - raise ValidationError( - best_match(Draft4Validator(conf_schema).iter_errors(conf)).message - ) + logger.critical(f"Invalid configuration. Reason: {e}") + raise ValidationError(best_match(Draft4Validator(conf_schema).iter_errors(conf)).message) def validate_config_consistency(conf: Dict[str, Any], *, preliminary: bool = False) -> None: @@ -91,7 +85,7 @@ def validate_config_consistency(conf: Dict[str, Any], *, preliminary: bool = Fal validate_migrated_strategy_settings(conf) # validate configuration before returning - logger.info('Validating configuration ...') + logger.info("Validating configuration ...") validate_config_schema(conf, preliminary=preliminary) @@ -100,9 +94,11 @@ def _validate_unlimited_amount(conf: Dict[str, Any]) -> None: If edge is disabled, either max_open_trades or stake_amount need to be set. 
:raise: ConfigurationError if config validation failed """ - if (not conf.get('edge', {}).get('enabled') - and conf.get('max_open_trades') == float('inf') - and conf.get('stake_amount') == constants.UNLIMITED_STAKE_AMOUNT): + if ( + not conf.get("edge", {}).get("enabled") + and conf.get("max_open_trades") == float("inf") + and conf.get("stake_amount") == constants.UNLIMITED_STAKE_AMOUNT + ): raise ConfigurationError("`max_open_trades` and `stake_amount` cannot both be unlimited.") @@ -111,45 +107,47 @@ def _validate_price_config(conf: Dict[str, Any]) -> None: When using market orders, price sides must be using the "other" side of the price """ # TODO: The below could be an enforced setting when using market orders - if (conf.get('order_types', {}).get('entry') == 'market' - and conf.get('entry_pricing', {}).get('price_side') not in ('ask', 'other')): - raise ConfigurationError( - 'Market entry orders require entry_pricing.price_side = "other".') + if conf.get("order_types", {}).get("entry") == "market" and conf.get("entry_pricing", {}).get( + "price_side" + ) not in ("ask", "other"): + raise ConfigurationError('Market entry orders require entry_pricing.price_side = "other".') - if (conf.get('order_types', {}).get('exit') == 'market' - and conf.get('exit_pricing', {}).get('price_side') not in ('bid', 'other')): + if conf.get("order_types", {}).get("exit") == "market" and conf.get("exit_pricing", {}).get( + "price_side" + ) not in ("bid", "other"): raise ConfigurationError('Market exit orders require exit_pricing.price_side = "other".') def _validate_trailing_stoploss(conf: Dict[str, Any]) -> None: - - if conf.get('stoploss') == 0.0: + if conf.get("stoploss") == 0.0: raise ConfigurationError( - 'The config stoploss needs to be different from 0 to avoid problems with sell orders.' + "The config stoploss needs to be different from 0 to avoid problems with sell orders." ) # Skip if trailing stoploss is not activated - if not conf.get('trailing_stop', False): + if not conf.get("trailing_stop", False): return - tsl_positive = float(conf.get('trailing_stop_positive', 0)) - tsl_offset = float(conf.get('trailing_stop_positive_offset', 0)) - tsl_only_offset = conf.get('trailing_only_offset_is_reached', False) + tsl_positive = float(conf.get("trailing_stop_positive", 0)) + tsl_offset = float(conf.get("trailing_stop_positive_offset", 0)) + tsl_only_offset = conf.get("trailing_only_offset_is_reached", False) if tsl_only_offset: if tsl_positive == 0.0: raise ConfigurationError( - 'The config trailing_only_offset_is_reached needs ' - 'trailing_stop_positive_offset to be more than 0 in your config.') + "The config trailing_only_offset_is_reached needs " + "trailing_stop_positive_offset to be more than 0 in your config." + ) if tsl_positive > 0 and 0 < tsl_offset <= tsl_positive: raise ConfigurationError( - 'The config trailing_stop_positive_offset needs ' - 'to be greater than trailing_stop_positive in your config.') + "The config trailing_stop_positive_offset needs " + "to be greater than trailing_stop_positive in your config." + ) # Fetch again without default - if 'trailing_stop_positive' in conf and float(conf['trailing_stop_positive']) == 0.0: + if "trailing_stop_positive" in conf and float(conf["trailing_stop_positive"]) == 0.0: raise ConfigurationError( - 'The config trailing_stop_positive needs to be different from 0 ' - 'to avoid problems with sell orders.' + "The config trailing_stop_positive needs to be different from 0 " + "to avoid problems with sell orders." 
) @@ -158,10 +156,10 @@ def _validate_edge(conf: Dict[str, Any]) -> None: Edge and Dynamic whitelist should not both be enabled, since edge overrides dynamic whitelists. """ - if not conf.get('edge', {}).get('enabled'): + if not conf.get("edge", {}).get("enabled"): return - if not conf.get('use_exit_signal', True): + if not conf.get("use_exit_signal", True): raise ConfigurationError( "Edge requires `use_exit_signal` to be True, otherwise no sells will happen." ) @@ -171,13 +169,20 @@ def _validate_whitelist(conf: Dict[str, Any]) -> None: """ Dynamic whitelist does not require pair_whitelist to be set - however StaticWhitelist does. """ - if conf.get('runmode', RunMode.OTHER) in [RunMode.OTHER, RunMode.PLOT, - RunMode.UTIL_NO_EXCHANGE, RunMode.UTIL_EXCHANGE]: + if conf.get("runmode", RunMode.OTHER) in [ + RunMode.OTHER, + RunMode.PLOT, + RunMode.UTIL_NO_EXCHANGE, + RunMode.UTIL_EXCHANGE, + ]: return - for pl in conf.get('pairlists', [{'method': 'StaticPairList'}]): - if (isinstance(pl, dict) and pl.get('method') == 'StaticPairList' - and not conf.get('exchange', {}).get('pair_whitelist')): + for pl in conf.get("pairlists", [{"method": "StaticPairList"}]): + if ( + isinstance(pl, dict) + and pl.get("method") == "StaticPairList" + and not conf.get("exchange", {}).get("pair_whitelist") + ): raise ConfigurationError("StaticPairList requires pair_whitelist to be set.") @@ -186,14 +191,14 @@ def _validate_protections(conf: Dict[str, Any]) -> None: Validate protection configuration validity """ - for prot in conf.get('protections', []): - if ('stop_duration' in prot and 'stop_duration_candles' in prot): + for prot in conf.get("protections", []): + if "stop_duration" in prot and "stop_duration_candles" in prot: raise ConfigurationError( "Protections must specify either `stop_duration` or `stop_duration_candles`.\n" f"Please fix the protection {prot.get('method')}" ) - if ('lookback_period' in prot and 'lookback_period_candles' in prot): + if "lookback_period" in prot and "lookback_period_candles" in prot: raise ConfigurationError( "Protections must specify either `lookback_period` or `lookback_period_candles`.\n" f"Please fix the protection {prot.get('method')}" @@ -201,10 +206,10 @@ def _validate_protections(conf: Dict[str, Any]) -> None: def _validate_ask_orderbook(conf: Dict[str, Any]) -> None: - ask_strategy = conf.get('exit_pricing', {}) - ob_min = ask_strategy.get('order_book_min') - ob_max = ask_strategy.get('order_book_max') - if ob_min is not None and ob_max is not None and ask_strategy.get('use_order_book'): + ask_strategy = conf.get("exit_pricing", {}) + ob_min = ask_strategy.get("order_book_min") + ob_max = ask_strategy.get("order_book_max") + if ob_min is not None and ob_max is not None and ask_strategy.get("use_order_book"): if ob_min != ob_max: raise ConfigurationError( "Using order_book_max != order_book_min in exit_pricing is no longer supported." 
@@ -212,7 +217,7 @@ def _validate_ask_orderbook(conf: Dict[str, Any]) -> None: ) else: # Move value to order_book_top - ask_strategy['order_book_top'] = ob_min + ask_strategy["order_book_top"] = ob_min logger.warning( "DEPRECATED: " "Please use `order_book_top` instead of `order_book_min` and `order_book_max` " @@ -221,7 +226,6 @@ def _validate_ask_orderbook(conf: Dict[str, Any]) -> None: def validate_migrated_strategy_settings(conf: Dict[str, Any]) -> None: - _validate_time_in_force(conf) _validate_order_types(conf) _validate_unfilledtimeout(conf) @@ -230,119 +234,129 @@ def validate_migrated_strategy_settings(conf: Dict[str, Any]) -> None: def _validate_time_in_force(conf: Dict[str, Any]) -> None: - - time_in_force = conf.get('order_time_in_force', {}) - if 'buy' in time_in_force or 'sell' in time_in_force: - if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: + time_in_force = conf.get("order_time_in_force", {}) + if "buy" in time_in_force or "sell" in time_in_force: + if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: raise ConfigurationError( - "Please migrate your time_in_force settings to use 'entry' and 'exit'.") + "Please migrate your time_in_force settings to use 'entry' and 'exit'." + ) else: logger.warning( "DEPRECATED: Using 'buy' and 'sell' for time_in_force is deprecated." "Please migrate your time_in_force settings to use 'entry' and 'exit'." ) process_deprecated_setting( - conf, 'order_time_in_force', 'buy', 'order_time_in_force', 'entry') + conf, "order_time_in_force", "buy", "order_time_in_force", "entry" + ) process_deprecated_setting( - conf, 'order_time_in_force', 'sell', 'order_time_in_force', 'exit') + conf, "order_time_in_force", "sell", "order_time_in_force", "exit" + ) def _validate_order_types(conf: Dict[str, Any]) -> None: - - order_types = conf.get('order_types', {}) - old_order_types = ['buy', 'sell', 'emergencysell', 'forcebuy', - 'forcesell', 'emergencyexit', 'forceexit', 'forceentry'] + order_types = conf.get("order_types", {}) + old_order_types = [ + "buy", + "sell", + "emergencysell", + "forcebuy", + "forcesell", + "emergencyexit", + "forceexit", + "forceentry", + ] if any(x in order_types for x in old_order_types): - if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: + if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: raise ConfigurationError( - "Please migrate your order_types settings to use the new wording.") + "Please migrate your order_types settings to use the new wording." + ) else: logger.warning( "DEPRECATED: Using 'buy' and 'sell' for order_types is deprecated." "Please migrate your order_types settings to use 'entry' and 'exit' wording." 
) for o, n in [ - ('buy', 'entry'), - ('sell', 'exit'), - ('emergencysell', 'emergency_exit'), - ('forcesell', 'force_exit'), - ('forcebuy', 'force_entry'), - ('emergencyexit', 'emergency_exit'), - ('forceexit', 'force_exit'), - ('forceentry', 'force_entry'), + ("buy", "entry"), + ("sell", "exit"), + ("emergencysell", "emergency_exit"), + ("forcesell", "force_exit"), + ("forcebuy", "force_entry"), + ("emergencyexit", "emergency_exit"), + ("forceexit", "force_exit"), + ("forceentry", "force_entry"), ]: - - process_deprecated_setting(conf, 'order_types', o, 'order_types', n) + process_deprecated_setting(conf, "order_types", o, "order_types", n) def _validate_unfilledtimeout(conf: Dict[str, Any]) -> None: - unfilledtimeout = conf.get('unfilledtimeout', {}) - if any(x in unfilledtimeout for x in ['buy', 'sell']): - if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: + unfilledtimeout = conf.get("unfilledtimeout", {}) + if any(x in unfilledtimeout for x in ["buy", "sell"]): + if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: raise ConfigurationError( - "Please migrate your unfilledtimeout settings to use the new wording.") + "Please migrate your unfilledtimeout settings to use the new wording." + ) else: - logger.warning( "DEPRECATED: Using 'buy' and 'sell' for unfilledtimeout is deprecated." "Please migrate your unfilledtimeout settings to use 'entry' and 'exit' wording." ) for o, n in [ - ('buy', 'entry'), - ('sell', 'exit'), + ("buy", "entry"), + ("sell", "exit"), ]: - - process_deprecated_setting(conf, 'unfilledtimeout', o, 'unfilledtimeout', n) + process_deprecated_setting(conf, "unfilledtimeout", o, "unfilledtimeout", n) def _validate_pricing_rules(conf: Dict[str, Any]) -> None: - - if conf.get('ask_strategy') or conf.get('bid_strategy'): - if conf.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: - raise ConfigurationError( - "Please migrate your pricing settings to use the new wording.") + if conf.get("ask_strategy") or conf.get("bid_strategy"): + if conf.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: + raise ConfigurationError("Please migrate your pricing settings to use the new wording.") else: - logger.warning( "DEPRECATED: Using 'ask_strategy' and 'bid_strategy' is deprecated." "Please migrate your settings to use 'entry_pricing' and 'exit_pricing'." 
) - conf['entry_pricing'] = {} - for obj in list(conf.get('bid_strategy', {}).keys()): - if obj == 'ask_last_balance': - process_deprecated_setting(conf, 'bid_strategy', obj, - 'entry_pricing', 'price_last_balance') + conf["entry_pricing"] = {} + for obj in list(conf.get("bid_strategy", {}).keys()): + if obj == "ask_last_balance": + process_deprecated_setting( + conf, "bid_strategy", obj, "entry_pricing", "price_last_balance" + ) else: - process_deprecated_setting(conf, 'bid_strategy', obj, 'entry_pricing', obj) - del conf['bid_strategy'] + process_deprecated_setting(conf, "bid_strategy", obj, "entry_pricing", obj) + del conf["bid_strategy"] - conf['exit_pricing'] = {} - for obj in list(conf.get('ask_strategy', {}).keys()): - if obj == 'bid_last_balance': - process_deprecated_setting(conf, 'ask_strategy', obj, - 'exit_pricing', 'price_last_balance') + conf["exit_pricing"] = {} + for obj in list(conf.get("ask_strategy", {}).keys()): + if obj == "bid_last_balance": + process_deprecated_setting( + conf, "ask_strategy", obj, "exit_pricing", "price_last_balance" + ) else: - process_deprecated_setting(conf, 'ask_strategy', obj, 'exit_pricing', obj) - del conf['ask_strategy'] + process_deprecated_setting(conf, "ask_strategy", obj, "exit_pricing", obj) + del conf["ask_strategy"] def _validate_freqai_hyperopt(conf: Dict[str, Any]) -> None: - freqai_enabled = conf.get('freqai', {}).get('enabled', False) - analyze_per_epoch = conf.get('analyze_per_epoch', False) + freqai_enabled = conf.get("freqai", {}).get("enabled", False) + analyze_per_epoch = conf.get("analyze_per_epoch", False) if analyze_per_epoch and freqai_enabled: raise ConfigurationError( - 'Using analyze-per-epoch parameter is not supported with a FreqAI strategy.') + "Using analyze-per-epoch parameter is not supported with a FreqAI strategy." 
+ ) def _validate_freqai_include_timeframes(conf: Dict[str, Any], preliminary: bool) -> None: - freqai_enabled = conf.get('freqai', {}).get('enabled', False) + freqai_enabled = conf.get("freqai", {}).get("enabled", False) if freqai_enabled: - main_tf = conf.get('timeframe', '5m') - freqai_include_timeframes = conf.get('freqai', {}).get('feature_parameters', {} - ).get('include_timeframes', []) + main_tf = conf.get("timeframe", "5m") + freqai_include_timeframes = ( + conf.get("freqai", {}).get("feature_parameters", {}).get("include_timeframes", []) + ) from freqtrade.exchange import timeframe_to_seconds + main_tf_s = timeframe_to_seconds(main_tf) offending_lines = [] for tf in freqai_include_timeframes: @@ -352,57 +366,65 @@ def _validate_freqai_include_timeframes(conf: Dict[str, Any], preliminary: bool) if offending_lines: raise ConfigurationError( f"Main timeframe of {main_tf} must be smaller or equal to FreqAI " - f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}") + f"`include_timeframes`.Offending include-timeframes: {', '.join(offending_lines)}" + ) # Ensure that the base timeframe is included in the include_timeframes list if not preliminary and main_tf not in freqai_include_timeframes: - feature_parameters = conf.get('freqai', {}).get('feature_parameters', {}) + feature_parameters = conf.get("freqai", {}).get("feature_parameters", {}) include_timeframes = [main_tf] + freqai_include_timeframes - conf.get('freqai', {}).get('feature_parameters', {}) \ - .update({**feature_parameters, 'include_timeframes': include_timeframes}) + conf.get("freqai", {}).get("feature_parameters", {}).update( + {**feature_parameters, "include_timeframes": include_timeframes} + ) def _validate_freqai_backtest(conf: Dict[str, Any]) -> None: - if conf.get('runmode', RunMode.OTHER) == RunMode.BACKTEST: - freqai_enabled = conf.get('freqai', {}).get('enabled', False) - timerange = conf.get('timerange') - freqai_backtest_live_models = conf.get('freqai_backtest_live_models', False) + if conf.get("runmode", RunMode.OTHER) == RunMode.BACKTEST: + freqai_enabled = conf.get("freqai", {}).get("enabled", False) + timerange = conf.get("timerange") + freqai_backtest_live_models = conf.get("freqai_backtest_live_models", False) if freqai_backtest_live_models and freqai_enabled and timerange: raise ConfigurationError( - 'Using timerange parameter is not supported with ' - '--freqai-backtest-live-models parameter.') + "Using timerange parameter is not supported with " + "--freqai-backtest-live-models parameter." + ) if freqai_backtest_live_models and not freqai_enabled: raise ConfigurationError( - 'Using --freqai-backtest-live-models parameter is only ' - 'supported with a FreqAI strategy.') + "Using --freqai-backtest-live-models parameter is only " + "supported with a FreqAI strategy." + ) if freqai_enabled and not freqai_backtest_live_models and not timerange: raise ConfigurationError( - 'Please pass --timerange if you intend to use FreqAI for backtesting.') + "Please pass --timerange if you intend to use FreqAI for backtesting." 
+ ) def _validate_consumers(conf: Dict[str, Any]) -> None: - emc_conf = conf.get('external_message_consumer', {}) - if emc_conf.get('enabled', False): - if len(emc_conf.get('producers', [])) < 1: + emc_conf = conf.get("external_message_consumer", {}) + if emc_conf.get("enabled", False): + if len(emc_conf.get("producers", [])) < 1: raise ConfigurationError("You must specify at least 1 Producer to connect to.") - producer_names = [p['name'] for p in emc_conf.get('producers', [])] + producer_names = [p["name"] for p in emc_conf.get("producers", [])] duplicates = [item for item, count in Counter(producer_names).items() if count > 1] if duplicates: raise ConfigurationError( - f"Producer names must be unique. Duplicate: {', '.join(duplicates)}") - if conf.get('process_only_new_candles', True): + f"Producer names must be unique. Duplicate: {', '.join(duplicates)}" + ) + if conf.get("process_only_new_candles", True): # Warning here or require it? - logger.warning("To receive best performance with external data, " - "please set `process_only_new_candles` to False") + logger.warning( + "To receive best performance with external data, " + "please set `process_only_new_candles` to False" + ) def _strategy_settings(conf: Dict[str, Any]) -> None: - - process_deprecated_setting(conf, None, 'use_sell_signal', None, 'use_exit_signal') - process_deprecated_setting(conf, None, 'sell_profit_only', None, 'exit_profit_only') - process_deprecated_setting(conf, None, 'sell_profit_offset', None, 'exit_profit_offset') - process_deprecated_setting(conf, None, 'ignore_roi_if_buy_signal', - None, 'ignore_roi_if_entry_signal') + process_deprecated_setting(conf, None, "use_sell_signal", None, "use_exit_signal") + process_deprecated_setting(conf, None, "sell_profit_only", None, "exit_profit_only") + process_deprecated_setting(conf, None, "sell_profit_offset", None, "exit_profit_offset") + process_deprecated_setting( + conf, None, "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal" + ) diff --git a/freqtrade/configuration/configuration.py b/freqtrade/configuration/configuration.py index 906d0a544..cc8b5407e 100644 --- a/freqtrade/configuration/configuration.py +++ b/freqtrade/configuration/configuration.py @@ -1,6 +1,7 @@ """ This module contains the configuration class """ + import logging import warnings from copy import deepcopy @@ -56,7 +57,7 @@ class Configuration: :return: configuration dictionary """ # Keep this method as staticmethod, so it can be used from interactive environments - c = Configuration({'config': files}, RunMode.OTHER) + c = Configuration({"config": files}, RunMode.OTHER) return c.get_config() def load_config(self) -> Dict[str, Any]: @@ -69,19 +70,20 @@ class Configuration: # Load environment variables from freqtrade.commands.arguments import NO_CONF_ALLOWED - if self.args.get('command') not in NO_CONF_ALLOWED: + + if self.args.get("command") not in NO_CONF_ALLOWED: env_data = enironment_vars_to_dict() config = deep_merge_dicts(env_data, config) # Normalize config - if 'internals' not in config: - config['internals'] = {} + if "internals" not in config: + config["internals"] = {} - if 'pairlists' not in config: - config['pairlists'] = [] + if "pairlists" not in config: + config["pairlists"] = [] # Keep a copy of the original configuration file - config['original_config'] = deepcopy(config) + config["original_config"] = deepcopy(config) self._process_logging_options(config) @@ -105,7 +107,7 @@ class Configuration: from freqtrade.exchange.check_exchange import check_exchange # Check if the 
exchange set by the user is supported - check_exchange(config, config.get('experimental', {}).get('block_bad_exchanges', True)) + check_exchange(config, config.get("experimental", {}).get("block_bad_exchanges", True)) self._resolve_pairs_list(config) @@ -119,52 +121,56 @@ class Configuration: the -v/--verbose, --logfile options """ # Log level - config.update({'verbosity': self.args.get('verbosity', 0)}) + config.update({"verbosity": self.args.get("verbosity", 0)}) - if 'logfile' in self.args and self.args['logfile']: - config.update({'logfile': self.args['logfile']}) + if "logfile" in self.args and self.args["logfile"]: + config.update({"logfile": self.args["logfile"]}) setup_logging(config) def _process_trading_options(self, config: Config) -> None: - if config['runmode'] not in TRADE_MODES: + if config["runmode"] not in TRADE_MODES: return - if config.get('dry_run', False): - logger.info('Dry run is enabled') - if config.get('db_url') in [None, constants.DEFAULT_DB_PROD_URL]: + if config.get("dry_run", False): + logger.info("Dry run is enabled") + if config.get("db_url") in [None, constants.DEFAULT_DB_PROD_URL]: # Default to in-memory db for dry_run if not specified - config['db_url'] = constants.DEFAULT_DB_DRYRUN_URL + config["db_url"] = constants.DEFAULT_DB_DRYRUN_URL else: - if not config.get('db_url'): - config['db_url'] = constants.DEFAULT_DB_PROD_URL - logger.info('Dry run is disabled') + if not config.get("db_url"): + config["db_url"] = constants.DEFAULT_DB_PROD_URL + logger.info("Dry run is disabled") logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"') def _process_common_options(self, config: Config) -> None: - # Set strategy if not specified in config and or if it's non default - if self.args.get('strategy') or not config.get('strategy'): - config.update({'strategy': self.args.get('strategy')}) + if self.args.get("strategy") or not config.get("strategy"): + config.update({"strategy": self.args.get("strategy")}) - self._args_to_config(config, argname='strategy_path', - logstring='Using additional Strategy lookup path: {}') + self._args_to_config( + config, argname="strategy_path", logstring="Using additional Strategy lookup path: {}" + ) - if ('db_url' in self.args and self.args['db_url'] and - self.args['db_url'] != constants.DEFAULT_DB_PROD_URL): - config.update({'db_url': self.args['db_url']}) - logger.info('Parameter --db-url detected ...') + if ( + "db_url" in self.args + and self.args["db_url"] + and self.args["db_url"] != constants.DEFAULT_DB_PROD_URL + ): + config.update({"db_url": self.args["db_url"]}) + logger.info("Parameter --db-url detected ...") - self._args_to_config(config, argname='db_url_from', - logstring='Parameter --db-url-from detected ...') + self._args_to_config( + config, argname="db_url_from", logstring="Parameter --db-url-from detected ..." + ) - if config.get('force_entry_enable', False): - logger.warning('`force_entry_enable` RPC message enabled.') + if config.get("force_entry_enable", False): + logger.warning("`force_entry_enable` RPC message enabled.") # Support for sd_notify - if 'sd_notify' in self.args and self.args['sd_notify']: - config['internals'].update({'sd_notify': True}) + if "sd_notify" in self.args and self.args["sd_notify"]: + config["internals"].update({"sd_notify": True}) def _process_datadir_options(self, config: Config) -> None: """ @@ -172,245 +178,275 @@ class Configuration: --user-data, --datadir """ # Check exchange parameter here - otherwise `datadir` might be wrong. 
- if 'exchange' in self.args and self.args['exchange']: - config['exchange']['name'] = self.args['exchange'] + if "exchange" in self.args and self.args["exchange"]: + config["exchange"]["name"] = self.args["exchange"] logger.info(f"Using exchange {config['exchange']['name']}") - if 'pair_whitelist' not in config['exchange']: - config['exchange']['pair_whitelist'] = [] + if "pair_whitelist" not in config["exchange"]: + config["exchange"]["pair_whitelist"] = [] - if 'user_data_dir' in self.args and self.args['user_data_dir']: - config.update({'user_data_dir': self.args['user_data_dir']}) - elif 'user_data_dir' not in config: + if "user_data_dir" in self.args and self.args["user_data_dir"]: + config.update({"user_data_dir": self.args["user_data_dir"]}) + elif "user_data_dir" not in config: # Default to cwd/user_data (legacy option ...) - config.update({'user_data_dir': str(Path.cwd() / 'user_data')}) + config.update({"user_data_dir": str(Path.cwd() / "user_data")}) # reset to user_data_dir so this contains the absolute path. - config['user_data_dir'] = create_userdata_dir(config['user_data_dir'], create_dir=False) - logger.info('Using user-data directory: %s ...', config['user_data_dir']) + config["user_data_dir"] = create_userdata_dir(config["user_data_dir"], create_dir=False) + logger.info("Using user-data directory: %s ...", config["user_data_dir"]) - config.update({'datadir': create_datadir(config, self.args.get('datadir'))}) - logger.info('Using data directory: %s ...', config.get('datadir')) + config.update({"datadir": create_datadir(config, self.args.get("datadir"))}) + logger.info("Using data directory: %s ...", config.get("datadir")) - if self.args.get('exportfilename'): - self._args_to_config(config, argname='exportfilename', - logstring='Storing backtest results to {} ...') - config['exportfilename'] = Path(config['exportfilename']) + if self.args.get("exportfilename"): + self._args_to_config( + config, argname="exportfilename", logstring="Storing backtest results to {} ..." + ) + config["exportfilename"] = Path(config["exportfilename"]) else: - config['exportfilename'] = (config['user_data_dir'] - / 'backtest_results') + config["exportfilename"] = config["user_data_dir"] / "backtest_results" - if self.args.get('show_sensitive'): + if self.args.get("show_sensitive"): logger.warning( "Sensitive information will be shown in the upcoming output. " "Please make sure to never share this output without redacting " - "the information yourself.") + "the information yourself." + ) def _process_optimize_options(self, config: Config) -> None: - # This will override the strategy configuration - self._args_to_config(config, argname='timeframe', - logstring='Parameter -i/--timeframe detected ... ' - 'Using timeframe: {} ...') - - self._args_to_config(config, argname='position_stacking', - logstring='Parameter --enable-position-stacking detected ...') + self._args_to_config( + config, + argname="timeframe", + logstring="Parameter -i/--timeframe detected ... Using timeframe: {} ...", + ) self._args_to_config( - config, argname='enable_protections', - logstring='Parameter --enable-protections detected, enabling Protections. 
...') + config, + argname="position_stacking", + logstring="Parameter --enable-position-stacking detected ...", + ) - if 'use_max_market_positions' in self.args and not self.args["use_max_market_positions"]: - config.update({'use_max_market_positions': False}) - logger.info('Parameter --disable-max-market-positions detected ...') - logger.info('max_open_trades set to unlimited ...') - elif 'max_open_trades' in self.args and self.args['max_open_trades']: - config.update({'max_open_trades': self.args['max_open_trades']}) - logger.info('Parameter --max-open-trades detected, ' - 'overriding max_open_trades to: %s ...', config.get('max_open_trades')) - elif config['runmode'] in NON_UTIL_MODES: - logger.info('Using max_open_trades: %s ...', config.get('max_open_trades')) + self._args_to_config( + config, + argname="enable_protections", + logstring="Parameter --enable-protections detected, enabling Protections. ...", + ) + + if "use_max_market_positions" in self.args and not self.args["use_max_market_positions"]: + config.update({"use_max_market_positions": False}) + logger.info("Parameter --disable-max-market-positions detected ...") + logger.info("max_open_trades set to unlimited ...") + elif "max_open_trades" in self.args and self.args["max_open_trades"]: + config.update({"max_open_trades": self.args["max_open_trades"]}) + logger.info( + "Parameter --max-open-trades detected, overriding max_open_trades to: %s ...", + config.get("max_open_trades"), + ) + elif config["runmode"] in NON_UTIL_MODES: + logger.info("Using max_open_trades: %s ...", config.get("max_open_trades")) # Setting max_open_trades to infinite if -1 - if config.get('max_open_trades') == -1: - config['max_open_trades'] = float('inf') + if config.get("max_open_trades") == -1: + config["max_open_trades"] = float("inf") - if self.args.get('stake_amount'): + if self.args.get("stake_amount"): # Convert explicitly to float to support CLI argument for both unlimited and value try: - self.args['stake_amount'] = float(self.args['stake_amount']) + self.args["stake_amount"] = float(self.args["stake_amount"]) except ValueError: pass configurations = [ - ('timeframe_detail', - 'Parameter --timeframe-detail detected, using {} for intra-candle backtesting ...'), - ('backtest_show_pair_list', 'Parameter --show-pair-list detected.'), - ('stake_amount', - 'Parameter --stake-amount detected, overriding stake_amount to: {} ...'), - ('dry_run_wallet', - 'Parameter --dry-run-wallet detected, overriding dry_run_wallet to: {} ...'), - ('fee', 'Parameter --fee detected, setting fee to: {} ...'), - ('timerange', 'Parameter --timerange detected: {} ...'), - ] + ( + "timeframe_detail", + "Parameter --timeframe-detail detected, using {} for intra-candle backtesting ...", + ), + ("backtest_show_pair_list", "Parameter --show-pair-list detected."), + ( + "stake_amount", + "Parameter --stake-amount detected, overriding stake_amount to: {} ...", + ), + ( + "dry_run_wallet", + "Parameter --dry-run-wallet detected, overriding dry_run_wallet to: {} ...", + ), + ("fee", "Parameter --fee detected, setting fee to: {} ..."), + ("timerange", "Parameter --timerange detected: {} ..."), + ] self._args_to_config_loop(config, configurations) self._process_datadir_options(config) - self._args_to_config(config, argname='strategy_list', - logstring='Using strategy list of {} strategies', logfun=len) + self._args_to_config( + config, + argname="strategy_list", + logstring="Using strategy list of {} strategies", + logfun=len, + ) configurations = [ - ('recursive_strategy_search', 
- 'Recursively searching for a strategy in the strategies folder.'), - ('timeframe', 'Overriding timeframe with Command line argument'), - ('export', 'Parameter --export detected: {} ...'), - ('backtest_breakdown', 'Parameter --breakdown detected ...'), - ('backtest_cache', 'Parameter --cache={} detected ...'), - ('disableparamexport', 'Parameter --disableparamexport detected: {} ...'), - ('freqai_backtest_live_models', - 'Parameter --freqai-backtest-live-models detected ...'), + ( + "recursive_strategy_search", + "Recursively searching for a strategy in the strategies folder.", + ), + ("timeframe", "Overriding timeframe with Command line argument"), + ("export", "Parameter --export detected: {} ..."), + ("backtest_breakdown", "Parameter --breakdown detected ..."), + ("backtest_cache", "Parameter --cache={} detected ..."), + ("disableparamexport", "Parameter --disableparamexport detected: {} ..."), + ("freqai_backtest_live_models", "Parameter --freqai-backtest-live-models detected ..."), ] self._args_to_config_loop(config, configurations) # Edge section: - if 'stoploss_range' in self.args and self.args["stoploss_range"]: + if "stoploss_range" in self.args and self.args["stoploss_range"]: txt_range = eval(self.args["stoploss_range"]) - config['edge'].update({'stoploss_range_min': txt_range[0]}) - config['edge'].update({'stoploss_range_max': txt_range[1]}) - config['edge'].update({'stoploss_range_step': txt_range[2]}) - logger.info('Parameter --stoplosses detected: %s ...', self.args["stoploss_range"]) + config["edge"].update({"stoploss_range_min": txt_range[0]}) + config["edge"].update({"stoploss_range_max": txt_range[1]}) + config["edge"].update({"stoploss_range_step": txt_range[2]}) + logger.info("Parameter --stoplosses detected: %s ...", self.args["stoploss_range"]) # Hyperopt section configurations = [ - ('hyperopt', 'Using Hyperopt class name: {}'), - ('hyperopt_path', 'Using additional Hyperopt lookup path: {}'), - ('hyperoptexportfilename', 'Using hyperopt file: {}'), - ('lookahead_analysis_exportfilename', 'Saving lookahead analysis results into {} ...'), - ('epochs', 'Parameter --epochs detected ... Will run Hyperopt with for {} epochs ...'), - ('spaces', 'Parameter -s/--spaces detected: {}'), - ('analyze_per_epoch', 'Parameter --analyze-per-epoch detected.'), - ('print_all', 'Parameter --print-all detected ...'), + ("hyperopt", "Using Hyperopt class name: {}"), + ("hyperopt_path", "Using additional Hyperopt lookup path: {}"), + ("hyperoptexportfilename", "Using hyperopt file: {}"), + ("lookahead_analysis_exportfilename", "Saving lookahead analysis results into {} ..."), + ("epochs", "Parameter --epochs detected ... 
Will run Hyperopt with for {} epochs ..."), + ("spaces", "Parameter -s/--spaces detected: {}"), + ("analyze_per_epoch", "Parameter --analyze-per-epoch detected."), + ("print_all", "Parameter --print-all detected ..."), ] self._args_to_config_loop(config, configurations) - if 'print_colorized' in self.args and not self.args["print_colorized"]: - logger.info('Parameter --no-color detected ...') - config.update({'print_colorized': False}) + if "print_colorized" in self.args and not self.args["print_colorized"]: + logger.info("Parameter --no-color detected ...") + config.update({"print_colorized": False}) else: - config.update({'print_colorized': True}) + config.update({"print_colorized": True}) configurations = [ - ('print_json', 'Parameter --print-json detected ...'), - ('export_csv', 'Parameter --export-csv detected: {}'), - ('hyperopt_jobs', 'Parameter -j/--job-workers detected: {}'), - ('hyperopt_random_state', 'Parameter --random-state detected: {}'), - ('hyperopt_min_trades', 'Parameter --min-trades detected: {}'), - ('hyperopt_loss', 'Using Hyperopt loss class name: {}'), - ('hyperopt_show_index', 'Parameter -n/--index detected: {}'), - ('hyperopt_list_best', 'Parameter --best detected: {}'), - ('hyperopt_list_profitable', 'Parameter --profitable detected: {}'), - ('hyperopt_list_min_trades', 'Parameter --min-trades detected: {}'), - ('hyperopt_list_max_trades', 'Parameter --max-trades detected: {}'), - ('hyperopt_list_min_avg_time', 'Parameter --min-avg-time detected: {}'), - ('hyperopt_list_max_avg_time', 'Parameter --max-avg-time detected: {}'), - ('hyperopt_list_min_avg_profit', 'Parameter --min-avg-profit detected: {}'), - ('hyperopt_list_max_avg_profit', 'Parameter --max-avg-profit detected: {}'), - ('hyperopt_list_min_total_profit', 'Parameter --min-total-profit detected: {}'), - ('hyperopt_list_max_total_profit', 'Parameter --max-total-profit detected: {}'), - ('hyperopt_list_min_objective', 'Parameter --min-objective detected: {}'), - ('hyperopt_list_max_objective', 'Parameter --max-objective detected: {}'), - ('hyperopt_list_no_details', 'Parameter --no-details detected: {}'), - ('hyperopt_show_no_header', 'Parameter --no-header detected: {}'), - ('hyperopt_ignore_missing_space', 'Paramter --ignore-missing-space detected: {}'), + ("print_json", "Parameter --print-json detected ..."), + ("export_csv", "Parameter --export-csv detected: {}"), + ("hyperopt_jobs", "Parameter -j/--job-workers detected: {}"), + ("hyperopt_random_state", "Parameter --random-state detected: {}"), + ("hyperopt_min_trades", "Parameter --min-trades detected: {}"), + ("hyperopt_loss", "Using Hyperopt loss class name: {}"), + ("hyperopt_show_index", "Parameter -n/--index detected: {}"), + ("hyperopt_list_best", "Parameter --best detected: {}"), + ("hyperopt_list_profitable", "Parameter --profitable detected: {}"), + ("hyperopt_list_min_trades", "Parameter --min-trades detected: {}"), + ("hyperopt_list_max_trades", "Parameter --max-trades detected: {}"), + ("hyperopt_list_min_avg_time", "Parameter --min-avg-time detected: {}"), + ("hyperopt_list_max_avg_time", "Parameter --max-avg-time detected: {}"), + ("hyperopt_list_min_avg_profit", "Parameter --min-avg-profit detected: {}"), + ("hyperopt_list_max_avg_profit", "Parameter --max-avg-profit detected: {}"), + ("hyperopt_list_min_total_profit", "Parameter --min-total-profit detected: {}"), + ("hyperopt_list_max_total_profit", "Parameter --max-total-profit detected: {}"), + ("hyperopt_list_min_objective", "Parameter --min-objective detected: {}"), + 
("hyperopt_list_max_objective", "Parameter --max-objective detected: {}"), + ("hyperopt_list_no_details", "Parameter --no-details detected: {}"), + ("hyperopt_show_no_header", "Parameter --no-header detected: {}"), + ("hyperopt_ignore_missing_space", "Parameter --ignore-missing-space detected: {}"), ] self._args_to_config_loop(config, configurations) def _process_plot_options(self, config: Config) -> None: - configurations = [ - ('pairs', 'Using pairs {}'), - ('indicators1', 'Using indicators1: {}'), - ('indicators2', 'Using indicators2: {}'), - ('trade_ids', 'Filtering on trade_ids: {}'), - ('plot_limit', 'Limiting plot to: {}'), - ('plot_auto_open', 'Parameter --auto-open detected.'), - ('trade_source', 'Using trades from: {}'), - ('prepend_data', 'Prepend detected. Allowing data prepending.'), - ('erase', 'Erase detected. Deleting existing data.'), - ('no_trades', 'Parameter --no-trades detected.'), - ('timeframes', 'timeframes --timeframes: {}'), - ('days', 'Detected --days: {}'), - ('include_inactive', 'Detected --include-inactive-pairs: {}'), - ('download_trades', 'Detected --dl-trades: {}'), - ('dataformat_ohlcv', 'Using "{}" to store OHLCV data.'), - ('dataformat_trades', 'Using "{}" to store trades data.'), - ('show_timerange', 'Detected --show-timerange'), + ("pairs", "Using pairs {}"), + ("indicators1", "Using indicators1: {}"), + ("indicators2", "Using indicators2: {}"), + ("trade_ids", "Filtering on trade_ids: {}"), + ("plot_limit", "Limiting plot to: {}"), + ("plot_auto_open", "Parameter --auto-open detected."), + ("trade_source", "Using trades from: {}"), + ("prepend_data", "Prepend detected. Allowing data prepending."), + ("erase", "Erase detected. Deleting existing data."), + ("no_trades", "Parameter --no-trades detected."), + ("timeframes", "timeframes --timeframes: {}"), + ("days", "Detected --days: {}"), + ("include_inactive", "Detected --include-inactive-pairs: {}"), + ("download_trades", "Detected --dl-trades: {}"), + ("convert_trades", "Detected --convert: {} - Converting Trade data to OHCV {}"), + ("dataformat_ohlcv", 'Using "{}" to store OHLCV data.'), + ("dataformat_trades", 'Using "{}" to store trades data.'), + ("show_timerange", "Detected --show-timerange"), ] self._args_to_config_loop(config, configurations) def _process_data_options(self, config: Config) -> None: - self._args_to_config(config, argname='new_pairs_days', - logstring='Detected --new-pairs-days: {}') - self._args_to_config(config, argname='trading_mode', - logstring='Detected --trading-mode: {}') - config['candle_type_def'] = CandleType.get_default( - config.get('trading_mode', 'spot') or 'spot') - config['trading_mode'] = TradingMode(config.get('trading_mode', 'spot') or 'spot') - self._args_to_config(config, argname='candle_types', - logstring='Detected --candle-types: {}') + self._args_to_config( + config, argname="new_pairs_days", logstring="Detected --new-pairs-days: {}" + ) + self._args_to_config( + config, argname="trading_mode", logstring="Detected --trading-mode: {}" + ) + config["candle_type_def"] = CandleType.get_default( + config.get("trading_mode", "spot") or "spot" + ) + config["trading_mode"] = TradingMode(config.get("trading_mode", "spot") or "spot") + self._args_to_config( + config, argname="candle_types", logstring="Detected --candle-types: {}" + ) def _process_analyze_options(self, config: Config) -> None: configurations = [ - ('analysis_groups', 'Analysis reason groups: {}'), - ('enter_reason_list', 'Analysis enter tag list: {}'), - ('exit_reason_list', 'Analysis exit tag 
list: {}'), - ('indicator_list', 'Analysis indicator list: {}'), - ('timerange', 'Filter trades by timerange: {}'), - ('analysis_rejected', 'Analyse rejected signals: {}'), - ('analysis_to_csv', 'Store analysis tables to CSV: {}'), - ('analysis_csv_path', 'Path to store analysis CSVs: {}'), + ("analysis_groups", "Analysis reason groups: {}"), + ("enter_reason_list", "Analysis enter tag list: {}"), + ("exit_reason_list", "Analysis exit tag list: {}"), + ("indicator_list", "Analysis indicator list: {}"), + ("timerange", "Filter trades by timerange: {}"), + ("analysis_rejected", "Analyse rejected signals: {}"), + ("analysis_to_csv", "Store analysis tables to CSV: {}"), + ("analysis_csv_path", "Path to store analysis CSVs: {}"), # Lookahead analysis results - ('targeted_trade_amount', 'Targeted Trade amount: {}'), - ('minimum_trade_amount', 'Minimum Trade amount: {}'), - ('lookahead_analysis_exportfilename', 'Path to store lookahead-analysis-results: {}'), - ('startup_candle', 'Startup candle to be used on recursive analysis: {}'), + ("targeted_trade_amount", "Targeted Trade amount: {}"), + ("minimum_trade_amount", "Minimum Trade amount: {}"), + ("lookahead_analysis_exportfilename", "Path to store lookahead-analysis-results: {}"), + ("startup_candle", "Startup candle to be used on recursive analysis: {}"), ] self._args_to_config_loop(config, configurations) def _args_to_config_loop(self, config, configurations: List[Tuple[str, str]]) -> None: - for argname, logstring in configurations: self._args_to_config(config, argname=argname, logstring=logstring) def _process_runmode(self, config: Config) -> None: - - self._args_to_config(config, argname='dry_run', - logstring='Parameter --dry-run detected, ' - 'overriding dry_run to: {} ...') + self._args_to_config( + config, + argname="dry_run", + logstring="Parameter --dry-run detected, overriding dry_run to: {} ...", + ) if not self.runmode: # Handle real mode, infer dry/live from config - self.runmode = RunMode.DRY_RUN if config.get('dry_run', True) else RunMode.LIVE + self.runmode = RunMode.DRY_RUN if config.get("dry_run", True) else RunMode.LIVE logger.info(f"Runmode set to {self.runmode.value}.") - config.update({'runmode': self.runmode}) + config.update({"runmode": self.runmode}) def _process_freqai_options(self, config: Config) -> None: + self._args_to_config( + config, argname="freqaimodel", logstring="Using freqaimodel class name: {}" + ) - self._args_to_config(config, argname='freqaimodel', - logstring='Using freqaimodel class name: {}') - - self._args_to_config(config, argname='freqaimodel_path', - logstring='Using freqaimodel path: {}') + self._args_to_config( + config, argname="freqaimodel_path", logstring="Using freqaimodel path: {}" + ) return - def _args_to_config(self, config: Config, argname: str, - logstring: str, logfun: Optional[Callable] = None, - deprecated_msg: Optional[str] = None) -> None: + def _args_to_config( + self, + config: Config, + argname: str, + logstring: str, + logfun: Optional[Callable] = None, + deprecated_msg: Optional[str] = None, + ) -> None: """ :param config: Configuration dictionary :param argname: Argumentname in self.args - will be copied to config dict. 
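
For context on the `configurations` lists reformatted above: each `(argname, logstring)` tuple is fed to `_args_to_config_loop`, which copies the CLI value into the config when it was actually supplied and logs the formatted message. The snippet below is a minimal standalone sketch of that pattern, based only on the loop and the `argname is not None / is not False` check visible in this diff; the function name is made up for illustration and it omits freqtrade's `logfun` and deprecation handling.

import logging
from typing import Any, Dict, List, Tuple

logger = logging.getLogger(__name__)


def args_to_config_loop_sketch(
    args: Dict[str, Any],
    config: Dict[str, Any],
    configurations: List[Tuple[str, str]],
) -> None:
    # Copy each CLI value into the config if it was actually provided
    # (not None and not False), then log the formatted message.
    for argname, logstring in configurations:
        if argname in args and args[argname] is not None and args[argname] is not False:
            config[argname] = args[argname]
            logger.info(logstring.format(config[argname]))


# Example usage with two of the tuples shown in the hunk above:
cfg: Dict[str, Any] = {}
args_to_config_loop_sketch(
    {"timerange": "20240101-20240630", "fee": None},
    cfg,
    [
        ("timerange", "Parameter --timerange detected: {} ..."),
        ("fee", "Parameter --fee detected, setting fee to: {} ..."),
    ],
)
# cfg == {"timerange": "20240101-20240630"}; "fee" was skipped because it was None.
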
@@ -420,9 +456,11 @@ class Configuration: sample: logfun=len (prints the length of the found configuration instead of the content) """ - if (argname in self.args and self.args[argname] is not None - and self.args[argname] is not False): - + if ( + argname in self.args + and self.args[argname] is not None + and self.args[argname] is not False + ): config.update({argname: self.args[argname]}) if logfun: logger.info(logstring.format(logfun(config[argname]))) @@ -441,7 +479,7 @@ class Configuration: """ if "pairs" in config: - config['exchange']['pair_whitelist'] = config['pairs'] + config["exchange"]["pair_whitelist"] = config["pairs"] return if "pairs_file" in self.args and self.args["pairs_file"]: @@ -451,19 +489,19 @@ class Configuration: # or if pairs file is specified explicitly if not pairs_file.exists(): raise OperationalException(f'No pairs file found with path "{pairs_file}".') - config['pairs'] = load_file(pairs_file) - if isinstance(config['pairs'], list): - config['pairs'].sort() + config["pairs"] = load_file(pairs_file) + if isinstance(config["pairs"], list): + config["pairs"].sort() return - if 'config' in self.args and self.args['config']: + if "config" in self.args and self.args["config"]: logger.info("Using pairlist from configuration.") - config['pairs'] = config.get('exchange', {}).get('pair_whitelist') + config["pairs"] = config.get("exchange", {}).get("pair_whitelist") else: # Fall back to /dl_path/pairs.json - pairs_file = config['datadir'] / 'pairs.json' + pairs_file = config["datadir"] / "pairs.json" if pairs_file.exists(): logger.info(f'Reading pairs file "{pairs_file}".') - config['pairs'] = load_file(pairs_file) - if 'pairs' in config and isinstance(config['pairs'], list): - config['pairs'].sort() + config["pairs"] = load_file(pairs_file) + if "pairs" in config and isinstance(config["pairs"], list): + config["pairs"].sort() diff --git a/freqtrade/configuration/deprecated_settings.py b/freqtrade/configuration/deprecated_settings.py index 6a2d365a3..6a0901ed7 100644 --- a/freqtrade/configuration/deprecated_settings.py +++ b/freqtrade/configuration/deprecated_settings.py @@ -12,9 +12,13 @@ from freqtrade.exceptions import ConfigurationError, OperationalException logger = logging.getLogger(__name__) -def check_conflicting_settings(config: Config, - section_old: Optional[str], name_old: str, - section_new: Optional[str], name_new: str) -> None: +def check_conflicting_settings( + config: Config, + section_old: Optional[str], + name_old: str, + section_new: Optional[str], + name_new: str, +) -> None: section_new_config = config.get(section_new, {}) if section_new else config section_old_config = config.get(section_old, {}) if section_old else config if name_new in section_new_config and name_old in section_old_config: @@ -29,9 +33,9 @@ def check_conflicting_settings(config: Config, ) -def process_removed_setting(config: Config, - section1: str, name1: str, - section2: Optional[str], name2: str) -> None: +def process_removed_setting( + config: Config, section1: str, name1: str, section2: Optional[str], name2: str +) -> None: """ :param section1: Removed section :param name1: Removed setting name @@ -48,10 +52,13 @@ def process_removed_setting(config: Config, ) -def process_deprecated_setting(config: Config, - section_old: Optional[str], name_old: str, - section_new: Optional[str], name_new: str - ) -> None: +def process_deprecated_setting( + config: Config, + section_old: Optional[str], + name_old: str, + section_new: Optional[str], + name_new: str, +) -> None: 
check_conflicting_settings(config, section_old, name_old, section_new, name_new) section_old_config = config.get(section_old, {}) if section_old else config @@ -71,57 +78,91 @@ def process_deprecated_setting(config: Config, def process_temporary_deprecated_settings(config: Config) -> None: - # Kept for future deprecated / moved settings # check_conflicting_settings(config, 'ask_strategy', 'use_sell_signal', # 'experimental', 'use_sell_signal') - process_deprecated_setting(config, 'ask_strategy', 'ignore_buying_expired_candle_after', - None, 'ignore_buying_expired_candle_after') + process_deprecated_setting( + config, + "ask_strategy", + "ignore_buying_expired_candle_after", + None, + "ignore_buying_expired_candle_after", + ) - process_deprecated_setting(config, None, 'forcebuy_enable', None, 'force_entry_enable') + process_deprecated_setting(config, None, "forcebuy_enable", None, "force_entry_enable") # New settings - if config.get('telegram'): - process_deprecated_setting(config['telegram'], 'notification_settings', 'sell', - 'notification_settings', 'exit') - process_deprecated_setting(config['telegram'], 'notification_settings', 'sell_fill', - 'notification_settings', 'exit_fill') - process_deprecated_setting(config['telegram'], 'notification_settings', 'sell_cancel', - 'notification_settings', 'exit_cancel') - process_deprecated_setting(config['telegram'], 'notification_settings', 'buy', - 'notification_settings', 'entry') - process_deprecated_setting(config['telegram'], 'notification_settings', 'buy_fill', - 'notification_settings', 'entry_fill') - process_deprecated_setting(config['telegram'], 'notification_settings', 'buy_cancel', - 'notification_settings', 'entry_cancel') - if config.get('webhook'): - process_deprecated_setting(config, 'webhook', 'webhookbuy', 'webhook', 'webhookentry') - process_deprecated_setting(config, 'webhook', 'webhookbuycancel', - 'webhook', 'webhookentrycancel') - process_deprecated_setting(config, 'webhook', 'webhookbuyfill', - 'webhook', 'webhookentryfill') - process_deprecated_setting(config, 'webhook', 'webhooksell', 'webhook', 'webhookexit') - process_deprecated_setting(config, 'webhook', 'webhooksellcancel', - 'webhook', 'webhookexitcancel') - process_deprecated_setting(config, 'webhook', 'webhooksellfill', - 'webhook', 'webhookexitfill') + if config.get("telegram"): + process_deprecated_setting( + config["telegram"], "notification_settings", "sell", "notification_settings", "exit" + ) + process_deprecated_setting( + config["telegram"], + "notification_settings", + "sell_fill", + "notification_settings", + "exit_fill", + ) + process_deprecated_setting( + config["telegram"], + "notification_settings", + "sell_cancel", + "notification_settings", + "exit_cancel", + ) + process_deprecated_setting( + config["telegram"], "notification_settings", "buy", "notification_settings", "entry" + ) + process_deprecated_setting( + config["telegram"], + "notification_settings", + "buy_fill", + "notification_settings", + "entry_fill", + ) + process_deprecated_setting( + config["telegram"], + "notification_settings", + "buy_cancel", + "notification_settings", + "entry_cancel", + ) + if config.get("webhook"): + process_deprecated_setting(config, "webhook", "webhookbuy", "webhook", "webhookentry") + process_deprecated_setting( + config, "webhook", "webhookbuycancel", "webhook", "webhookentrycancel" + ) + process_deprecated_setting( + config, "webhook", "webhookbuyfill", "webhook", "webhookentryfill" + ) + process_deprecated_setting(config, "webhook", "webhooksell", 
"webhook", "webhookexit") + process_deprecated_setting( + config, "webhook", "webhooksellcancel", "webhook", "webhookexitcancel" + ) + process_deprecated_setting( + config, "webhook", "webhooksellfill", "webhook", "webhookexitfill" + ) # Legacy way - having them in experimental ... - process_removed_setting(config, 'experimental', 'use_sell_signal', None, 'use_exit_signal') - process_removed_setting(config, 'experimental', 'sell_profit_only', None, 'exit_profit_only') - process_removed_setting(config, 'experimental', 'ignore_roi_if_buy_signal', - None, 'ignore_roi_if_entry_signal') + process_removed_setting(config, "experimental", "use_sell_signal", None, "use_exit_signal") + process_removed_setting(config, "experimental", "sell_profit_only", None, "exit_profit_only") + process_removed_setting( + config, "experimental", "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal" + ) - process_removed_setting(config, 'ask_strategy', 'use_sell_signal', None, 'use_exit_signal') - process_removed_setting(config, 'ask_strategy', 'sell_profit_only', None, 'exit_profit_only') - process_removed_setting(config, 'ask_strategy', 'sell_profit_offset', - None, 'exit_profit_offset') - process_removed_setting(config, 'ask_strategy', 'ignore_roi_if_buy_signal', - None, 'ignore_roi_if_entry_signal') - if (config.get('edge', {}).get('enabled', False) - and 'capital_available_percentage' in config.get('edge', {})): + process_removed_setting(config, "ask_strategy", "use_sell_signal", None, "use_exit_signal") + process_removed_setting(config, "ask_strategy", "sell_profit_only", None, "exit_profit_only") + process_removed_setting( + config, "ask_strategy", "sell_profit_offset", None, "exit_profit_offset" + ) + process_removed_setting( + config, "ask_strategy", "ignore_roi_if_buy_signal", None, "ignore_roi_if_entry_signal" + ) + if config.get("edge", {}).get( + "enabled", False + ) and "capital_available_percentage" in config.get("edge", {}): raise ConfigurationError( "DEPRECATED: " "Using 'edge.capital_available_percentage' has been deprecated in favor of " @@ -129,12 +170,11 @@ def process_temporary_deprecated_settings(config: Config) -> None: "'tradable_balance_ratio' and remove 'capital_available_percentage' " "from the edge configuration." ) - if 'ticker_interval' in config: - + if "ticker_interval" in config: raise ConfigurationError( "DEPRECATED: 'ticker_interval' detected. " "Please use 'timeframe' instead of 'ticker_interval." 
) - if 'protections' in config: + if "protections" in config: logger.warning("DEPRECATED: Setting 'protections' in the configuration is deprecated.") diff --git a/freqtrade/configuration/detect_environment.py b/freqtrade/configuration/detect_environment.py index 99d585e87..1f9185548 100644 --- a/freqtrade/configuration/detect_environment.py +++ b/freqtrade/configuration/detect_environment.py @@ -5,4 +5,4 @@ def running_in_docker() -> bool: """ Check if we are running in a docker container """ - return os.environ.get('FT_APP_ENV') == 'docker' + return os.environ.get("FT_APP_ENV") == "docker" diff --git a/freqtrade/configuration/directory_operations.py b/freqtrade/configuration/directory_operations.py index 267a74928..99d72dabe 100644 --- a/freqtrade/configuration/directory_operations.py +++ b/freqtrade/configuration/directory_operations.py @@ -4,8 +4,14 @@ from pathlib import Path from typing import Optional from freqtrade.configuration.detect_environment import running_in_docker -from freqtrade.constants import (USER_DATA_FILES, USERPATH_FREQAIMODELS, USERPATH_HYPEROPTS, - USERPATH_NOTEBOOKS, USERPATH_STRATEGIES, Config) +from freqtrade.constants import ( + USER_DATA_FILES, + USERPATH_FREQAIMODELS, + USERPATH_HYPEROPTS, + USERPATH_NOTEBOOKS, + USERPATH_STRATEGIES, + Config, +) from freqtrade.exceptions import OperationalException @@ -13,16 +19,15 @@ logger = logging.getLogger(__name__) def create_datadir(config: Config, datadir: Optional[str] = None) -> Path: - folder = Path(datadir) if datadir else Path(f"{config['user_data_dir']}/data") if not datadir: # set datadir - exchange_name = config.get('exchange', {}).get('name', '').lower() + exchange_name = config.get("exchange", {}).get("name", "").lower() folder = folder.joinpath(exchange_name) if not folder.is_dir(): folder.mkdir(parents=True) - logger.info(f'Created data directory: {datadir}') + logger.info(f"Created data directory: {datadir}") return folder @@ -34,8 +39,8 @@ def chown_user_directory(directory: Path) -> None: if running_in_docker(): try: import subprocess - subprocess.check_output( - ['sudo', 'chown', '-R', 'ftuser:', str(directory.resolve())]) + + subprocess.check_output(["sudo", "chown", "-R", "ftuser:", str(directory.resolve())]) except Exception: logger.warning(f"Could not chown {directory}") @@ -50,18 +55,28 @@ def create_userdata_dir(directory: str, create_dir: bool = False) -> Path: :param create_dir: Create directory if it does not exist. :return: Path object containing the directory """ - sub_dirs = ["backtest_results", "data", USERPATH_HYPEROPTS, "hyperopt_results", "logs", - USERPATH_NOTEBOOKS, "plot", USERPATH_STRATEGIES, USERPATH_FREQAIMODELS] + sub_dirs = [ + "backtest_results", + "data", + USERPATH_HYPEROPTS, + "hyperopt_results", + "logs", + USERPATH_NOTEBOOKS, + "plot", + USERPATH_STRATEGIES, + USERPATH_FREQAIMODELS, + ] folder = Path(directory) chown_user_directory(folder) if not folder.is_dir(): if create_dir: folder.mkdir(parents=True) - logger.info(f'Created user-data directory: {folder}') + logger.info(f"Created user-data directory: {folder}") else: raise OperationalException( f"Directory `{folder}` does not exist. 
" - "Please use `freqtrade create-userdir` to create a user directory") + "Please use `freqtrade create-userdir` to create a user directory" + ) # Create required subdirectories for f in sub_dirs: diff --git a/freqtrade/configuration/environment_vars.py b/freqtrade/configuration/environment_vars.py index b59b10fa2..0830f3df7 100644 --- a/freqtrade/configuration/environment_vars.py +++ b/freqtrade/configuration/environment_vars.py @@ -16,9 +16,9 @@ def _get_var_typed(val): try: return float(val) except ValueError: - if val.lower() in ('t', 'true'): + if val.lower() in ("t", "true"): return True - elif val.lower() in ('f', 'false'): + elif val.lower() in ("f", "false"): return False # keep as string return val @@ -32,16 +32,21 @@ def _flat_vars_to_nested_dict(env_dict: Dict[str, Any], prefix: str) -> Dict[str :param prefix: Prefix to consider (usually FREQTRADE__) :return: Nested dict based on available and relevant variables. """ - no_convert = ['CHAT_ID', 'PASSWORD'] + no_convert = ["CHAT_ID", "PASSWORD"] relevant_vars: Dict[str, Any] = {} for env_var, val in sorted(env_dict.items()): if env_var.startswith(prefix): logger.info(f"Loading variable '{env_var}'") - key = env_var.replace(prefix, '') - for k in reversed(key.split('__')): - val = {k.lower(): _get_var_typed(val) - if not isinstance(val, dict) and k not in no_convert else val} + key = env_var.replace(prefix, "") + for k in reversed(key.split("__")): + val = { + k.lower(): ( + _get_var_typed(val) + if not isinstance(val, dict) and k not in no_convert + else val + ) + } relevant_vars = deep_merge_dicts(val, relevant_vars) return relevant_vars diff --git a/freqtrade/configuration/load_config.py b/freqtrade/configuration/load_config.py index 22eeeca55..c11f6b37e 100644 --- a/freqtrade/configuration/load_config.py +++ b/freqtrade/configuration/load_config.py @@ -1,6 +1,7 @@ """ This module contain functions to load the configuration file """ + import logging import re import sys @@ -25,25 +26,25 @@ def log_config_error_range(path: str, errmsg: str) -> str: """ Parses configuration file and prints range around error """ - if path != '-': - offsetlist = re.findall(r'(?<=Parse\serror\sat\soffset\s)\d+', errmsg) + if path != "-": + offsetlist = re.findall(r"(?<=Parse\serror\sat\soffset\s)\d+", errmsg) if offsetlist: offset = int(offsetlist[0]) text = Path(path).read_text() # Fetch an offset of 80 characters around the error line - subtext = text[offset - min(80, offset):offset + 80] - segments = subtext.split('\n') + subtext = text[offset - min(80, offset) : offset + 80] + segments = subtext.split("\n") if len(segments) > 3: # Remove first and last lines, to avoid odd truncations - return '\n'.join(segments[1:-1]) + return "\n".join(segments[1:-1]) else: return subtext - return '' + return "" def load_file(path: Path) -> Dict[str, Any]: try: - with path.open('r') as file: + with path.open("r") as file: config = rapidjson.load(file, parse_mode=CONFIG_PARSE_MODE) except FileNotFoundError: raise OperationalException(f'File "{path}" not found!') from None @@ -58,25 +59,27 @@ def load_config_file(path: str) -> Dict[str, Any]: """ try: # Read config from stdin if requested in the options - with Path(path).open() if path != '-' else sys.stdin as file: + with Path(path).open() if path != "-" else sys.stdin as file: config = rapidjson.load(file, parse_mode=CONFIG_PARSE_MODE) except FileNotFoundError: raise OperationalException( f'Config file "{path}" not found!' 
- ' Please create a config file or check whether it exists.') from None + " Please create a config file or check whether it exists." + ) from None except rapidjson.JSONDecodeError as e: err_range = log_config_error_range(path, str(e)) raise ConfigurationError( - f'{e}\n' - f'Please verify the following segment of your configuration:\n{err_range}' - if err_range else 'Please verify your configuration file for syntax errors.' + f"{e}\nPlease verify the following segment of your configuration:\n{err_range}" + if err_range + else "Please verify your configuration file for syntax errors." ) return config def load_from_files( - files: List[str], base_path: Optional[Path] = None, level: int = 0) -> Dict[str, Any]: + files: List[str], base_path: Optional[Path] = None, level: int = 0 +) -> Dict[str, Any]: """ Recursively load configuration files if specified. Sub-files are assumed to be relative to the initial config. @@ -90,8 +93,8 @@ def load_from_files( files_loaded = [] # We expect here a list of config filenames for filename in files: - logger.info(f'Using config: {filename} ...') - if filename == '-': + logger.info(f"Using config: {filename} ...") + if filename == "-": # Immediately load stdin and return return load_config_file(filename) file = Path(filename) @@ -100,10 +103,11 @@ def load_from_files( file = base_path / file config_tmp = load_config_file(str(file)) - if 'add_config_files' in config_tmp: + if "add_config_files" in config_tmp: config_sub = load_from_files( - config_tmp['add_config_files'], file.resolve().parent, level + 1) - files_loaded.extend(config_sub.get('config_files', [])) + config_tmp["add_config_files"], file.resolve().parent, level + 1 + ) + files_loaded.extend(config_sub.get("config_files", [])) config_tmp = deep_merge_dicts(config_tmp, config_sub) files_loaded.insert(0, str(file)) @@ -111,6 +115,6 @@ def load_from_files( # Merge config options, overwriting prior values config = deep_merge_dicts(config_tmp, config) - config['config_files'] = files_loaded + config["config_files"] = files_loaded return config diff --git a/freqtrade/configuration/timerange.py b/freqtrade/configuration/timerange.py index b82b13b10..6449086fa 100644 --- a/freqtrade/configuration/timerange.py +++ b/freqtrade/configuration/timerange.py @@ -1,6 +1,7 @@ """ This module contains the argument manager class """ + import logging import re from datetime import datetime, timezone @@ -22,9 +23,13 @@ class TimeRange: if *type is None, don't use corresponding startvalue. """ - def __init__(self, starttype: Optional[str] = None, stoptype: Optional[str] = None, - startts: int = 0, stopts: int = 0): - + def __init__( + self, + starttype: Optional[str] = None, + stoptype: Optional[str] = None, + startts: int = 0, + stopts: int = 0, + ): self.starttype: Optional[str] = starttype self.stoptype: Optional[str] = stoptype self.startts: int = startts @@ -48,12 +53,12 @@ class TimeRange: Returns a string representation of the timerange as used by parse_timerange. Follows the format yyyymmdd-yyyymmdd - leaving out the parts that are not set. 
""" - start = '' - stop = '' + start = "" + stop = "" if startdt := self.startdt: - start = startdt.strftime('%Y%m%d') + start = startdt.strftime("%Y%m%d") if stopdt := self.stopdt: - stop = stopdt.strftime('%Y%m%d') + stop = stopdt.strftime("%Y%m%d") return f"{start}-{stop}" @property @@ -61,7 +66,7 @@ class TimeRange: """ Returns a string representation of the start date """ - val = 'unbounded' + val = "unbounded" if (startdt := self.startdt) is not None: val = startdt.strftime(DATETIME_PRINT_FORMAT) return val @@ -71,15 +76,19 @@ class TimeRange: """ Returns a string representation of the stop date """ - val = 'unbounded' + val = "unbounded" if (stopdt := self.stopdt) is not None: val = stopdt.strftime(DATETIME_PRINT_FORMAT) return val def __eq__(self, other): """Override the default Equals behavior""" - return (self.starttype == other.starttype and self.stoptype == other.stoptype - and self.startts == other.startts and self.stopts == other.stopts) + return ( + self.starttype == other.starttype + and self.stoptype == other.stoptype + and self.startts == other.startts + and self.stopts == other.stopts + ) def subtract_start(self, seconds: int) -> None: """ @@ -90,8 +99,9 @@ class TimeRange: if self.startts: self.startts = self.startts - seconds - def adjust_start_if_necessary(self, timeframe_secs: int, startup_candles: int, - min_date: datetime) -> None: + def adjust_start_if_necessary( + self, timeframe_secs: int, startup_candles: int, min_date: datetime + ) -> None: """ Adjust startts by candles. Applies only if no startup-candles have been available. @@ -101,13 +111,13 @@ class TimeRange: has to be moved :return: None (Modifies the object in place) """ - if (not self.starttype or (startup_candles - and min_date.timestamp() >= self.startts)): + if not self.starttype or (startup_candles and min_date.timestamp() >= self.startts): # If no startts was defined, or backtest-data starts at the defined backtest-date - logger.warning("Moving start-date by %s candles to account for startup time.", - startup_candles) + logger.warning( + "Moving start-date by %s candles to account for startup time.", startup_candles + ) self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles) - self.starttype = 'date' + self.starttype = "date" @classmethod def parse_timerange(cls, text: Optional[str]) -> Self: @@ -118,16 +128,17 @@ class TimeRange: """ if not text: return cls(None, None, 0, 0) - syntax = [(r'^-(\d{8})$', (None, 'date')), - (r'^(\d{8})-$', ('date', None)), - (r'^(\d{8})-(\d{8})$', ('date', 'date')), - (r'^-(\d{10})$', (None, 'date')), - (r'^(\d{10})-$', ('date', None)), - (r'^(\d{10})-(\d{10})$', ('date', 'date')), - (r'^-(\d{13})$', (None, 'date')), - (r'^(\d{13})-$', ('date', None)), - (r'^(\d{13})-(\d{13})$', ('date', 'date')), - ] + syntax = [ + (r"^-(\d{8})$", (None, "date")), + (r"^(\d{8})-$", ("date", None)), + (r"^(\d{8})-(\d{8})$", ("date", "date")), + (r"^-(\d{10})$", (None, "date")), + (r"^(\d{10})-$", ("date", None)), + (r"^(\d{10})-(\d{10})$", ("date", "date")), + (r"^-(\d{13})$", (None, "date")), + (r"^(\d{13})-$", ("date", None)), + (r"^(\d{13})-(\d{13})$", ("date", "date")), + ] for rex, stype in syntax: # Apply the regular expression to text match = re.match(rex, text) @@ -138,9 +149,12 @@ class TimeRange: stop: int = 0 if stype[0]: starts = rvals[index] - if stype[0] == 'date' and len(starts) == 8: - start = int(datetime.strptime(starts, '%Y%m%d').replace( - tzinfo=timezone.utc).timestamp()) + if stype[0] == "date" and len(starts) == 8: + start = int( + 
datetime.strptime(starts, "%Y%m%d") + .replace(tzinfo=timezone.utc) + .timestamp() + ) elif len(starts) == 13: start = int(starts) // 1000 else: @@ -148,15 +162,19 @@ class TimeRange: index += 1 if stype[1]: stops = rvals[index] - if stype[1] == 'date' and len(stops) == 8: - stop = int(datetime.strptime(stops, '%Y%m%d').replace( - tzinfo=timezone.utc).timestamp()) + if stype[1] == "date" and len(stops) == 8: + stop = int( + datetime.strptime(stops, "%Y%m%d") + .replace(tzinfo=timezone.utc) + .timestamp() + ) elif len(stops) == 13: stop = int(stops) // 1000 else: stop = int(stops) if start > stop > 0: raise ConfigurationError( - f'Start date is after stop date for timerange "{text}"') + f'Start date is after stop date for timerange "{text}"' + ) return cls(stype[0], stype[1], start, stop) raise ConfigurationError(f'Incorrect syntax for timerange "{text}"') diff --git a/freqtrade/constants.py b/freqtrade/constants.py index 37e2d849c..f8f1ac7ee 100644 --- a/freqtrade/constants.py +++ b/freqtrade/constants.py @@ -3,110 +3,160 @@ """ bot constants """ + from typing import Any, Dict, List, Literal, Tuple from freqtrade.enums import CandleType, PriceType, RPCMessageType DOCS_LINK = "https://www.freqtrade.io/en/stable" -DEFAULT_CONFIG = 'config.json' +DEFAULT_CONFIG = "config.json" PROCESS_THROTTLE_SECS = 5 # sec HYPEROPT_EPOCH = 100 # epochs RETRY_TIMEOUT = 30 # sec -TIMEOUT_UNITS = ['minutes', 'seconds'] -EXPORT_OPTIONS = ['none', 'trades', 'signals'] -DEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite' -DEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite' -UNLIMITED_STAKE_AMOUNT = 'unlimited' +TIMEOUT_UNITS = ["minutes", "seconds"] +EXPORT_OPTIONS = ["none", "trades", "signals"] +DEFAULT_DB_PROD_URL = "sqlite:///tradesv3.sqlite" +DEFAULT_DB_DRYRUN_URL = "sqlite:///tradesv3.dryrun.sqlite" +UNLIMITED_STAKE_AMOUNT = "unlimited" DEFAULT_AMOUNT_RESERVE_PERCENT = 0.05 -REQUIRED_ORDERTIF = ['entry', 'exit'] -REQUIRED_ORDERTYPES = ['entry', 'exit', 'stoploss', 'stoploss_on_exchange'] -PRICING_SIDES = ['ask', 'bid', 'same', 'other'] -ORDERTYPE_POSSIBILITIES = ['limit', 'market'] -_ORDERTIF_POSSIBILITIES = ['GTC', 'FOK', 'IOC', 'PO'] +REQUIRED_ORDERTIF = ["entry", "exit"] +REQUIRED_ORDERTYPES = ["entry", "exit", "stoploss", "stoploss_on_exchange"] +PRICING_SIDES = ["ask", "bid", "same", "other"] +ORDERTYPE_POSSIBILITIES = ["limit", "market"] +_ORDERTIF_POSSIBILITIES = ["GTC", "FOK", "IOC", "PO"] ORDERTIF_POSSIBILITIES = _ORDERTIF_POSSIBILITIES + [t.lower() for t in _ORDERTIF_POSSIBILITIES] STOPLOSS_PRICE_TYPES = [p for p in PriceType] -HYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss', - 'SharpeHyperOptLoss', 'SharpeHyperOptLossDaily', - 'SortinoHyperOptLoss', 'SortinoHyperOptLossDaily', - 'CalmarHyperOptLoss', - 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss', - 'ProfitDrawDownHyperOptLoss'] -AVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList', - 'MarketCapPairList', 'AgeFilter', "FullTradesFilter", 'OffsetFilter', - 'PerformanceFilter', 'PrecisionFilter', 'PriceFilter', - 'RangeStabilityFilter', 'ShuffleFilter', 'SpreadFilter', - 'VolatilityFilter'] -AVAILABLE_PROTECTIONS = ['CooldownPeriod', - 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard'] -AVAILABLE_DATAHANDLERS = ['json', 'jsongz', 'hdf5', 'feather', 'parquet'] -BACKTEST_BREAKDOWNS = ['day', 'week', 'month'] -BACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month'] -BACKTEST_CACHE_DEFAULT = 'day' +HYPEROPT_LOSS_BUILTIN = [ + "ShortTradeDurHyperOptLoss", + 
"OnlyProfitHyperOptLoss", + "SharpeHyperOptLoss", + "SharpeHyperOptLossDaily", + "SortinoHyperOptLoss", + "SortinoHyperOptLossDaily", + "CalmarHyperOptLoss", + "MaxDrawDownHyperOptLoss", + "MaxDrawDownRelativeHyperOptLoss", + "ProfitDrawDownHyperOptLoss", +] +AVAILABLE_PAIRLISTS = [ + "StaticPairList", + "VolumePairList", + "ProducerPairList", + "RemotePairList", + "MarketCapPairList", + "AgeFilter", + "FullTradesFilter", + "OffsetFilter", + "PerformanceFilter", + "PrecisionFilter", + "PriceFilter", + "RangeStabilityFilter", + "ShuffleFilter", + "SpreadFilter", + "VolatilityFilter", +] +AVAILABLE_PROTECTIONS = ["CooldownPeriod", "LowProfitPairs", "MaxDrawdown", "StoplossGuard"] +AVAILABLE_DATAHANDLERS = ["json", "jsongz", "hdf5", "feather", "parquet"] +BACKTEST_BREAKDOWNS = ["day", "week", "month"] +BACKTEST_CACHE_AGE = ["none", "day", "week", "month"] +BACKTEST_CACHE_DEFAULT = "day" DRY_RUN_WALLET = 1000 -DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S' +DATETIME_PRINT_FORMAT = "%Y-%m-%d %H:%M:%S" MATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons -DEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume'] +DEFAULT_DATAFRAME_COLUMNS = ["date", "open", "high", "low", "close", "volume"] # Don't modify sequence of DEFAULT_TRADES_COLUMNS # it has wide consequences for stored trades files -DEFAULT_TRADES_COLUMNS = ['timestamp', 'id', 'type', 'side', 'price', 'amount', 'cost'] +DEFAULT_TRADES_COLUMNS = ["timestamp", "id", "type", "side", "price", "amount", "cost"] TRADES_DTYPES = { - 'timestamp': 'int64', - 'id': 'str', - 'type': 'str', - 'side': 'str', - 'price': 'float64', - 'amount': 'float64', - 'cost': 'float64', + "timestamp": "int64", + "id": "str", + "type": "str", + "side": "str", + "price": "float64", + "amount": "float64", + "cost": "float64", } -TRADING_MODES = ['spot', 'margin', 'futures'] -MARGIN_MODES = ['cross', 'isolated', ''] +TRADING_MODES = ["spot", "margin", "futures"] +MARGIN_MODES = ["cross", "isolated", ""] -LAST_BT_RESULT_FN = '.last_result.json' -FTHYPT_FILEVERSION = 'fthypt_fileversion' +LAST_BT_RESULT_FN = ".last_result.json" +FTHYPT_FILEVERSION = "fthypt_fileversion" -USERPATH_HYPEROPTS = 'hyperopts' -USERPATH_STRATEGIES = 'strategies' -USERPATH_NOTEBOOKS = 'notebooks' -USERPATH_FREQAIMODELS = 'freqaimodels' +USERPATH_HYPEROPTS = "hyperopts" +USERPATH_STRATEGIES = "strategies" +USERPATH_NOTEBOOKS = "notebooks" +USERPATH_FREQAIMODELS = "freqaimodels" -TELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent'] -WEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw'] +TELEGRAM_SETTING_OPTIONS = ["on", "off", "silent"] +WEBHOOK_FORMAT_OPTIONS = ["form", "json", "raw"] FULL_DATAFRAME_THRESHOLD = 100 CUSTOM_TAG_MAX_LENGTH = 255 -DL_DATA_TIMEFRAMES = ['1m', '5m'] +DL_DATA_TIMEFRAMES = ["1m", "5m"] -ENV_VAR_PREFIX = 'FREQTRADE__' +ENV_VAR_PREFIX = "FREQTRADE__" -CANCELED_EXCHANGE_STATES = ('cancelled', 'canceled', 'expired') -NON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ('closed',) +CANCELED_EXCHANGE_STATES = ("cancelled", "canceled", "expired") +NON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ("closed",) # Define decimals per coin for outputs # Only used for outputs. 
DECIMAL_PER_COIN_FALLBACK = 3 # Should be low to avoid listing all possible FIAT's DECIMALS_PER_COIN = { - 'BTC': 8, - 'ETH': 5, + "BTC": 8, + "ETH": 5, } -DUST_PER_COIN = { - 'BTC': 0.0001, - 'ETH': 0.01 -} +DUST_PER_COIN = {"BTC": 0.0001, "ETH": 0.01} # Source files with destination directories within user-directory USER_DATA_FILES = { - 'sample_strategy.py': USERPATH_STRATEGIES, - 'sample_hyperopt_loss.py': USERPATH_HYPEROPTS, - 'strategy_analysis_example.ipynb': USERPATH_NOTEBOOKS, + "sample_strategy.py": USERPATH_STRATEGIES, + "sample_hyperopt_loss.py": USERPATH_HYPEROPTS, + "strategy_analysis_example.ipynb": USERPATH_NOTEBOOKS, } SUPPORTED_FIAT = [ - "AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", - "EUR", "GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", - "KRW", "MXN", "MYR", "NOK", "NZD", "PHP", "PKR", "PLN", - "RUB", "UAH", "SEK", "SGD", "THB", "TRY", "TWD", "ZAR", - "USD", "BTC", "ETH", "XRP", "LTC", "BCH", "BNB" + "AUD", + "BRL", + "CAD", + "CHF", + "CLP", + "CNY", + "CZK", + "DKK", + "EUR", + "GBP", + "HKD", + "HUF", + "IDR", + "ILS", + "INR", + "JPY", + "KRW", + "MXN", + "MYR", + "NOK", + "NZD", + "PHP", + "PKR", + "PLN", + "RUB", + "UAH", + "SEK", + "SGD", + "THB", + "TRY", + "TWD", + "ZAR", + "USD", + "BTC", + "ETH", + "XRP", + "LTC", + "BCH", + "BNB", + "", # Allow empty field in config. ] MINIMAL_CONFIG = { @@ -117,280 +167,296 @@ MINIMAL_CONFIG = { "key": "", "secret": "", "pair_whitelist": [], - "ccxt_async_config": { - } - } + "ccxt_async_config": {}, + }, } -__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType} +__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {"type": "object"} for x in RPCMessageType} # Required json-schema for user specified config CONF_SCHEMA = { - 'type': 'object', - 'properties': { - 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1}, - 'new_pairs_days': {'type': 'integer', 'default': 30}, - 'timeframe': {'type': 'string'}, - 'stake_currency': {'type': 'string'}, - 'stake_amount': { - 'type': ['number', 'string'], - 'minimum': 0.0001, - 'pattern': UNLIMITED_STAKE_AMOUNT + "type": "object", + "properties": { + "max_open_trades": {"type": ["integer", "number"], "minimum": -1}, + "new_pairs_days": {"type": "integer", "default": 30}, + "timeframe": {"type": "string"}, + "stake_currency": {"type": "string"}, + "stake_amount": { + "type": ["number", "string"], + "minimum": 0.0001, + "pattern": UNLIMITED_STAKE_AMOUNT, }, - 'tradable_balance_ratio': { - 'type': 'number', - 'minimum': 0.0, - 'maximum': 1, - 'default': 0.99 + "tradable_balance_ratio": {"type": "number", "minimum": 0.0, "maximum": 1, "default": 0.99}, + "available_capital": { + "type": "number", + "minimum": 0, }, - 'available_capital': { - 'type': 'number', - 'minimum': 0, + "amend_last_stake_amount": {"type": "boolean", "default": False}, + "last_stake_amount_min_ratio": { + "type": "number", + "minimum": 0.0, + "maximum": 1.0, + "default": 0.5, }, - 'amend_last_stake_amount': {'type': 'boolean', 'default': False}, - 'last_stake_amount_min_ratio': { - 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5 + "fiat_display_currency": {"type": "string", "enum": SUPPORTED_FIAT}, + "dry_run": {"type": "boolean"}, + "dry_run_wallet": {"type": "number", "default": DRY_RUN_WALLET}, + "cancel_open_orders_on_exit": {"type": "boolean", "default": False}, + "process_only_new_candles": {"type": "boolean"}, + "minimal_roi": { + "type": "object", + "patternProperties": {"^[0-9.]+$": {"type": "number"}}, }, - 
'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT}, - 'dry_run': {'type': 'boolean'}, - 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET}, - 'cancel_open_orders_on_exit': {'type': 'boolean', 'default': False}, - 'process_only_new_candles': {'type': 'boolean'}, - 'minimal_roi': { - 'type': 'object', - 'patternProperties': { - '^[0-9.]+$': {'type': 'number'} + "amount_reserve_percent": {"type": "number", "minimum": 0.0, "maximum": 0.5}, + "stoploss": {"type": "number", "maximum": 0, "exclusiveMaximum": True}, + "trailing_stop": {"type": "boolean"}, + "trailing_stop_positive": {"type": "number", "minimum": 0, "maximum": 1}, + "trailing_stop_positive_offset": {"type": "number", "minimum": 0, "maximum": 1}, + "trailing_only_offset_is_reached": {"type": "boolean"}, + "use_exit_signal": {"type": "boolean"}, + "exit_profit_only": {"type": "boolean"}, + "exit_profit_offset": {"type": "number"}, + "fee": {"type": "number", "minimum": 0, "maximum": 0.1}, + "ignore_roi_if_entry_signal": {"type": "boolean"}, + "ignore_buying_expired_candle_after": {"type": "number"}, + "trading_mode": {"type": "string", "enum": TRADING_MODES}, + "margin_mode": {"type": "string", "enum": MARGIN_MODES}, + "reduce_df_footprint": {"type": "boolean", "default": False}, + "minimum_trade_amount": {"type": "number", "default": 10}, + "targeted_trade_amount": {"type": "number", "default": 20}, + "lookahead_analysis_exportfilename": {"type": "string"}, + "startup_candle": { + "type": "array", + "uniqueItems": True, + "default": [199, 399, 499, 999, 1999], + }, + "liquidation_buffer": {"type": "number", "minimum": 0.0, "maximum": 0.99}, + "backtest_breakdown": { + "type": "array", + "items": {"type": "string", "enum": BACKTEST_BREAKDOWNS}, + }, + "bot_name": {"type": "string"}, + "unfilledtimeout": { + "type": "object", + "properties": { + "entry": {"type": "number", "minimum": 1}, + "exit": {"type": "number", "minimum": 1}, + "exit_timeout_count": {"type": "number", "minimum": 0, "default": 0}, + "unit": {"type": "string", "enum": TIMEOUT_UNITS, "default": "minutes"}, }, }, - 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5}, - 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True}, - 'trailing_stop': {'type': 'boolean'}, - 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1}, - 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1}, - 'trailing_only_offset_is_reached': {'type': 'boolean'}, - 'use_exit_signal': {'type': 'boolean'}, - 'exit_profit_only': {'type': 'boolean'}, - 'exit_profit_offset': {'type': 'number'}, - 'ignore_roi_if_entry_signal': {'type': 'boolean'}, - 'ignore_buying_expired_candle_after': {'type': 'number'}, - 'trading_mode': {'type': 'string', 'enum': TRADING_MODES}, - 'margin_mode': {'type': 'string', 'enum': MARGIN_MODES}, - 'reduce_df_footprint': {'type': 'boolean', 'default': False}, - 'minimum_trade_amount': {'type': 'number', 'default': 10}, - 'targeted_trade_amount': {'type': 'number', 'default': 20}, - 'lookahead_analysis_exportfilename': {'type': 'string'}, - 'startup_candle': { - 'type': 'array', - 'uniqueItems': True, - 'default': [199, 399, 499, 999, 1999], - }, - 'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99}, - 'backtest_breakdown': { - 'type': 'array', - 'items': {'type': 'string', 'enum': BACKTEST_BREAKDOWNS} - }, - 'bot_name': {'type': 'string'}, - 'unfilledtimeout': { - 'type': 'object', - 'properties': { - 'entry': {'type': 'number', 'minimum': 
1}, - 'exit': {'type': 'number', 'minimum': 1}, - 'exit_timeout_count': {'type': 'number', 'minimum': 0, 'default': 0}, - 'unit': {'type': 'string', 'enum': TIMEOUT_UNITS, 'default': 'minutes'} - } - }, - 'entry_pricing': { - 'type': 'object', - 'properties': { - 'price_last_balance': { - 'type': 'number', - 'minimum': 0, - 'maximum': 1, - 'exclusiveMaximum': False, + "entry_pricing": { + "type": "object", + "properties": { + "price_last_balance": { + "type": "number", + "minimum": 0, + "maximum": 1, + "exclusiveMaximum": False, }, - 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'}, - 'use_order_book': {'type': 'boolean'}, - 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, }, - 'check_depth_of_market': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'bids_to_ask_delta': {'type': 'number', 'minimum': 0}, - } + "price_side": {"type": "string", "enum": PRICING_SIDES, "default": "same"}, + "use_order_book": {"type": "boolean"}, + "order_book_top": { + "type": "integer", + "minimum": 1, + "maximum": 50, + }, + "check_depth_of_market": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "bids_to_ask_delta": {"type": "number", "minimum": 0}, + }, }, }, - 'required': ['price_side'] + "required": ["price_side"], }, - 'exit_pricing': { - 'type': 'object', - 'properties': { - 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'}, - 'price_last_balance': { - 'type': 'number', - 'minimum': 0, - 'maximum': 1, - 'exclusiveMaximum': False, + "exit_pricing": { + "type": "object", + "properties": { + "price_side": {"type": "string", "enum": PRICING_SIDES, "default": "same"}, + "price_last_balance": { + "type": "number", + "minimum": 0, + "maximum": 1, + "exclusiveMaximum": False, }, - 'use_order_book': {'type': 'boolean'}, - 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, }, - }, - 'required': ['price_side'] - }, - 'custom_price_max_distance_ratio': { - 'type': 'number', 'minimum': 0.0 - }, - 'order_types': { - 'type': 'object', - 'properties': { - 'entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'force_exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'force_entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'emergency_exit': { - 'type': 'string', - 'enum': ORDERTYPE_POSSIBILITIES, - 'default': 'market'}, - 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES}, - 'stoploss_on_exchange': {'type': 'boolean'}, - 'stoploss_price_type': {'type': 'string', 'enum': STOPLOSS_PRICE_TYPES}, - 'stoploss_on_exchange_interval': {'type': 'number'}, - 'stoploss_on_exchange_limit_ratio': {'type': 'number', 'minimum': 0.0, - 'maximum': 1.0} - }, - 'required': ['entry', 'exit', 'stoploss', 'stoploss_on_exchange'] - }, - 'order_time_in_force': { - 'type': 'object', - 'properties': { - 'entry': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}, - 'exit': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES} - }, - 'required': REQUIRED_ORDERTIF - }, - 'exchange': {'$ref': '#/definitions/exchange'}, - 'edge': {'$ref': '#/definitions/edge'}, - 'freqai': {'$ref': '#/definitions/freqai'}, - 'external_message_consumer': {'$ref': '#/definitions/external_message_consumer'}, - 'experimental': { - 'type': 'object', - 'properties': { - 'block_bad_exchanges': {'type': 'boolean'} - } - }, - 'pairlists': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'method': {'type': 
'string', 'enum': AVAILABLE_PAIRLISTS}, + "use_order_book": {"type": "boolean"}, + "order_book_top": { + "type": "integer", + "minimum": 1, + "maximum": 50, }, - 'required': ['method'], - } - }, - 'protections': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'method': {'type': 'string', 'enum': AVAILABLE_PROTECTIONS}, - 'stop_duration': {'type': 'number', 'minimum': 0.0}, - 'stop_duration_candles': {'type': 'number', 'minimum': 0}, - 'trade_limit': {'type': 'number', 'minimum': 1}, - 'lookback_period': {'type': 'number', 'minimum': 1}, - 'lookback_period_candles': {'type': 'number', 'minimum': 1}, - }, - 'required': ['method'], - } - }, - 'telegram': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'token': {'type': 'string'}, - 'chat_id': {'type': 'string'}, - 'allow_custom_messages': {'type': 'boolean', 'default': True}, - 'balance_dust_level': {'type': 'number', 'minimum': 0.0}, - 'notification_settings': { - 'type': 'object', - 'default': {}, - 'properties': { - 'status': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS}, - 'warning': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS}, - 'startup': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS}, - 'entry': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS}, - 'entry_fill': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS, - 'default': 'off' - }, - 'entry_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS, }, - 'exit': { - 'type': ['string', 'object'], - 'additionalProperties': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS - } - }, - 'exit_fill': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS, - 'default': 'on' - }, - 'exit_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS}, - 'protection_trigger': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS, - 'default': 'on' - }, - 'protection_trigger_global': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS, - 'default': 'on' - }, - 'show_candle': { - 'type': 'string', - 'enum': ['off', 'ohlc'], - 'default': 'off' - }, - 'strategy_msg': { - 'type': 'string', - 'enum': TELEGRAM_SETTING_OPTIONS, - 'default': 'on' - }, - } - }, - 'reload': {'type': 'boolean'}, }, - 'required': ['enabled', 'token', 'chat_id'], + "required": ["price_side"], }, - 'webhook': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'url': {'type': 'string'}, - 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'}, - 'retries': {'type': 'integer', 'minimum': 0}, - 'retry_delay': {'type': 'number', 'minimum': 0}, + "custom_price_max_distance_ratio": {"type": "number", "minimum": 0.0}, + "order_types": { + "type": "object", + "properties": { + "entry": {"type": "string", "enum": ORDERTYPE_POSSIBILITIES}, + "exit": {"type": "string", "enum": ORDERTYPE_POSSIBILITIES}, + "force_exit": {"type": "string", "enum": ORDERTYPE_POSSIBILITIES}, + "force_entry": {"type": "string", "enum": ORDERTYPE_POSSIBILITIES}, + "emergency_exit": { + "type": "string", + "enum": ORDERTYPE_POSSIBILITIES, + "default": "market", + }, + "stoploss": {"type": "string", "enum": ORDERTYPE_POSSIBILITIES}, + "stoploss_on_exchange": {"type": "boolean"}, + "stoploss_price_type": {"type": "string", "enum": STOPLOSS_PRICE_TYPES}, + "stoploss_on_exchange_interval": {"type": "number"}, + "stoploss_on_exchange_limit_ratio": { + "type": "number", + "minimum": 0.0, + "maximum": 1.0, + }, + }, + "required": ["entry", "exit", "stoploss", "stoploss_on_exchange"], + }, + "order_time_in_force": { 
+ "type": "object", + "properties": { + "entry": {"type": "string", "enum": ORDERTIF_POSSIBILITIES}, + "exit": {"type": "string", "enum": ORDERTIF_POSSIBILITIES}, + }, + "required": REQUIRED_ORDERTIF, + }, + "coingecko": { + "type": "object", + "properties": { + "is_demo": {"type": "boolean", "default": True}, + "api_key": {"type": "string"}, + }, + "required": ["is_demo", "api_key"], + }, + "exchange": {"$ref": "#/definitions/exchange"}, + "edge": {"$ref": "#/definitions/edge"}, + "freqai": {"$ref": "#/definitions/freqai"}, + "external_message_consumer": {"$ref": "#/definitions/external_message_consumer"}, + "experimental": { + "type": "object", + "properties": {"block_bad_exchanges": {"type": "boolean"}}, + }, + "pairlists": { + "type": "array", + "items": { + "type": "object", + "properties": { + "method": {"type": "string", "enum": AVAILABLE_PAIRLISTS}, + }, + "required": ["method"], + }, + }, + "protections": { + "type": "array", + "items": { + "type": "object", + "properties": { + "method": {"type": "string", "enum": AVAILABLE_PROTECTIONS}, + "stop_duration": {"type": "number", "minimum": 0.0}, + "stop_duration_candles": {"type": "number", "minimum": 0}, + "trade_limit": {"type": "number", "minimum": 1}, + "lookback_period": {"type": "number", "minimum": 1}, + "lookback_period_candles": {"type": "number", "minimum": 1}, + }, + "required": ["method"], + }, + }, + "telegram": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "token": {"type": "string"}, + "chat_id": {"type": "string"}, + "allow_custom_messages": {"type": "boolean", "default": True}, + "balance_dust_level": {"type": "number", "minimum": 0.0}, + "notification_settings": { + "type": "object", + "default": {}, + "properties": { + "status": {"type": "string", "enum": TELEGRAM_SETTING_OPTIONS}, + "warning": {"type": "string", "enum": TELEGRAM_SETTING_OPTIONS}, + "startup": {"type": "string", "enum": TELEGRAM_SETTING_OPTIONS}, + "entry": {"type": "string", "enum": TELEGRAM_SETTING_OPTIONS}, + "entry_fill": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + "default": "off", + }, + "entry_cancel": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + }, + "exit": { + "type": ["string", "object"], + "additionalProperties": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + }, + }, + "exit_fill": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + "default": "on", + }, + "exit_cancel": {"type": "string", "enum": TELEGRAM_SETTING_OPTIONS}, + "protection_trigger": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + "default": "on", + }, + "protection_trigger_global": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + "default": "on", + }, + "show_candle": { + "type": "string", + "enum": ["off", "ohlc"], + "default": "off", + }, + "strategy_msg": { + "type": "string", + "enum": TELEGRAM_SETTING_OPTIONS, + "default": "on", + }, + }, + }, + "reload": {"type": "boolean"}, + }, + "required": ["enabled", "token", "chat_id"], + }, + "webhook": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "url": {"type": "string"}, + "format": {"type": "string", "enum": WEBHOOK_FORMAT_OPTIONS, "default": "form"}, + "retries": {"type": "integer", "minimum": 0}, + "retry_delay": {"type": "number", "minimum": 0}, **__MESSAGE_TYPE_DICT, # **{x: {'type': 'object'} for x in RPCMessageType}, # Below -> Deprecated - 'webhookentry': {'type': 'object'}, - 'webhookentrycancel': {'type': 'object'}, - 'webhookentryfill': {'type': 'object'}, - 'webhookexit': 
{'type': 'object'}, - 'webhookexitcancel': {'type': 'object'}, - 'webhookexitfill': {'type': 'object'}, - 'webhookstatus': {'type': 'object'}, + "webhookentry": {"type": "object"}, + "webhookentrycancel": {"type": "object"}, + "webhookentryfill": {"type": "object"}, + "webhookexit": {"type": "object"}, + "webhookexitcancel": {"type": "object"}, + "webhookexitfill": {"type": "object"}, + "webhookstatus": {"type": "object"}, }, }, - 'discord': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'webhook_url': {'type': 'string'}, + "discord": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "webhook_url": {"type": "string"}, "exit_fill": { - 'type': 'array', 'items': {'type': 'object'}, - 'default': [ + "type": "array", + "items": {"type": "object"}, + "default": [ {"Trade ID": "{trade_id}"}, {"Exchange": "{exchange}"}, {"Pair": "{pair}"}, @@ -406,11 +472,12 @@ CONF_SCHEMA = { {"Exit Reason": "{exit_reason}"}, {"Strategy": "{strategy}"}, {"Timeframe": "{timeframe}"}, - ] + ], }, "entry_fill": { - 'type': 'array', 'items': {'type': 'object'}, - 'default': [ + "type": "array", + "items": {"type": "object"}, + "default": [ {"Trade ID": "{trade_id}"}, {"Exchange": "{exchange}"}, {"Pair": "{pair}"}, @@ -420,147 +487,143 @@ CONF_SCHEMA = { {"Open date": "{open_date:%Y-%m-%d %H:%M:%S}"}, {"Enter tag": "{enter_tag}"}, {"Strategy": "{strategy} {timeframe}"}, - ] + ], }, - } - }, - 'api_server': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'listen_ip_address': {'format': 'ipv4'}, - 'listen_port': { - 'type': 'integer', - 'minimum': 1024, - 'maximum': 65535 - }, - 'username': {'type': 'string'}, - 'password': {'type': 'string'}, - 'ws_token': {'type': ['string', 'array'], 'items': {'type': 'string'}}, - 'jwt_secret_key': {'type': 'string'}, - 'CORS_origins': {'type': 'array', 'items': {'type': 'string'}}, - 'verbosity': {'type': 'string', 'enum': ['error', 'info']}, }, - 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password'] }, - 'db_url': {'type': 'string'}, - 'export': {'type': 'string', 'enum': EXPORT_OPTIONS, 'default': 'trades'}, - 'disableparamexport': {'type': 'boolean'}, - 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']}, - 'force_entry_enable': {'type': 'boolean'}, - 'disable_dataframe_checks': {'type': 'boolean'}, - 'internals': { - 'type': 'object', - 'default': {}, - 'properties': { - 'process_throttle_secs': {'type': 'integer'}, - 'interval': {'type': 'integer'}, - 'sd_notify': {'type': 'boolean'}, - } + "api_server": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "listen_ip_address": {"format": "ipv4"}, + "listen_port": {"type": "integer", "minimum": 1024, "maximum": 65535}, + "username": {"type": "string"}, + "password": {"type": "string"}, + "ws_token": {"type": ["string", "array"], "items": {"type": "string"}}, + "jwt_secret_key": {"type": "string"}, + "CORS_origins": {"type": "array", "items": {"type": "string"}}, + "verbosity": {"type": "string", "enum": ["error", "info"]}, + }, + "required": ["enabled", "listen_ip_address", "listen_port", "username", "password"], }, - 'dataformat_ohlcv': { - 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 'default': 'feather' + "db_url": {"type": "string"}, + "export": {"type": "string", "enum": EXPORT_OPTIONS, "default": "trades"}, + "disableparamexport": {"type": "boolean"}, + "initial_state": {"type": "string", "enum": ["running", "stopped"]}, + "force_entry_enable": {"type": 
"boolean"}, + "disable_dataframe_checks": {"type": "boolean"}, + "internals": { + "type": "object", + "default": {}, + "properties": { + "process_throttle_secs": {"type": "integer"}, + "interval": {"type": "integer"}, + "sd_notify": {"type": "boolean"}, + }, }, - 'dataformat_trades': { - 'type': 'string', - 'enum': AVAILABLE_DATAHANDLERS, - 'default': 'feather' + "dataformat_ohlcv": { + "type": "string", + "enum": AVAILABLE_DATAHANDLERS, + "default": "feather", }, - 'position_adjustment_enable': {'type': 'boolean'}, - 'max_entry_position_adjustment': {'type': ['integer', 'number'], 'minimum': -1}, + "dataformat_trades": { + "type": "string", + "enum": AVAILABLE_DATAHANDLERS, + "default": "feather", + }, + "position_adjustment_enable": {"type": "boolean"}, + "max_entry_position_adjustment": {"type": ["integer", "number"], "minimum": -1}, }, - 'definitions': { - 'exchange': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'key': {'type': 'string', 'default': ''}, - 'secret': {'type': 'string', 'default': ''}, - 'password': {'type': 'string', 'default': ''}, - 'uid': {'type': 'string'}, - 'pair_whitelist': { - 'type': 'array', - 'items': { - 'type': 'string', + "definitions": { + "exchange": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "key": {"type": "string", "default": ""}, + "secret": {"type": "string", "default": ""}, + "password": {"type": "string", "default": ""}, + "uid": {"type": "string"}, + "pair_whitelist": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True + "uniqueItems": True, }, - 'pair_blacklist': { - 'type': 'array', - 'items': { - 'type': 'string', + "pair_blacklist": { + "type": "array", + "items": { + "type": "string", }, - 'uniqueItems': True + "uniqueItems": True, }, - 'unknown_fee_rate': {'type': 'number'}, - 'outdated_offset': {'type': 'integer', 'minimum': 1}, - 'markets_refresh_interval': {'type': 'integer'}, - 'ccxt_config': {'type': 'object'}, - 'ccxt_async_config': {'type': 'object'} + "unknown_fee_rate": {"type": "number"}, + "outdated_offset": {"type": "integer", "minimum": 1}, + "markets_refresh_interval": {"type": "integer"}, + "ccxt_config": {"type": "object"}, + "ccxt_async_config": {"type": "object"}, }, - 'required': ['name'] + "required": ["name"], }, - 'edge': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean'}, - 'process_throttle_secs': {'type': 'integer', 'minimum': 600}, - 'calculate_since_number_of_days': {'type': 'integer'}, - 'allowed_risk': {'type': 'number'}, - 'stoploss_range_min': {'type': 'number'}, - 'stoploss_range_max': {'type': 'number'}, - 'stoploss_range_step': {'type': 'number'}, - 'minimum_winrate': {'type': 'number'}, - 'minimum_expectancy': {'type': 'number'}, - 'min_trade_number': {'type': 'number'}, - 'max_trade_duration_minute': {'type': 'integer'}, - 'remove_pumps': {'type': 'boolean'} + "edge": { + "type": "object", + "properties": { + "enabled": {"type": "boolean"}, + "process_throttle_secs": {"type": "integer", "minimum": 600}, + "calculate_since_number_of_days": {"type": "integer"}, + "allowed_risk": {"type": "number"}, + "stoploss_range_min": {"type": "number"}, + "stoploss_range_max": {"type": "number"}, + "stoploss_range_step": {"type": "number"}, + "minimum_winrate": {"type": "number"}, + "minimum_expectancy": {"type": "number"}, + "min_trade_number": {"type": "number"}, + "max_trade_duration_minute": {"type": "integer"}, + "remove_pumps": {"type": "boolean"}, }, - 'required': ['process_throttle_secs', 'allowed_risk'] + 
"required": ["process_throttle_secs", "allowed_risk"], }, - 'external_message_consumer': { - 'type': 'object', - 'properties': { - 'enabled': {'type': 'boolean', 'default': False}, - 'producers': { - 'type': 'array', - 'items': { - 'type': 'object', - 'properties': { - 'name': {'type': 'string'}, - 'host': {'type': 'string'}, - 'port': { - 'type': 'integer', - 'default': 8080, - 'minimum': 0, - 'maximum': 65535 + "external_message_consumer": { + "type": "object", + "properties": { + "enabled": {"type": "boolean", "default": False}, + "producers": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "host": {"type": "string"}, + "port": { + "type": "integer", + "default": 8080, + "minimum": 0, + "maximum": 65535, }, - 'secure': {'type': 'boolean', 'default': False}, - 'ws_token': {'type': 'string'}, + "secure": {"type": "boolean", "default": False}, + "ws_token": {"type": "string"}, }, - 'required': ['name', 'host', 'ws_token'] - } + "required": ["name", "host", "ws_token"], + }, }, - 'wait_timeout': {'type': 'integer', 'minimum': 0}, - 'sleep_time': {'type': 'integer', 'minimum': 0}, - 'ping_timeout': {'type': 'integer', 'minimum': 0}, - 'remove_entry_exit_signals': {'type': 'boolean', 'default': False}, - 'initial_candle_limit': { - 'type': 'integer', - 'minimum': 0, - 'maximum': 1500, - 'default': 1500 + "wait_timeout": {"type": "integer", "minimum": 0}, + "sleep_time": {"type": "integer", "minimum": 0}, + "ping_timeout": {"type": "integer", "minimum": 0}, + "remove_entry_exit_signals": {"type": "boolean", "default": False}, + "initial_candle_limit": { + "type": "integer", + "minimum": 0, + "maximum": 1500, + "default": 1500, + }, + "message_size_limit": { # In megabytes + "type": "integer", + "minimum": 1, + "maximum": 20, + "default": 8, }, - 'message_size_limit': { # In megabytes - 'type': 'integer', - 'minimum': 1, - 'maxmium': 20, - 'default': 8, - } }, - 'required': ['producers'] + "required": ["producers"], }, "freqai": { "type": "object", @@ -585,28 +648,30 @@ CONF_SCHEMA = { "principal_component_analysis": {"type": "boolean", "default": False}, "use_SVM_to_remove_outliers": {"type": "boolean", "default": False}, "plot_feature_importances": {"type": "integer", "default": 0}, - "svm_params": {"type": "object", - "properties": { - "shuffle": {"type": "boolean", "default": False}, - "nu": {"type": "number", "default": 0.1} - }, - }, + "svm_params": { + "type": "object", + "properties": { + "shuffle": {"type": "boolean", "default": False}, + "nu": {"type": "number", "default": 0.1}, + }, + }, "shuffle_after_split": {"type": "boolean", "default": False}, - "buffer_train_data_candles": {"type": "integer", "default": 0} + "buffer_train_data_candles": {"type": "integer", "default": 0}, }, - "required": ["include_timeframes", "include_corr_pairlist", ] + "required": [ + "include_timeframes", + "include_corr_pairlist", + ], }, "data_split_parameters": { "type": "object", "properties": { "test_size": {"type": "number"}, "random_state": {"type": "integer"}, - "shuffle": {"type": "boolean", "default": False} + "shuffle": {"type": "boolean", "default": False}, }, }, - "model_training_parameters": { - "type": "object" - }, + "model_training_parameters": {"type": "object"}, "rl_config": { "type": "object", "properties": { @@ -625,9 +690,9 @@ CONF_SCHEMA = { "type": "object", "properties": { "rr": {"type": "number", "default": 1}, - "profit_aim": {"type": "number", "default": 0.025} - } - } + "profit_aim": {"type": "number", "default": 0.025}, + 
}, + }, }, }, }, @@ -637,53 +702,53 @@ CONF_SCHEMA = { "backtest_period_days", "identifier", "feature_parameters", - "data_split_parameters" - ] + "data_split_parameters", + ], }, }, } SCHEMA_TRADE_REQUIRED = [ - 'exchange', - 'timeframe', - 'max_open_trades', - 'stake_currency', - 'stake_amount', - 'tradable_balance_ratio', - 'last_stake_amount_min_ratio', - 'dry_run', - 'dry_run_wallet', - 'exit_pricing', - 'entry_pricing', - 'stoploss', - 'minimal_roi', - 'internals', - 'dataformat_ohlcv', - 'dataformat_trades', + "exchange", + "timeframe", + "max_open_trades", + "stake_currency", + "stake_amount", + "tradable_balance_ratio", + "last_stake_amount_min_ratio", + "dry_run", + "dry_run_wallet", + "exit_pricing", + "entry_pricing", + "stoploss", + "minimal_roi", + "internals", + "dataformat_ohlcv", + "dataformat_trades", ] SCHEMA_BACKTEST_REQUIRED = [ - 'exchange', - 'stake_currency', - 'stake_amount', - 'dry_run_wallet', - 'dataformat_ohlcv', - 'dataformat_trades', + "exchange", + "stake_currency", + "stake_amount", + "dry_run_wallet", + "dataformat_ohlcv", + "dataformat_trades", ] SCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [ - 'stoploss', - 'minimal_roi', - 'max_open_trades' + "stoploss", + "minimal_roi", + "max_open_trades", ] SCHEMA_MINIMAL_REQUIRED = [ - 'exchange', - 'dry_run', - 'dataformat_ohlcv', - 'dataformat_trades', + "exchange", + "dry_run", + "dataformat_ohlcv", + "dataformat_trades", ] SCHEMA_MINIMAL_WEBSERVER = SCHEMA_MINIMAL_REQUIRED + [ - 'api_server', + "api_server", ] CANCEL_REASON = { @@ -696,7 +761,7 @@ CANCEL_REASON = { "FORCE_EXIT": "forcesold", "REPLACE": "cancelled to be replaced by new limit order", "REPLACE_FAILED": "failed to replace order, deleting Trade", - "USER_CANCEL": "user requested order cancel" + "USER_CANCEL": "user requested order cancel", } # List of pairs with their timeframes @@ -706,12 +771,12 @@ ListPairsWithTimeframes = List[PairWithTimeframe] # Type for trades list TradeList = List[List] -LongShort = Literal['long', 'short'] -EntryExit = Literal['entry', 'exit'] -BuySell = Literal['buy', 'sell'] -MakerTaker = Literal['maker', 'taker'] -BidAsk = Literal['bid', 'ask'] -OBLiteral = Literal['asks', 'bids'] +LongShort = Literal["long", "short"] +EntryExit = Literal["entry", "exit"] +BuySell = Literal["buy", "sell"] +MakerTaker = Literal["maker", "taker"] +BidAsk = Literal["bid", "ask"] +OBLiteral = Literal["asks", "bids"] Config = Dict[str, Any] # Exchange part of the configuration. 
@@ -719,4 +784,4 @@ ExchangeConfig = Dict[str, Any] IntOrInf = float -EntryExecuteMode = Literal['initial', 'pos_adjust', 'replace'] +EntryExecuteMode = Literal["initial", "pos_adjust", "replace"] diff --git a/freqtrade/data/__init__.py b/freqtrade/data/__init__.py index 0e7eea0d0..f716abfc5 100644 --- a/freqtrade/data/__init__.py +++ b/freqtrade/data/__init__.py @@ -3,6 +3,4 @@ Module to handle data operations for freqtrade """ # limit what's imported when using `from freqtrade.data import *` -__all__ = [ - 'converter' -] +__all__ = ["converter"] diff --git a/freqtrade/data/btanalysis.py b/freqtrade/data/btanalysis.py index 07417b27f..eef415879 100644 --- a/freqtrade/data/btanalysis.py +++ b/freqtrade/data/btanalysis.py @@ -1,6 +1,7 @@ """ Helpers when analyzing backtest data """ + import logging from copy import copy from datetime import datetime, timezone @@ -21,14 +22,35 @@ from freqtrade.types import BacktestHistoryEntryType, BacktestResultType logger = logging.getLogger(__name__) # Newest format -BT_DATA_COLUMNS = ['pair', 'stake_amount', 'max_stake_amount', 'amount', - 'open_date', 'close_date', 'open_rate', 'close_rate', - 'fee_open', 'fee_close', 'trade_duration', - 'profit_ratio', 'profit_abs', 'exit_reason', - 'initial_stop_loss_abs', 'initial_stop_loss_ratio', 'stop_loss_abs', - 'stop_loss_ratio', 'min_rate', 'max_rate', 'is_open', 'enter_tag', - 'leverage', 'is_short', 'open_timestamp', 'close_timestamp', 'orders' - ] +BT_DATA_COLUMNS = [ + "pair", + "stake_amount", + "max_stake_amount", + "amount", + "open_date", + "close_date", + "open_rate", + "close_rate", + "fee_open", + "fee_close", + "trade_duration", + "profit_ratio", + "profit_abs", + "exit_reason", + "initial_stop_loss_abs", + "initial_stop_loss_ratio", + "stop_loss_abs", + "stop_loss_ratio", + "min_rate", + "max_rate", + "is_open", + "enter_tag", + "leverage", + "is_short", + "open_timestamp", + "close_timestamp", + "orders", +] def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> str: @@ -50,15 +72,16 @@ def get_latest_optimize_filename(directory: Union[Path, str], variant: str) -> s if not filename.is_file(): raise ValueError( - f"Directory '{directory}' does not seem to contain backtest statistics yet.") + f"Directory '{directory}' does not seem to contain backtest statistics yet." 
+ ) with filename.open() as file: data = json_load(file) - if f'latest_{variant}' not in data: + if f"latest_{variant}" not in data: raise ValueError(f"Invalid '{LAST_BT_RESULT_FN}' format.") - return data[f'latest_{variant}'] + return data[f"latest_{variant}"] def get_latest_backtest_filename(directory: Union[Path, str]) -> str: @@ -71,7 +94,7 @@ def get_latest_backtest_filename(directory: Union[Path, str]) -> str: * `directory/.last_result.json` does not exist * `directory/.last_result.json` has the wrong content """ - return get_latest_optimize_filename(directory, 'backtest') + return get_latest_optimize_filename(directory, "backtest") def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str: @@ -85,14 +108,15 @@ def get_latest_hyperopt_filename(directory: Union[Path, str]) -> str: * `directory/.last_result.json` has the wrong content """ try: - return get_latest_optimize_filename(directory, 'hyperopt') + return get_latest_optimize_filename(directory, "hyperopt") except ValueError: # Return default (legacy) pickle filename - return 'hyperopt_results.pickle' + return "hyperopt_results.pickle" def get_latest_hyperopt_file( - directory: Union[Path, str], predef_filename: Optional[str] = None) -> Path: + directory: Union[Path, str], predef_filename: Optional[str] = None +) -> Path: """ Get latest hyperopt export based on '.last_result.json'. :param directory: Directory to search for last result @@ -107,7 +131,8 @@ def get_latest_hyperopt_file( if predef_filename: if Path(predef_filename).is_absolute(): raise ConfigurationError( - "--hyperopt-filename expects only the filename, not an absolute path.") + "--hyperopt-filename expects only the filename, not an absolute path." + ) return directory / predef_filename return directory / get_latest_hyperopt_filename(directory) @@ -126,7 +151,7 @@ def load_backtest_metadata(filename: Union[Path, str]) -> Dict[str, Any]: except FileNotFoundError: return {} except Exception as e: - raise OperationalException('Unexpected error while loading backtest metadata.') from e + raise OperationalException("Unexpected error while loading backtest metadata.") from e def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType: @@ -147,7 +172,7 @@ def load_backtest_stats(filename: Union[Path, str]) -> BacktestResultType: # Legacy list format does not contain metadata. if isinstance(data, dict): - data['metadata'] = load_backtest_metadata(filename) + data["metadata"] = load_backtest_metadata(filename) return data @@ -159,38 +184,39 @@ def load_and_merge_backtest_result(strategy_name: str, filename: Path, results: :param results: dict to merge the result to. """ bt_data = load_backtest_stats(filename) - k: Literal['metadata', 'strategy'] - for k in ('metadata', 'strategy'): # type: ignore + k: Literal["metadata", "strategy"] + for k in ("metadata", "strategy"): # type: ignore results[k][strategy_name] = bt_data[k][strategy_name] - results['metadata'][strategy_name]['filename'] = filename.stem - comparison = bt_data['strategy_comparison'] + results["metadata"][strategy_name]["filename"] = filename.stem + comparison = bt_data["strategy_comparison"] for i in range(len(comparison)): - if comparison[i]['key'] == strategy_name: - results['strategy_comparison'].append(comparison[i]) + if comparison[i]["key"] == strategy_name: + results["strategy_comparison"].append(comparison[i]) break def _get_backtest_files(dirname: Path) -> List[Path]: # Weird glob expression here avoids including .meta.json files. 
- return list(reversed(sorted(dirname.glob('backtest-result-*-[0-9][0-9].json')))) + return list(reversed(sorted(dirname.glob("backtest-result-*-[0-9][0-9].json")))) def _extract_backtest_result(filename: Path) -> List[BacktestHistoryEntryType]: metadata = load_backtest_metadata(filename) return [ { - 'filename': filename.stem, - 'strategy': s, - 'run_id': v['run_id'], - 'notes': v.get('notes', ''), + "filename": filename.stem, + "strategy": s, + "run_id": v["run_id"], + "notes": v.get("notes", ""), # Backtest "run" time - 'backtest_start_time': v['backtest_start_time'], + "backtest_start_time": v["backtest_start_time"], # Backtest timerange - 'backtest_start_ts': v.get('backtest_start_ts', None), - 'backtest_end_ts': v.get('backtest_end_ts', None), - 'timeframe': v.get('timeframe', None), - 'timeframe_detail': v.get('timeframe_detail', None), - } for s, v in metadata.items() + "backtest_start_ts": v.get("backtest_start_ts", None), + "backtest_end_ts": v.get("backtest_end_ts", None), + "timeframe": v.get("timeframe", None), + "timeframe_detail": v.get("timeframe_detail", None), + } + for s, v in metadata.items() ] @@ -218,7 +244,7 @@ def delete_backtest_result(file_abs: Path): """ # *.meta.json logger.info(f"Deleting backtest result file: {file_abs.name}") - file_abs_meta = file_abs.with_suffix('.meta.json') + file_abs_meta = file_abs.with_suffix(".meta.json") file_abs.unlink() file_abs_meta.unlink() @@ -244,12 +270,13 @@ def get_backtest_market_change(filename: Path, include_ts: bool = True) -> pd.Da """ df = pd.read_feather(filename) if include_ts: - df.loc[:, '__date_ts'] = df.loc[:, 'date'].astype(np.int64) // 1000 // 1000 + df.loc[:, "__date_ts"] = df.loc[:, "date"].astype(np.int64) // 1000 // 1000 return df -def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, str], - min_backtest_date: Optional[datetime] = None) -> Dict[str, Any]: +def find_existing_backtest_stats( + dirname: Union[Path, str], run_ids: Dict[str, str], min_backtest_date: Optional[datetime] = None +) -> Dict[str, Any]: """ Find existing backtest stats that match specified run IDs and load them. :param dirname: pathlib.Path object, or string pointing to the file. @@ -261,9 +288,9 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s run_ids = copy(run_ids) dirname = Path(dirname) results: Dict[str, Any] = { - 'metadata': {}, - 'strategy': {}, - 'strategy_comparison': [], + "metadata": {}, + "strategy": {}, + "strategy_comparison": [], } for filename in _get_backtest_files(dirname): @@ -280,14 +307,14 @@ def find_existing_backtest_stats(dirname: Union[Path, str], run_ids: Dict[str, s continue if min_backtest_date is not None: - backtest_date = strategy_metadata['backtest_start_time'] + backtest_date = strategy_metadata["backtest_start_time"] backtest_date = datetime.fromtimestamp(backtest_date, tz=timezone.utc) if backtest_date < min_backtest_date: # Do not use a cached result for this strategy as first result is too old. del run_ids[strategy_name] continue - if strategy_metadata['run_id'] == run_id: + if strategy_metadata["run_id"] == run_id: del run_ids[strategy_name] load_and_merge_backtest_result(strategy_name, filename, results) @@ -300,20 +327,20 @@ def _load_backtest_data_df_compatibility(df: pd.DataFrame) -> pd.DataFrame: """ Compatibility support for older backtest data. 
""" - df['open_date'] = pd.to_datetime(df['open_date'], utc=True) - df['close_date'] = pd.to_datetime(df['close_date'], utc=True) + df["open_date"] = pd.to_datetime(df["open_date"], utc=True) + df["close_date"] = pd.to_datetime(df["close_date"], utc=True) # Compatibility support for pre short Columns - if 'is_short' not in df.columns: - df['is_short'] = False - if 'leverage' not in df.columns: - df['leverage'] = 1.0 - if 'enter_tag' not in df.columns: - df['enter_tag'] = df['buy_tag'] - df = df.drop(['buy_tag'], axis=1) - if 'max_stake_amount' not in df.columns: - df['max_stake_amount'] = df['stake_amount'] - if 'orders' not in df.columns: - df['orders'] = None + if "is_short" not in df.columns: + df["is_short"] = False + if "leverage" not in df.columns: + df["leverage"] = 1.0 + if "enter_tag" not in df.columns: + df["enter_tag"] = df["buy_tag"] + df = df.drop(["buy_tag"], axis=1) + if "max_stake_amount" not in df.columns: + df["max_stake_amount"] = df["stake_amount"] + if "orders" not in df.columns: + df["orders"] = None return df @@ -329,23 +356,25 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non data = load_backtest_stats(filename) if not isinstance(data, list): # new, nested format - if 'strategy' not in data: + if "strategy" not in data: raise ValueError("Unknown dataformat.") if not strategy: - if len(data['strategy']) == 1: - strategy = list(data['strategy'].keys())[0] + if len(data["strategy"]) == 1: + strategy = list(data["strategy"].keys())[0] else: - raise ValueError("Detected backtest result with more than one strategy. " - "Please specify a strategy.") + raise ValueError( + "Detected backtest result with more than one strategy. " + "Please specify a strategy." + ) - if strategy not in data['strategy']: + if strategy not in data["strategy"]: raise ValueError( f"Strategy {strategy} not available in the backtest result. " f"Available strategies are '{','.join(data['strategy'].keys())}'" - ) + ) - data = data['strategy'][strategy]['trades'] + data = data["strategy"][strategy]["trades"] df = pd.DataFrame(data) if not df.empty: df = _load_backtest_data_df_compatibility(df) @@ -353,7 +382,8 @@ def load_backtest_data(filename: Union[Path, str], strategy: Optional[str] = Non else: # old format - only with lists. raise OperationalException( - "Backtest-results with only trades data are no longer supported.") + "Backtest-results with only trades data are no longer supported." 
+ ) if not df.empty: df = df.sort_values("open_date").reset_index(drop=True) return df @@ -368,23 +398,26 @@ def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataF :return: dataframe with open-counts per time-period in timeframe """ from freqtrade.exchange import timeframe_to_resample_freq + timeframe_freq = timeframe_to_resample_freq(timeframe) - dates = [pd.Series(pd.date_range(row[1]['open_date'], row[1]['close_date'], - freq=timeframe_freq)) - for row in results[['open_date', 'close_date']].iterrows()] + dates = [ + pd.Series(pd.date_range(row[1]["open_date"], row[1]["close_date"], freq=timeframe_freq)) + for row in results[["open_date", "close_date"]].iterrows() + ] deltas = [len(x) for x in dates] - dates = pd.Series(pd.concat(dates).values, name='date') + dates = pd.Series(pd.concat(dates).values, name="date") df2 = pd.DataFrame(np.repeat(results.values, deltas, axis=0), columns=results.columns) df2 = pd.concat([dates, df2], axis=1) - df2 = df2.set_index('date') - df_final = df2.resample(timeframe_freq)[['pair']].count() - df_final = df_final.rename({'pair': 'open_trades'}, axis=1) + df2 = df2.set_index("date") + df_final = df2.resample(timeframe_freq)[["pair"]].count() + df_final = df_final.rename({"pair": "open_trades"}, axis=1) return df_final -def evaluate_result_multi(results: pd.DataFrame, timeframe: str, - max_open_trades: IntOrInf) -> pd.DataFrame: +def evaluate_result_multi( + results: pd.DataFrame, timeframe: str, max_open_trades: IntOrInf +) -> pd.DataFrame: """ Find overlapping trades by expanding each trade once per period it was open and then counting overlaps @@ -394,7 +427,7 @@ def evaluate_result_multi(results: pd.DataFrame, timeframe: str, :return: dataframe with open-counts per time-period in freq """ df_final = analyze_trade_parallelism(results, timeframe) - return df_final[df_final['open_trades'] > max_open_trades] + return df_final[df_final["open_trades"] > max_open_trades] def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd.DataFrame: @@ -405,9 +438,9 @@ def trade_list_to_dataframe(trades: Union[List[Trade], List[LocalTrade]]) -> pd. 
""" df = pd.DataFrame.from_records([t.to_json(True) for t in trades], columns=BT_DATA_COLUMNS) if len(df) > 0: - df['close_date'] = pd.to_datetime(df['close_date'], utc=True) - df['open_date'] = pd.to_datetime(df['open_date'], utc=True) - df['close_rate'] = df['close_rate'].astype('float64') + df["close_date"] = pd.to_datetime(df["close_date"], utc=True) + df["open_date"] = pd.to_datetime(df["open_date"], utc=True) + df["close_rate"] = df["close_rate"].astype("float64") return df @@ -429,8 +462,13 @@ def load_trades_from_db(db_url: str, strategy: Optional[str] = None) -> pd.DataF return trades -def load_trades(source: str, db_url: str, exportfilename: Path, - no_trades: bool = False, strategy: Optional[str] = None) -> pd.DataFrame: +def load_trades( + source: str, + db_url: str, + exportfilename: Path, + no_trades: bool = False, + strategy: Optional[str] = None, +) -> pd.DataFrame: """ Based on configuration option 'trade_source': * loads data from DB (using `db_url`) @@ -451,8 +489,9 @@ def load_trades(source: str, db_url: str, exportfilename: Path, return load_backtest_data(exportfilename, strategy) -def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame, - date_index=False) -> pd.DataFrame: +def extract_trades_of_period( + dataframe: pd.DataFrame, trades: pd.DataFrame, date_index=False +) -> pd.DataFrame: """ Compare trades and backtested pair DataFrames to get trades performed on backtested period :return: the DataFrame of a trades of period @@ -461,8 +500,9 @@ def extract_trades_of_period(dataframe: pd.DataFrame, trades: pd.DataFrame, trades_start = dataframe.index[0] trades_stop = dataframe.index[-1] else: - trades_start = dataframe.iloc[0]['date'] - trades_stop = dataframe.iloc[-1]['date'] - trades = trades.loc[(trades['open_date'] >= trades_start) & - (trades['close_date'] <= trades_stop)] + trades_start = dataframe.iloc[0]["date"] + trades_stop = dataframe.iloc[-1]["date"] + trades = trades.loc[ + (trades["open_date"] >= trades_start) & (trades["close_date"] <= trades_stop) + ] return trades diff --git a/freqtrade/data/converter/__init__.py b/freqtrade/data/converter/__init__.py index 3918e49da..76c8a7edc 100644 --- a/freqtrade/data/converter/__init__.py +++ b/freqtrade/data/converter/__init__.py @@ -1,28 +1,38 @@ -from freqtrade.data.converter.converter import (clean_ohlcv_dataframe, convert_ohlcv_format, - ohlcv_fill_up_missing_data, ohlcv_to_dataframe, - order_book_to_dataframe, reduce_dataframe_footprint, - trim_dataframe, trim_dataframes) -from freqtrade.data.converter.trade_converter import (convert_trades_format, - convert_trades_to_ohlcv, trades_convert_types, - trades_df_remove_duplicates, - trades_dict_to_list, trades_list_to_df, - trades_to_ohlcv) +from freqtrade.data.converter.converter import ( + clean_ohlcv_dataframe, + convert_ohlcv_format, + ohlcv_fill_up_missing_data, + ohlcv_to_dataframe, + order_book_to_dataframe, + reduce_dataframe_footprint, + trim_dataframe, + trim_dataframes, +) +from freqtrade.data.converter.trade_converter import ( + convert_trades_format, + convert_trades_to_ohlcv, + trades_convert_types, + trades_df_remove_duplicates, + trades_dict_to_list, + trades_list_to_df, + trades_to_ohlcv, +) __all__ = [ - 'clean_ohlcv_dataframe', - 'convert_ohlcv_format', - 'ohlcv_fill_up_missing_data', - 'ohlcv_to_dataframe', - 'order_book_to_dataframe', - 'reduce_dataframe_footprint', - 'trim_dataframe', - 'trim_dataframes', - 'convert_trades_format', - 'convert_trades_to_ohlcv', - 'trades_convert_types', - 'trades_df_remove_duplicates', - 
'trades_dict_to_list', - 'trades_list_to_df', - 'trades_to_ohlcv', + "clean_ohlcv_dataframe", + "convert_ohlcv_format", + "ohlcv_fill_up_missing_data", + "ohlcv_to_dataframe", + "order_book_to_dataframe", + "reduce_dataframe_footprint", + "trim_dataframe", + "trim_dataframes", + "convert_trades_format", + "convert_trades_to_ohlcv", + "trades_convert_types", + "trades_df_remove_duplicates", + "trades_dict_to_list", + "trades_list_to_df", + "trades_to_ohlcv", ] diff --git a/freqtrade/data/converter/converter.py b/freqtrade/data/converter/converter.py index 0ebf24a4f..0475ddee2 100644 --- a/freqtrade/data/converter/converter.py +++ b/freqtrade/data/converter/converter.py @@ -1,6 +1,7 @@ """ Functions to convert data from one format to another """ + import logging from typing import Dict @@ -15,8 +16,14 @@ from freqtrade.enums import CandleType, TradingMode logger = logging.getLogger(__name__) -def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *, - fill_missing: bool = True, drop_incomplete: bool = True) -> DataFrame: +def ohlcv_to_dataframe( + ohlcv: list, + timeframe: str, + pair: str, + *, + fill_missing: bool = True, + drop_incomplete: bool = True, +) -> DataFrame: """ Converts a list with candle (OHLCV) data (in format returned by ccxt.fetch_ohlcv) to a Dataframe @@ -32,20 +39,28 @@ def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *, cols = DEFAULT_DATAFRAME_COLUMNS df = DataFrame(ohlcv, columns=cols) - df['date'] = to_datetime(df['date'], unit='ms', utc=True) + df["date"] = to_datetime(df["date"], unit="ms", utc=True) # Some exchanges return int values for Volume and even for OHLC. # Convert them since TA-LIB indicators used in the strategy assume floats # and fail with exception... - df = df.astype(dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', - 'volume': 'float'}) - return clean_ohlcv_dataframe(df, timeframe, pair, - fill_missing=fill_missing, - drop_incomplete=drop_incomplete) + df = df.astype( + dtype={ + "open": "float", + "high": "float", + "low": "float", + "close": "float", + "volume": "float", + } + ) + return clean_ohlcv_dataframe( + df, timeframe, pair, fill_missing=fill_missing, drop_incomplete=drop_incomplete + ) -def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *, - fill_missing: bool, drop_incomplete: bool) -> DataFrame: +def clean_ohlcv_dataframe( + data: DataFrame, timeframe: str, pair: str, *, fill_missing: bool, drop_incomplete: bool +) -> DataFrame: """ Cleanse a OHLCV dataframe by * Grouping it by date (removes duplicate tics) @@ -60,17 +75,19 @@ def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *, :return: DataFrame """ # group by index and aggregate results to eliminate duplicate ticks - data = data.groupby(by='date', as_index=False, sort=True).agg({ - 'open': 'first', - 'high': 'max', - 'low': 'min', - 'close': 'last', - 'volume': 'max', - }) + data = data.groupby(by="date", as_index=False, sort=True).agg( + { + "open": "first", + "high": "max", + "low": "min", + "close": "last", + "volume": "max", + } + ) # eliminate partial candle if drop_incomplete: data.drop(data.tail(1).index, inplace=True) - logger.debug('Dropping last candle') + logger.debug("Dropping last candle") if fill_missing: return ohlcv_fill_up_missing_data(data, timeframe, pair) @@ -81,37 +98,35 @@ def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *, def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str) -> DataFrame: """ Fills up missing data with 0 volume 
rows, - using the previous close as price for "open", "high" "low" and "close", volume is set to 0 + using the previous close as price for "open", "high", "low" and "close", volume is set to 0 """ from freqtrade.exchange import timeframe_to_resample_freq - ohlcv_dict = { - 'open': 'first', - 'high': 'max', - 'low': 'min', - 'close': 'last', - 'volume': 'sum' - } + ohlcv_dict = {"open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum"} resample_interval = timeframe_to_resample_freq(timeframe) # Resample to create "NAN" values - df = dataframe.resample(resample_interval, on='date').agg(ohlcv_dict) + df = dataframe.resample(resample_interval, on="date").agg(ohlcv_dict) # Forwardfill close for missing columns - df['close'] = df['close'].ffill() + df["close"] = df["close"].ffill() # Use close for "open, high, low" - df.loc[:, ['open', 'high', 'low']] = df[['open', 'high', 'low']].fillna( - value={'open': df['close'], - 'high': df['close'], - 'low': df['close'], - }) + df.loc[:, ["open", "high", "low"]] = df[["open", "high", "low"]].fillna( + value={ + "open": df["close"], + "high": df["close"], + "low": df["close"], + } + ) df.reset_index(inplace=True) len_before = len(dataframe) len_after = len(df) pct_missing = (len_after - len_before) / len_before if len_before > 0 else 0 if len_before != len_after: - message = (f"Missing data fillup for {pair}, {timeframe}: " - f"before: {len_before} - after: {len_after} - {pct_missing:.2%}") + message = ( + f"Missing data fillup for {pair}, {timeframe}: " + f"before: {len_before} - after: {len_after} - {pct_missing:.2%}" + ) if pct_missing > 0.01: logger.info(message) else: @@ -120,8 +135,9 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str) return df -def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date', - startup_candles: int = 0) -> DataFrame: +def trim_dataframe( + df: DataFrame, timerange, *, df_date_col: str = "date", startup_candles: int = 0 +) -> DataFrame: """ Trim dataframe based on given timerange :param df: Dataframe to trim @@ -134,15 +150,16 @@ def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date', # Trim candles instead of timeframe in case of given startup_candle count df = df.iloc[startup_candles:, :] else: - if timerange.starttype == 'date': + if timerange.starttype == "date": df = df.loc[df[df_date_col] >= timerange.startdt, :] - if timerange.stoptype == 'date': + if timerange.stoptype == "date": df = df.loc[df[df_date_col] <= timerange.stopdt, :] return df -def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange, - startup_candles: int) -> Dict[str, DataFrame]: +def trim_dataframes( + preprocessed: Dict[str, DataFrame], timerange, startup_candles: int +) -> Dict[str, DataFrame]: """ Trim startup period from analyzed dataframes :param preprocessed: Dict of pair: dataframe @@ -157,8 +174,9 @@ def trim_dataframes(preprocessed: Dict[str, DataFrame], timerange, if not trimed_df.empty: processed[pair] = trimed_df else: - logger.warning(f'{pair} has no data left after adjusting for startup candles, ' - f'skipping.') + logger.warning( + f"{pair} has no data left after adjusting for startup candles, skipping." 
+ ) return processed @@ -170,19 +188,28 @@ def order_book_to_dataframe(bids: list, asks: list) -> DataFrame: b_sum b_size bids asks a_size a_sum ------------------------------------------------------------------- """ - cols = ['bids', 'b_size'] + cols = ["bids", "b_size"] bids_frame = DataFrame(bids, columns=cols) # add cumulative sum column - bids_frame['b_sum'] = bids_frame['b_size'].cumsum() - cols2 = ['asks', 'a_size'] + bids_frame["b_sum"] = bids_frame["b_size"].cumsum() + cols2 = ["asks", "a_size"] asks_frame = DataFrame(asks, columns=cols2) # add cumulative sum column - asks_frame['a_sum'] = asks_frame['a_size'].cumsum() + asks_frame["a_sum"] = asks_frame["a_size"].cumsum() - frame = pd.concat([bids_frame['b_sum'], bids_frame['b_size'], bids_frame['bids'], - asks_frame['asks'], asks_frame['a_size'], asks_frame['a_sum']], axis=1, - keys=['b_sum', 'b_size', 'bids', 'asks', 'a_size', 'a_sum']) + frame = pd.concat( + [ + bids_frame["b_sum"], + bids_frame["b_size"], + bids_frame["bids"], + asks_frame["asks"], + asks_frame["a_size"], + asks_frame["a_sum"], + ], + axis=1, + keys=["b_sum", "b_size", "bids", "asks", "a_size", "a_sum"], + ) # logger.info('order book %s', frame ) return frame @@ -201,47 +228,51 @@ def convert_ohlcv_format( :param erase: Erase source data (does not apply if source and target format are identical) """ from freqtrade.data.history import get_datahandler - src = get_datahandler(config['datadir'], convert_from) - trg = get_datahandler(config['datadir'], convert_to) - timeframes = config.get('timeframes', [config.get('timeframe')]) + + src = get_datahandler(config["datadir"], convert_from) + trg = get_datahandler(config["datadir"], convert_to) + timeframes = config.get("timeframes", [config.get("timeframe")]) logger.info(f"Converting candle (OHLCV) for timeframe {timeframes}") - candle_types = [CandleType.from_string(ct) for ct in config.get('candle_types', [ - c.value for c in CandleType])] + candle_types = [ + CandleType.from_string(ct) + for ct in config.get("candle_types", [c.value for c in CandleType]) + ] logger.info(candle_types) - paircombs = src.ohlcv_get_available_data(config['datadir'], TradingMode.SPOT) - paircombs.extend(src.ohlcv_get_available_data(config['datadir'], TradingMode.FUTURES)) + paircombs = src.ohlcv_get_available_data(config["datadir"], TradingMode.SPOT) + paircombs.extend(src.ohlcv_get_available_data(config["datadir"], TradingMode.FUTURES)) - if 'pairs' in config: + if "pairs" in config: # Filter pairs - paircombs = [comb for comb in paircombs if comb[0] in config['pairs']] + paircombs = [comb for comb in paircombs if comb[0] in config["pairs"]] - if 'timeframes' in config: - paircombs = [comb for comb in paircombs if comb[1] in config['timeframes']] + if "timeframes" in config: + paircombs = [comb for comb in paircombs if comb[1] in config["timeframes"]] paircombs = [comb for comb in paircombs if comb[2] in candle_types] paircombs = sorted(paircombs, key=lambda x: (x[0], x[1], x[2].value)) - formatted_paircombs = '\n'.join([f"{pair}, {timeframe}, {candle_type}" - for pair, timeframe, candle_type in paircombs]) + formatted_paircombs = "\n".join( + [f"{pair}, {timeframe}, {candle_type}" for pair, timeframe, candle_type in paircombs] + ) - logger.info(f"Converting candle (OHLCV) data for the following pair combinations:\n" - f"{formatted_paircombs}") + logger.info( + f"Converting candle (OHLCV) data for the following pair combinations:\n" + f"{formatted_paircombs}" + ) for pair, timeframe, candle_type in paircombs: - data = 
src.ohlcv_load(pair=pair, timeframe=timeframe, - timerange=None, - fill_missing=False, - drop_incomplete=False, - startup_candles=0, - candle_type=candle_type) + data = src.ohlcv_load( + pair=pair, + timeframe=timeframe, + timerange=None, + fill_missing=False, + drop_incomplete=False, + startup_candles=0, + candle_type=candle_type, + ) logger.info(f"Converting {len(data)} {timeframe} {candle_type} candles for {pair}") if len(data) > 0: - trg.ohlcv_store( - pair=pair, - timeframe=timeframe, - data=data, - candle_type=candle_type - ) + trg.ohlcv_store(pair=pair, timeframe=timeframe, data=data, candle_type=candle_type) if erase and convert_from != convert_to: logger.info(f"Deleting source data for {pair} / {timeframe}") src.ohlcv_purge(pair=pair, timeframe=timeframe, candle_type=candle_type) @@ -254,12 +285,11 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame: :return: Dataframe converted to float/int 32s """ - logger.debug(f"Memory usage of dataframe is " - f"{df.memory_usage().sum() / 1024**2:.2f} MB") + logger.debug(f"Memory usage of dataframe is {df.memory_usage().sum() / 1024**2:.2f} MB") df_dtypes = df.dtypes for column, dtype in df_dtypes.items(): - if column in ['open', 'high', 'low', 'close', 'volume']: + if column in ["open", "high", "low", "close", "volume"]: continue if dtype == np.float64: df_dtypes[column] = np.float32 @@ -267,7 +297,6 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame: df_dtypes[column] = np.int32 df = df.astype(df_dtypes) - logger.debug(f"Memory usage after optimization is: " - f"{df.memory_usage().sum() / 1024**2:.2f} MB") + logger.debug(f"Memory usage after optimization is: {df.memory_usage().sum() / 1024**2:.2f} MB") return df diff --git a/freqtrade/data/converter/trade_converter.py b/freqtrade/data/converter/trade_converter.py index a7cc97cb8..9b8fe718e 100644 --- a/freqtrade/data/converter/trade_converter.py +++ b/freqtrade/data/converter/trade_converter.py @@ -1,6 +1,7 @@ """ Functions to convert data from one format to another """ + import logging from pathlib import Path from typing import Dict, List @@ -9,8 +10,13 @@ import pandas as pd from pandas import DataFrame, to_datetime from freqtrade.configuration import TimeRange -from freqtrade.constants import (DEFAULT_DATAFRAME_COLUMNS, DEFAULT_TRADES_COLUMNS, TRADES_DTYPES, - Config, TradeList) +from freqtrade.constants import ( + DEFAULT_DATAFRAME_COLUMNS, + DEFAULT_TRADES_COLUMNS, + TRADES_DTYPES, + Config, + TradeList, +) from freqtrade.enums import CandleType, TradingMode from freqtrade.exceptions import OperationalException @@ -25,7 +31,7 @@ def trades_df_remove_duplicates(trades: pd.DataFrame) -> pd.DataFrame: :param trades: DataFrame with the columns constants.DEFAULT_TRADES_COLUMNS :return: DataFrame with duplicates removed based on the 'timestamp' column """ - return trades.drop_duplicates(subset=['timestamp', 'id']) + return trades.drop_duplicates(subset=["timestamp", "id"]) def trades_dict_to_list(trades: List[Dict]) -> TradeList: @@ -42,7 +48,7 @@ def trades_convert_types(trades: DataFrame) -> DataFrame: Convert Trades dtypes and add 'date' column """ trades = trades.astype(TRADES_DTYPES) - trades['date'] = to_datetime(trades['timestamp'], unit='ms', utc=True) + trades["date"] = to_datetime(trades["timestamp"], unit="ms", utc=True) return trades @@ -71,13 +77,14 @@ def trades_to_ohlcv(trades: DataFrame, timeframe: str) -> DataFrame: :raises: ValueError if no trades are provided """ from freqtrade.exchange import timeframe_to_resample_freq + if trades.empty: - raise 
ValueError('Trade-list empty.') - df = trades.set_index('date', drop=True) + raise ValueError("Trade-list empty.") + df = trades.set_index("date", drop=True) resample_interval = timeframe_to_resample_freq(timeframe) - df_new = df['price'].resample(resample_interval).ohlc() - df_new['volume'] = df['amount'].resample(resample_interval).sum() - df_new['date'] = df_new.index + df_new = df["price"].resample(resample_interval).ohlc() + df_new["volume"] = df["amount"].resample(resample_interval).sum() + df_new["date"] = df_new.index # Drop 0 volume rows df_new = df_new.dropna() return df_new.loc[:, DEFAULT_DATAFRAME_COLUMNS] @@ -97,24 +104,27 @@ def convert_trades_to_ohlcv( Convert stored trades data to ohlcv data """ from freqtrade.data.history import get_datahandler + data_handler_trades = get_datahandler(datadir, data_format=data_format_trades) data_handler_ohlcv = get_datahandler(datadir, data_format=data_format_ohlcv) - logger.info(f"About to convert pairs: '{', '.join(pairs)}', " - f"intervals: '{', '.join(timeframes)}' to {datadir}") + logger.info( + f"About to convert pairs: '{', '.join(pairs)}', " + f"intervals: '{', '.join(timeframes)}' to {datadir}" + ) trading_mode = TradingMode.FUTURES if candle_type != CandleType.SPOT else TradingMode.SPOT for pair in pairs: trades = data_handler_trades.trades_load(pair, trading_mode) for timeframe in timeframes: if erase: if data_handler_ohlcv.ohlcv_purge(pair, timeframe, candle_type=candle_type): - logger.info(f'Deleting existing data for pair {pair}, interval {timeframe}.') + logger.info(f"Deleting existing data for pair {pair}, interval {timeframe}.") try: ohlcv = trades_to_ohlcv(trades, timeframe) # Store ohlcv data_handler_ohlcv.ohlcv_store(pair, timeframe, data=ohlcv, candle_type=candle_type) except ValueError: - logger.warning(f'Could not convert {pair} to OHLCV.') + logger.warning(f"Could not convert {pair} to OHLCV.") def convert_trades_format(config: Config, convert_from: str, convert_to: str, erase: bool): @@ -125,25 +135,27 @@ def convert_trades_format(config: Config, convert_from: str, convert_to: str, er :param convert_to: Target format :param erase: Erase source data (does not apply if source and target format are identical) """ - if convert_from == 'kraken_csv': - if config['exchange']['name'] != 'kraken': + if convert_from == "kraken_csv": + if config["exchange"]["name"] != "kraken": raise OperationalException( - 'Converting from csv is only supported for kraken.' - 'Please refer to the documentation for details about this special mode.' + "Converting from csv is only supported for kraken." + "Please refer to the documentation for details about this special mode." 
) from freqtrade.data.converter.trade_converter_kraken import import_kraken_trades_from_csv + import_kraken_trades_from_csv(config, convert_to) return from freqtrade.data.history import get_datahandler - src = get_datahandler(config['datadir'], convert_from) - trg = get_datahandler(config['datadir'], convert_to) - if 'pairs' not in config: - config['pairs'] = src.trades_get_pairs(config['datadir']) + src = get_datahandler(config["datadir"], convert_from) + trg = get_datahandler(config["datadir"], convert_to) + + if "pairs" not in config: + config["pairs"] = src.trades_get_pairs(config["datadir"]) logger.info(f"Converting trades for {config['pairs']}") - trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) - for pair in config['pairs']: + trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT) + for pair in config["pairs"]: data = src.trades_load(pair, trading_mode) logger.info(f"Converting {len(data)} trades for {pair}") trg.trades_store(pair, data, trading_mode) diff --git a/freqtrade/data/converter/trade_converter_kraken.py b/freqtrade/data/converter/trade_converter_kraken.py index 90f7df28f..911fcd17b 100644 --- a/freqtrade/data/converter/trade_converter_kraken.py +++ b/freqtrade/data/converter/trade_converter_kraken.py @@ -4,8 +4,10 @@ from pathlib import Path import pandas as pd from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_TRADES_COLUMNS, Config -from freqtrade.data.converter.trade_converter import (trades_convert_types, - trades_df_remove_duplicates) +from freqtrade.data.converter.trade_converter import ( + trades_convert_types, + trades_df_remove_duplicates, +) from freqtrade.data.history import get_datahandler from freqtrade.enums import TradingMode from freqtrade.exceptions import OperationalException @@ -15,32 +17,33 @@ from freqtrade.resolvers import ExchangeResolver logger = logging.getLogger(__name__) -KRAKEN_CSV_TRADE_COLUMNS = ['timestamp', 'price', 'amount'] +KRAKEN_CSV_TRADE_COLUMNS = ["timestamp", "price", "amount"] def import_kraken_trades_from_csv(config: Config, convert_to: str): """ Import kraken trades from csv """ - if config['exchange']['name'] != 'kraken': - raise OperationalException('This function is only for the kraken exchange.') + if config["exchange"]["name"] != "kraken": + raise OperationalException("This function is only for the kraken exchange.") - datadir: Path = config['datadir'] + datadir: Path = config["datadir"] data_handler = get_datahandler(datadir, data_format=convert_to) - tradesdir: Path = config['datadir'] / 'trades_csv' + tradesdir: Path = config["datadir"] / "trades_csv" exchange = ExchangeResolver.load_exchange(config, validate=False) # iterate through directories in this directory - data_symbols = {p.stem for p in tradesdir.rglob('*.csv')} + data_symbols = {p.stem for p in tradesdir.rglob("*.csv")} # create pair/filename mapping markets = { - (m['symbol'], m['altname']) for m in exchange.markets.values() - if m.get('altname') in data_symbols + (m["symbol"], m["altname"]) + for m in exchange.markets.values() + if m.get("altname") in data_symbols } logger.info(f"Found csv files for {', '.join(data_symbols)}.") - if pairs_raw := config.get('pairs'): + if pairs_raw := config.get("pairs"): pairs = expand_pairlist(pairs_raw, [m[0] for m in markets]) markets = {m for m in markets if m[0] in pairs} if not markets: @@ -66,18 +69,20 @@ def import_kraken_trades_from_csv(config: Config, convert_to: str): trades = pd.concat(dfs, ignore_index=True) del dfs - trades.loc[:, 'timestamp'] = 
trades['timestamp'] * 1e3 - trades.loc[:, 'cost'] = trades['price'] * trades['amount'] + trades.loc[:, "timestamp"] = trades["timestamp"] * 1e3 + trades.loc[:, "cost"] = trades["price"] * trades["amount"] for col in DEFAULT_TRADES_COLUMNS: if col not in trades.columns: - trades.loc[:, col] = '' + trades.loc[:, col] = "" trades = trades[DEFAULT_TRADES_COLUMNS] trades = trades_convert_types(trades) trades_df = trades_df_remove_duplicates(trades) del trades - logger.info(f"{pair}: {len(trades_df)} trades, from " - f"{trades_df['date'].min():{DATETIME_PRINT_FORMAT}} to " - f"{trades_df['date'].max():{DATETIME_PRINT_FORMAT}}") + logger.info( + f"{pair}: {len(trades_df)} trades, from " + f"{trades_df['date'].min():{DATETIME_PRINT_FORMAT}} to " + f"{trades_df['date'].max():{DATETIME_PRINT_FORMAT}}" + ) data_handler.trades_store(pair, trades_df, TradingMode.SPOT) diff --git a/freqtrade/data/dataprovider.py b/freqtrade/data/dataprovider.py index 6fa6e4738..777f99895 100644 --- a/freqtrade/data/dataprovider.py +++ b/freqtrade/data/dataprovider.py @@ -4,6 +4,7 @@ Responsible to provide data to the bot including ticker and orderbook data, live and historical candle (OHLCV) data Common Interface for bot and strategy to access data. """ + import logging from collections import deque from datetime import datetime, timezone @@ -12,8 +13,12 @@ from typing import Any, Dict, List, Optional, Tuple from pandas import DataFrame, Timedelta, Timestamp, to_timedelta from freqtrade.configuration import TimeRange -from freqtrade.constants import (FULL_DATAFRAME_THRESHOLD, Config, ListPairsWithTimeframes, - PairWithTimeframe) +from freqtrade.constants import ( + FULL_DATAFRAME_THRESHOLD, + Config, + ListPairsWithTimeframes, + PairWithTimeframe, +) from freqtrade.data.history import load_pair_history from freqtrade.enums import CandleType, RPCMessageType, RunMode from freqtrade.exceptions import ExchangeError, OperationalException @@ -27,18 +32,17 @@ from freqtrade.util import PeriodicCache logger = logging.getLogger(__name__) -NO_EXCHANGE_EXCEPTION = 'Exchange is not available to DataProvider.' +NO_EXCHANGE_EXCEPTION = "Exchange is not available to DataProvider." 
MAX_DATAFRAME_CANDLES = 1000 class DataProvider: - def __init__( self, config: Config, exchange: Optional[Exchange], pairlists=None, - rpc: Optional[RPCManager] = None + rpc: Optional[RPCManager] = None, ) -> None: self._config = config self._exchange = exchange @@ -49,18 +53,20 @@ class DataProvider: self.__slice_date: Optional[datetime] = None self.__cached_pairs_backtesting: Dict[PairWithTimeframe, DataFrame] = {} - self.__producer_pairs_df: Dict[str, - Dict[PairWithTimeframe, Tuple[DataFrame, datetime]]] = {} + self.__producer_pairs_df: Dict[ + str, Dict[PairWithTimeframe, Tuple[DataFrame, datetime]] + ] = {} self.__producer_pairs: Dict[str, List[str]] = {} self._msg_queue: deque = deque() - self._default_candle_type = self._config.get('candle_type_def', CandleType.SPOT) - self._default_timeframe = self._config.get('timeframe', '1h') + self._default_candle_type = self._config.get("candle_type_def", CandleType.SPOT) + self._default_timeframe = self._config.get("timeframe", "1h") self.__msg_cache = PeriodicCache( - maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe)) + maxsize=1000, ttl=timeframe_to_seconds(self._default_timeframe) + ) - self.producers = self._config.get('external_message_consumer', {}).get('producers', []) + self.producers = self._config.get("external_message_consumer", {}).get("producers", []) self.external_data_enabled = len(self.producers) > 0 def _set_dataframe_max_index(self, limit_index: int): @@ -80,11 +86,7 @@ class DataProvider: self.__slice_date = limit_date def _set_cached_df( - self, - pair: str, - timeframe: str, - dataframe: DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, dataframe: DataFrame, candle_type: CandleType ) -> None: """ Store cached Dataframe. @@ -96,8 +98,7 @@ class DataProvider: :param candle_type: Any of the enum CandleType (must match trading mode!) """ pair_key = (pair, timeframe, candle_type) - self.__cached_pairs[pair_key] = ( - dataframe, datetime.now(timezone.utc)) + self.__cached_pairs[pair_key] = (dataframe, datetime.now(timezone.utc)) # For multiple producers we will want to merge the pairlists instead of overwriting def _set_producer_pairs(self, pairlist: List[str], producer_name: str = "default"): @@ -116,12 +117,7 @@ class DataProvider: """ return self.__producer_pairs.get(producer_name, []).copy() - def _emit_df( - self, - pair_key: PairWithTimeframe, - dataframe: DataFrame, - new_candle: bool - ) -> None: + def _emit_df(self, pair_key: PairWithTimeframe, dataframe: DataFrame, new_candle: bool) -> None: """ Send this dataframe as an ANALYZED_DF message to RPC @@ -131,19 +127,21 @@ class DataProvider: """ if self.__rpc: msg: RPCAnalyzedDFMsg = { - 'type': RPCMessageType.ANALYZED_DF, - 'data': { - 'key': pair_key, - 'df': dataframe.tail(1), - 'la': datetime.now(timezone.utc) - } - } + "type": RPCMessageType.ANALYZED_DF, + "data": { + "key": pair_key, + "df": dataframe.tail(1), + "la": datetime.now(timezone.utc), + }, + } self.__rpc.send_msg(msg) if new_candle: - self.__rpc.send_msg({ - 'type': RPCMessageType.NEW_CANDLE, - 'data': pair_key, - }) + self.__rpc.send_msg( + { + "type": RPCMessageType.NEW_CANDLE, + "data": pair_key, + } + ) def _replace_external_df( self, @@ -152,7 +150,7 @@ class DataProvider: last_analyzed: datetime, timeframe: str, candle_type: CandleType, - producer_name: str = "default" + producer_name: str = "default", ) -> None: """ Add the pair data to this class from an external source. 
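The dataprovider.py hunks follow the same mechanical pattern as the rest of this patch: single-quoted strings become double-quoted, long signatures and calls are wrapped one argument per line, and a trailing comma keeps the wrapped form stable, consistent with an automated formatter pass in the black/ruff style. A minimal before/after sketch of that convention (the `fetch_candles` helper is hypothetical and exists only to make the snippet self-contained):

```python
def fetch_candles(pair: str, timeframe: str, fill_missing: bool) -> list:
    """Hypothetical helper, present only to make the snippet runnable."""
    return []


# Style before the patch: single quotes, arguments packed onto one line.
data_before = fetch_candles(pair='BTC/USDT', timeframe='5m', fill_missing=True)

# Style after the patch: double quotes, one argument per line, trailing comma.
data_after = fetch_candles(
    pair="BTC/USDT",
    timeframe="5m",
    fill_missing=True,
)
```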
@@ -178,7 +176,7 @@ class DataProvider: last_analyzed: datetime, timeframe: str, candle_type: CandleType, - producer_name: str = "default" + producer_name: str = "default", ) -> Tuple[bool, int]: """ Append a candle to the existing external dataframe. The incoming dataframe @@ -204,12 +202,14 @@ class DataProvider: last_analyzed=last_analyzed, timeframe=timeframe, candle_type=candle_type, - producer_name=producer_name + producer_name=producer_name, ) return (True, 0) - if (producer_name not in self.__producer_pairs_df - or pair_key not in self.__producer_pairs_df[producer_name]): + if ( + producer_name not in self.__producer_pairs_df + or pair_key not in self.__producer_pairs_df[producer_name] + ): # We don't have data from this producer yet, # or we don't have data for this pair_key # return False and 1000 for the full df @@ -220,12 +220,12 @@ class DataProvider: # CHECK FOR MISSING CANDLES # Convert the timeframe to a timedelta for pandas timeframe_delta: Timedelta = to_timedelta(timeframe) - local_last: Timestamp = existing_df.iloc[-1]['date'] # We want the last date from our copy + local_last: Timestamp = existing_df.iloc[-1]["date"] # We want the last date from our copy # We want the first date from the incoming - incoming_first: Timestamp = dataframe.iloc[0]['date'] + incoming_first: Timestamp = dataframe.iloc[0]["date"] # Remove existing candles that are newer than the incoming first candle - existing_df1 = existing_df[existing_df['date'] < incoming_first] + existing_df1 = existing_df[existing_df["date"] < incoming_first] candle_difference = (incoming_first - local_last) / timeframe_delta @@ -243,13 +243,13 @@ class DataProvider: # Everything is good, we appended self._replace_external_df( - pair, - appended_df, - last_analyzed=last_analyzed, - timeframe=timeframe, - candle_type=candle_type, - producer_name=producer_name - ) + pair, + appended_df, + last_analyzed=last_analyzed, + timeframe=timeframe, + candle_type=candle_type, + producer_name=producer_name, + ) return (True, 0) def get_producer_df( @@ -257,7 +257,7 @@ class DataProvider: pair: str, timeframe: Optional[str] = None, candle_type: Optional[CandleType] = None, - producer_name: str = "default" + producer_name: str = "default", ) -> Tuple[DataFrame, datetime]: """ Get the pair data from producers. 
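The `DataProvider` accessors reformatted just above and in the following hunk (`get_producer_df`, `historic_ohlcv`, `get_pair_dataframe`) change only in layout, not behaviour. As a reminder of how that API is consumed, a rough strategy-side sketch follows; the pair, timeframe, and class skeleton are placeholders for illustration, not part of this patch:

```python
from pandas import DataFrame

from freqtrade.strategy import IStrategy


class DataProviderSketch(IStrategy):
    timeframe = "5m"
    minimal_roi = {"0": 0.10}
    stoploss = -0.10

    def informative_pairs(self):
        # Ask the bot to keep the informative pair refreshed in dry/live runs.
        return [("BTC/USDT", "1h")]

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Live/dry-run: latest exchange candles; backtesting: cached history,
        # cut off at the current simulation date to avoid lookahead bias.
        btc_1h = self.dp.get_pair_dataframe(pair="BTC/USDT", timeframe="1h")
        if not btc_1h.empty:
            # e.g. hand the candles to merge_informative_pair() or derive a
            # simple regime flag from them here.
            pass
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe
```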
@@ -292,64 +292,64 @@ class DataProvider: """ self._pairlists = pairlists - def historic_ohlcv( - self, - pair: str, - timeframe: str, - candle_type: str = '' - ) -> DataFrame: + def historic_ohlcv(self, pair: str, timeframe: str, candle_type: str = "") -> DataFrame: """ Get stored historical candle (OHLCV) data :param pair: pair to get the data for :param timeframe: timeframe to get data for :param candle_type: '', mark, index, premiumIndex, or funding_rate """ - _candle_type = CandleType.from_string( - candle_type) if candle_type != '' else self._config['candle_type_def'] + _candle_type = ( + CandleType.from_string(candle_type) + if candle_type != "" + else self._config["candle_type_def"] + ) saved_pair: PairWithTimeframe = (pair, str(timeframe), _candle_type) if saved_pair not in self.__cached_pairs_backtesting: - timerange = TimeRange.parse_timerange(None if self._config.get( - 'timerange') is None else str(self._config.get('timerange'))) + timerange = TimeRange.parse_timerange( + None + if self._config.get("timerange") is None + else str(self._config.get("timerange")) + ) startup_candles = self.get_required_startup(str(timeframe)) tf_seconds = timeframe_to_seconds(str(timeframe)) timerange.subtract_start(tf_seconds * startup_candles) - logger.info(f"Loading data for {pair} {timeframe} " - f"from {timerange.start_fmt} to {timerange.stop_fmt}") + logger.info( + f"Loading data for {pair} {timeframe} " + f"from {timerange.start_fmt} to {timerange.stop_fmt}" + ) self.__cached_pairs_backtesting[saved_pair] = load_pair_history( pair=pair, timeframe=timeframe, - datadir=self._config['datadir'], + datadir=self._config["datadir"], timerange=timerange, - data_format=self._config['dataformat_ohlcv'], + data_format=self._config["dataformat_ohlcv"], candle_type=_candle_type, - ) return self.__cached_pairs_backtesting[saved_pair].copy() def get_required_startup(self, timeframe: str) -> int: - freqai_config = self._config.get('freqai', {}) - if not freqai_config.get('enabled', False): - return self._config.get('startup_candle_count', 0) + freqai_config = self._config.get("freqai", {}) + if not freqai_config.get("enabled", False): + return self._config.get("startup_candle_count", 0) else: - startup_candles = self._config.get('startup_candle_count', 0) - indicator_periods = freqai_config['feature_parameters']['indicator_periods_candles'] + startup_candles = self._config.get("startup_candle_count", 0) + indicator_periods = freqai_config["feature_parameters"]["indicator_periods_candles"] # make sure the startupcandles is at least the set maximum indicator periods - self._config['startup_candle_count'] = max(startup_candles, max(indicator_periods)) + self._config["startup_candle_count"] = max(startup_candles, max(indicator_periods)) tf_seconds = timeframe_to_seconds(timeframe) - train_candles = freqai_config['train_period_days'] * 86400 / tf_seconds - total_candles = int(self._config['startup_candle_count'] + train_candles) + train_candles = freqai_config["train_period_days"] * 86400 / tf_seconds + total_candles = int(self._config["startup_candle_count"] + train_candles) logger.info( - f'Increasing startup_candle_count for freqai on {timeframe} to {total_candles}') + f"Increasing startup_candle_count for freqai on {timeframe} to {total_candles}" + ) return total_candles def get_pair_dataframe( - self, - pair: str, - timeframe: Optional[str] = None, - candle_type: str = '' + self, pair: str, timeframe: Optional[str] = None, candle_type: str = "" ) -> DataFrame: """ Return pair candle (OHLCV) data, either 
live or cached historical -- depending @@ -366,13 +366,13 @@ class DataProvider: data = self.ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type) else: # Get historical OHLCV data (cached on disk). - timeframe = timeframe or self._config['timeframe'] + timeframe = timeframe or self._config["timeframe"] data = self.historic_ohlcv(pair=pair, timeframe=timeframe, candle_type=candle_type) # Cut date to timeframe-specific date. # This is necessary to prevent lookahead bias in callbacks through informative pairs. if self.__slice_date: cutoff_date = timeframe_to_prev_date(timeframe, self.__slice_date) - data = data.loc[data['date'] < cutoff_date] + data = data.loc[data["date"] < cutoff_date] if len(data) == 0: logger.warning(f"No data found for ({pair}, {timeframe}, {candle_type}).") return data @@ -387,7 +387,7 @@ class DataProvider: combination. Returns empty dataframe and Epoch 0 (1970-01-01) if no dataframe was cached. """ - pair_key = (pair, timeframe, self._config.get('candle_type_def', CandleType.SPOT)) + pair_key = (pair, timeframe, self._config.get("candle_type_def", CandleType.SPOT)) if pair_key in self.__cached_pairs: if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE): df, date = self.__cached_pairs[pair_key] @@ -395,7 +395,7 @@ class DataProvider: df, date = self.__cached_pairs[pair_key] if self.__slice_index is not None: max_index = self.__slice_index - df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES):max_index] + df = df.iloc[max(0, max_index - MAX_DATAFRAME_CANDLES) : max_index] return df, date else: return (DataFrame(), datetime.fromtimestamp(0, tz=timezone.utc)) @@ -406,7 +406,7 @@ class DataProvider: Get runmode of the bot can be "live", "dry-run", "backtest", "edgecli", "hyperopt" or "other". """ - return RunMode(self._config.get('runmode', RunMode.OTHER)) + return RunMode(self._config.get("runmode", RunMode.OTHER)) def current_whitelist(self) -> List[str]: """ @@ -434,9 +434,11 @@ class DataProvider: # Exchange functions - def refresh(self, - pairlist: ListPairsWithTimeframes, - helping_pairs: Optional[ListPairsWithTimeframes] = None) -> None: + def refresh( + self, + pairlist: ListPairsWithTimeframes, + helping_pairs: Optional[ListPairsWithTimeframes] = None, + ) -> None: """ Refresh data, called with each cycle """ @@ -456,11 +458,7 @@ class DataProvider: return list(self._exchange._klines.keys()) def ohlcv( - self, - pair: str, - timeframe: Optional[str] = None, - copy: bool = True, - candle_type: str = '' + self, pair: str, timeframe: Optional[str] = None, copy: bool = True, candle_type: str = "" ) -> DataFrame: """ Get candle (OHLCV) data for the given pair as DataFrame @@ -474,11 +472,13 @@ class DataProvider: if self._exchange is None: raise OperationalException(NO_EXCHANGE_EXCEPTION) if self.runmode in (RunMode.DRY_RUN, RunMode.LIVE): - _candle_type = CandleType.from_string( - candle_type) if candle_type != '' else self._config['candle_type_def'] + _candle_type = ( + CandleType.from_string(candle_type) + if candle_type != "" + else self._config["candle_type_def"] + ) return self._exchange.klines( - (pair, timeframe or self._config['timeframe'], _candle_type), - copy=copy + (pair, timeframe or self._config["timeframe"], _candle_type), copy=copy ) else: return DataFrame() diff --git a/freqtrade/data/entryexitanalysis.py b/freqtrade/data/entryexitanalysis.py index 36587e573..9d936d295 100644 --- a/freqtrade/data/entryexitanalysis.py +++ b/freqtrade/data/entryexitanalysis.py @@ -8,8 +8,11 @@ from tabulate import tabulate from freqtrade.configuration import 
TimeRange from freqtrade.constants import Config -from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, - load_backtest_stats) +from freqtrade.data.btanalysis import ( + get_latest_backtest_filename, + load_backtest_data, + load_backtest_stats, +) from freqtrade.exceptions import OperationalException @@ -18,9 +21,10 @@ logger = logging.getLogger(__name__) def _load_backtest_analysis_data(backtest_dir: Path, name: str): if backtest_dir.is_dir(): - scpf = Path(backtest_dir, - Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl" - ) + scpf = Path( + backtest_dir, + Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl", + ) else: scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl") @@ -53,7 +57,8 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_cand for pair in pairlist: if pair in signal_candles[strategy_name]: analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators( - pair, trades, signal_candles[strategy_name][pair]) + pair, trades, signal_candles[strategy_name][pair] + ) except Exception as e: print(f"Cannot process entry/exit reasons for {strategy_name}: ", e) @@ -64,28 +69,28 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles: buyf = signal_candles if len(buyf) > 0: - buyf = buyf.set_index('date', drop=False) - trades_red = trades.loc[trades['pair'] == pair].copy() + buyf = buyf.set_index("date", drop=False) + trades_red = trades.loc[trades["pair"] == pair].copy() trades_inds = pd.DataFrame() if trades_red.shape[0] > 0 and buyf.shape[0] > 0: for t, v in trades_red.open_date.items(): - allinds = buyf.loc[(buyf['date'] < v)] + allinds = buyf.loc[(buyf["date"] < v)] if allinds.shape[0] > 0: tmp_inds = allinds.iloc[[-1]] - trades_red.loc[t, 'signal_date'] = tmp_inds['date'].values[0] - trades_red.loc[t, 'enter_reason'] = trades_red.loc[t, 'enter_tag'] - tmp_inds.index.rename('signal_date', inplace=True) + trades_red.loc[t, "signal_date"] = tmp_inds["date"].values[0] + trades_red.loc[t, "enter_reason"] = trades_red.loc[t, "enter_tag"] + tmp_inds.index.rename("signal_date", inplace=True) trades_inds = pd.concat([trades_inds, tmp_inds]) - if 'signal_date' in trades_red: - trades_red['signal_date'] = pd.to_datetime(trades_red['signal_date'], utc=True) - trades_red.set_index('signal_date', inplace=True) + if "signal_date" in trades_red: + trades_red["signal_date"] = pd.to_datetime(trades_red["signal_date"], utc=True) + trades_red.set_index("signal_date", inplace=True) try: - trades_red = pd.merge(trades_red, trades_inds, on='signal_date', how='outer') + trades_red = pd.merge(trades_red, trades_inds, on="signal_date", how="outer") except Exception as e: raise e return trades_red @@ -93,138 +98,166 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles: return pd.DataFrame() -def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False, ): +def _do_group_table_output( + bigdf, + glist, + csv_path: Path, + to_csv=False, +): for g in glist: # 0: summary wins/losses grouped by enter tag if g == "0": - group_mask = ['enter_reason'] - wins = bigdf.loc[bigdf['profit_abs'] >= 0] \ - .groupby(group_mask) \ - .agg({'profit_abs': ['sum']}) + group_mask = ["enter_reason"] + wins = ( + bigdf.loc[bigdf["profit_abs"] >= 0].groupby(group_mask).agg({"profit_abs": ["sum"]}) + ) - wins.columns = ['profit_abs_wins'] - loss = bigdf.loc[bigdf['profit_abs'] < 0] \ - .groupby(group_mask) \ - .agg({'profit_abs': 
['sum']}) - loss.columns = ['profit_abs_loss'] + wins.columns = ["profit_abs_wins"] + loss = ( + bigdf.loc[bigdf["profit_abs"] < 0].groupby(group_mask).agg({"profit_abs": ["sum"]}) + ) + loss.columns = ["profit_abs_loss"] - new = bigdf.groupby(group_mask).agg({'profit_abs': [ - 'count', - lambda x: sum(x > 0), - lambda x: sum(x <= 0)]}) + new = bigdf.groupby(group_mask).agg( + {"profit_abs": ["count", lambda x: sum(x > 0), lambda x: sum(x <= 0)]} + ) new = pd.concat([new, wins, loss], axis=1).fillna(0) - new['profit_tot'] = new['profit_abs_wins'] - abs(new['profit_abs_loss']) - new['wl_ratio_pct'] = (new.iloc[:, 1] / new.iloc[:, 0] * 100).fillna(0) - new['avg_win'] = (new['profit_abs_wins'] / new.iloc[:, 1]).fillna(0) - new['avg_loss'] = (new['profit_abs_loss'] / new.iloc[:, 2]).fillna(0) + new["profit_tot"] = new["profit_abs_wins"] - abs(new["profit_abs_loss"]) + new["wl_ratio_pct"] = (new.iloc[:, 1] / new.iloc[:, 0] * 100).fillna(0) + new["avg_win"] = (new["profit_abs_wins"] / new.iloc[:, 1]).fillna(0) + new["avg_loss"] = (new["profit_abs_loss"] / new.iloc[:, 2]).fillna(0) - new['exp_ratio'] = ( - ( - (1 + (new['avg_win'] / abs(new['avg_loss']))) * (new['wl_ratio_pct'] / 100) - ) - 1).fillna(0) + new["exp_ratio"] = ( + ((1 + (new["avg_win"] / abs(new["avg_loss"]))) * (new["wl_ratio_pct"] / 100)) - 1 + ).fillna(0) - new.columns = ['total_num_buys', 'wins', 'losses', - 'profit_abs_wins', 'profit_abs_loss', - 'profit_tot', 'wl_ratio_pct', - 'avg_win', 'avg_loss', 'exp_ratio'] + new.columns = [ + "total_num_buys", + "wins", + "losses", + "profit_abs_wins", + "profit_abs_loss", + "profit_tot", + "wl_ratio_pct", + "avg_win", + "avg_loss", + "exp_ratio", + ] - sortcols = ['total_num_buys'] + sortcols = ["total_num_buys"] - _print_table(new, sortcols, show_index=True, name="Group 0:", - to_csv=to_csv, csv_path=csv_path) + _print_table( + new, sortcols, show_index=True, name="Group 0:", to_csv=to_csv, csv_path=csv_path + ) else: - agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'], - 'profit_ratio': ['median', 'mean', 'sum']} - agg_cols = ['num_buys', 'profit_abs_sum', 'profit_abs_median', - 'profit_abs_mean', 'median_profit_pct', 'mean_profit_pct', - 'total_profit_pct'] - sortcols = ['profit_abs_sum', 'enter_reason'] + agg_mask = { + "profit_abs": ["count", "sum", "median", "mean"], + "profit_ratio": ["median", "mean", "sum"], + } + agg_cols = [ + "num_buys", + "profit_abs_sum", + "profit_abs_median", + "profit_abs_mean", + "median_profit_pct", + "mean_profit_pct", + "total_profit_pct", + ] + sortcols = ["profit_abs_sum", "enter_reason"] # 1: profit summaries grouped by enter_tag if g == "1": - group_mask = ['enter_reason'] + group_mask = ["enter_reason"] # 2: profit summaries grouped by enter_tag and exit_tag if g == "2": - group_mask = ['enter_reason', 'exit_reason'] + group_mask = ["enter_reason", "exit_reason"] # 3: profit summaries grouped by pair and enter_tag if g == "3": - group_mask = ['pair', 'enter_reason'] + group_mask = ["pair", "enter_reason"] # 4: profit summaries grouped by pair, enter_ and exit_tag (this can get quite large) if g == "4": - group_mask = ['pair', 'enter_reason', 'exit_reason'] + group_mask = ["pair", "enter_reason", "exit_reason"] # 5: profit summaries grouped by exit_tag if g == "5": - group_mask = ['exit_reason'] - sortcols = ['exit_reason'] + group_mask = ["exit_reason"] + sortcols = ["exit_reason"] if group_mask: new = bigdf.groupby(group_mask).agg(agg_mask).reset_index() new.columns = group_mask + agg_cols - new['median_profit_pct'] = 
new['median_profit_pct'] * 100 - new['mean_profit_pct'] = new['mean_profit_pct'] * 100 - new['total_profit_pct'] = new['total_profit_pct'] * 100 + new["median_profit_pct"] = new["median_profit_pct"] * 100 + new["mean_profit_pct"] = new["mean_profit_pct"] * 100 + new["total_profit_pct"] = new["total_profit_pct"] * 100 - _print_table(new, sortcols, name=f"Group {g}:", - to_csv=to_csv, csv_path=csv_path) + _print_table(new, sortcols, name=f"Group {g}:", to_csv=to_csv, csv_path=csv_path) else: logger.warning("Invalid group mask specified.") -def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame, - to_csv: bool = False, csv_path=None) -> None: - cols = ['pair', 'date', 'enter_tag'] - sortcols = ['date', 'pair', 'enter_tag'] - _print_table(rejected_signals_df[cols], - sortcols, - show_index=False, - name="Rejected Signals:", - to_csv=to_csv, - csv_path=csv_path) +def _do_rejected_signals_output( + rejected_signals_df: pd.DataFrame, to_csv: bool = False, csv_path=None +) -> None: + cols = ["pair", "date", "enter_tag"] + sortcols = ["date", "pair", "enter_tag"] + _print_table( + rejected_signals_df[cols], + sortcols, + show_index=False, + name="Rejected Signals:", + to_csv=to_csv, + csv_path=csv_path, + ) -def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'): +def _select_rows_within_dates(df, timerange=None, df_date_col: str = "date"): if timerange: - if timerange.starttype == 'date': + if timerange.starttype == "date": df = df.loc[(df[df_date_col] >= timerange.startdt)] - if timerange.stoptype == 'date': + if timerange.stoptype == "date": df = df.loc[(df[df_date_col] < timerange.stopdt)] return df def _select_rows_by_tags(df, enter_reason_list, exit_reason_list): if enter_reason_list and "all" not in enter_reason_list: - df = df.loc[(df['enter_reason'].isin(enter_reason_list))] + df = df.loc[(df["enter_reason"].isin(enter_reason_list))] if exit_reason_list and "all" not in exit_reason_list: - df = df.loc[(df['exit_reason'].isin(exit_reason_list))] + df = df.loc[(df["exit_reason"].isin(exit_reason_list))] return df -def prepare_results(analysed_trades, stratname, - enter_reason_list, exit_reason_list, - timerange=None): +def prepare_results( + analysed_trades, stratname, enter_reason_list, exit_reason_list, timerange=None +): res_df = pd.DataFrame() for pair, trades in analysed_trades[stratname].items(): - if (trades.shape[0] > 0): - trades.dropna(subset=['close_date'], inplace=True) + if trades.shape[0] > 0: + trades.dropna(subset=["close_date"], inplace=True) res_df = pd.concat([res_df, trades], ignore_index=True) res_df = _select_rows_within_dates(res_df, timerange) - if res_df is not None and res_df.shape[0] > 0 and ('enter_reason' in res_df.columns): + if res_df is not None and res_df.shape[0] > 0 and ("enter_reason" in res_df.columns): res_df = _select_rows_by_tags(res_df, enter_reason_list, exit_reason_list) return res_df -def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str], - csv_path: Path, rejected_signals=None, to_csv=False): +def print_results( + res_df: pd.DataFrame, + analysis_groups: List[str], + indicator_list: List[str], + csv_path: Path, + rejected_signals=None, + to_csv=False, +): if res_df.shape[0] > 0: if analysis_groups: _do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path) @@ -237,30 +270,31 @@ def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_li # NB this can be large for big dataframes! 
if "all" in indicator_list: - _print_table(res_df, - show_index=False, - name="Indicators:", - to_csv=to_csv, - csv_path=csv_path) + _print_table( + res_df, show_index=False, name="Indicators:", to_csv=to_csv, csv_path=csv_path + ) elif indicator_list is not None and indicator_list: available_inds = [] for ind in indicator_list: if ind in res_df: available_inds.append(ind) ilist = ["pair", "enter_reason", "exit_reason"] + available_inds - _print_table(res_df[ilist], - sortcols=['exit_reason'], - show_index=False, - name="Indicators:", - to_csv=to_csv, - csv_path=csv_path) + _print_table( + res_df[ilist], + sortcols=["exit_reason"], + show_index=False, + name="Indicators:", + to_csv=to_csv, + csv_path=csv_path, + ) else: print("\\No trades to show") -def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None, - to_csv=False, csv_path: Path): - if (sortcols is not None): +def _print_table( + df: pd.DataFrame, sortcols=None, *, show_index=False, name=None, to_csv=False, csv_path: Path +): + if sortcols is not None: data = df.sort_values(sortcols) else: data = df @@ -273,60 +307,64 @@ def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None if name is not None: print(name) - print( - tabulate( - data, - headers='keys', - tablefmt='psql', - showindex=show_index - ) - ) + print(tabulate(data, headers="keys", tablefmt="psql", showindex=show_index)) def process_entry_exit_reasons(config: Config): try: - analysis_groups = config.get('analysis_groups', []) - enter_reason_list = config.get('enter_reason_list', ["all"]) - exit_reason_list = config.get('exit_reason_list', ["all"]) - indicator_list = config.get('indicator_list', []) - do_rejected = config.get('analysis_rejected', False) - to_csv = config.get('analysis_to_csv', False) - csv_path = Path(config.get('analysis_csv_path', config['exportfilename'])) + analysis_groups = config.get("analysis_groups", []) + enter_reason_list = config.get("enter_reason_list", ["all"]) + exit_reason_list = config.get("exit_reason_list", ["all"]) + indicator_list = config.get("indicator_list", []) + do_rejected = config.get("analysis_rejected", False) + to_csv = config.get("analysis_to_csv", False) + csv_path = Path(config.get("analysis_csv_path", config["exportfilename"])) if to_csv and not csv_path.is_dir(): raise OperationalException(f"Specified directory {csv_path} does not exist.") - timerange = TimeRange.parse_timerange(None if config.get( - 'timerange') is None else str(config.get('timerange'))) + timerange = TimeRange.parse_timerange( + None if config.get("timerange") is None else str(config.get("timerange")) + ) - backtest_stats = load_backtest_stats(config['exportfilename']) + backtest_stats = load_backtest_stats(config["exportfilename"]) - for strategy_name, results in backtest_stats['strategy'].items(): - trades = load_backtest_data(config['exportfilename'], strategy_name) + for strategy_name, results in backtest_stats["strategy"].items(): + trades = load_backtest_data(config["exportfilename"], strategy_name) if trades is not None and not trades.empty: - signal_candles = _load_signal_candles(config['exportfilename']) + signal_candles = _load_signal_candles(config["exportfilename"]) rej_df = None if do_rejected: - rejected_signals_dict = _load_rejected_signals(config['exportfilename']) - rej_df = prepare_results(rejected_signals_dict, strategy_name, - enter_reason_list, exit_reason_list, - timerange=timerange) + rejected_signals_dict = _load_rejected_signals(config["exportfilename"]) + rej_df = 
prepare_results( + rejected_signals_dict, + strategy_name, + enter_reason_list, + exit_reason_list, + timerange=timerange, + ) analysed_trades_dict = _process_candles_and_indicators( - config['exchange']['pair_whitelist'], strategy_name, - trades, signal_candles) + config["exchange"]["pair_whitelist"], strategy_name, trades, signal_candles + ) - res_df = prepare_results(analysed_trades_dict, strategy_name, - enter_reason_list, exit_reason_list, - timerange=timerange) + res_df = prepare_results( + analysed_trades_dict, + strategy_name, + enter_reason_list, + exit_reason_list, + timerange=timerange, + ) - print_results(res_df, - analysis_groups, - indicator_list, - rejected_signals=rej_df, - to_csv=to_csv, - csv_path=csv_path) + print_results( + res_df, + analysis_groups, + indicator_list, + rejected_signals=rej_df, + to_csv=to_csv, + csv_path=csv_path, + ) except ValueError as e: raise OperationalException(e) from e diff --git a/freqtrade/data/history/__init__.py b/freqtrade/data/history/__init__.py index f989fd801..bce4bc284 100644 --- a/freqtrade/data/history/__init__.py +++ b/freqtrade/data/history/__init__.py @@ -5,8 +5,17 @@ Includes: * load data for a pair (or a list of pairs) from disk * download data from exchange and store to disk """ + # flake8: noqa: F401 from .datahandlers import get_datahandler -from .history_utils import (convert_trades_to_ohlcv, download_data_main, get_timerange, load_data, - load_pair_history, refresh_backtest_ohlcv_data, - refresh_backtest_trades_data, refresh_data, validate_backtest_data) +from .history_utils import ( + convert_trades_to_ohlcv, + download_data_main, + get_timerange, + load_data, + load_pair_history, + refresh_backtest_ohlcv_data, + refresh_backtest_trades_data, + refresh_data, + validate_backtest_data, +) diff --git a/freqtrade/data/history/datahandlers/featherdatahandler.py b/freqtrade/data/history/datahandlers/featherdatahandler.py index 6d57dbed7..8b1acb09c 100644 --- a/freqtrade/data/history/datahandlers/featherdatahandler.py +++ b/freqtrade/data/history/datahandlers/featherdatahandler.py @@ -14,11 +14,11 @@ logger = logging.getLogger(__name__) class FeatherDataHandler(IDataHandler): - _columns = DEFAULT_DATAFRAME_COLUMNS def ohlcv_store( - self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None: + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType + ) -> None: """ Store data in json format "values". format looks as follows: @@ -33,11 +33,12 @@ class FeatherDataHandler(IDataHandler): self.create_dir_if_needed(filename) data.reset_index(drop=True).loc[:, self._columns].to_feather( - filename, compression_level=9, compression='lz4') + filename, compression_level=9, compression="lz4" + ) - def _ohlcv_load(self, pair: str, timeframe: str, - timerange: Optional[TimeRange], candle_type: CandleType - ) -> DataFrame: + def _ohlcv_load( + self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType + ) -> DataFrame: """ Internal method used to load data for one pair from disk. Implements the loading and conversion to a Pandas dataframe. @@ -50,28 +51,31 @@ class FeatherDataHandler(IDataHandler): :param candle_type: Any of the enum CandleType (must match trading mode!) 
:return: DataFrame with ohlcv data, or empty DataFrame """ - filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type) + filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type) if not filename.exists(): # Fallback mode for 1M files filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True) + self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True + ) if not filename.exists(): return DataFrame(columns=self._columns) pairdata = read_feather(filename) pairdata.columns = self._columns - pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float', - 'low': 'float', 'close': 'float', 'volume': 'float'}) - pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True) + pairdata = pairdata.astype( + dtype={ + "open": "float", + "high": "float", + "low": "float", + "close": "float", + "volume": "float", + } + ) + pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True) return pairdata def ohlcv_append( - self, - pair: str, - timeframe: str, - data: DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType ) -> None: """ Append data to existing data structures @@ -92,7 +96,7 @@ class FeatherDataHandler(IDataHandler): """ filename = self._pair_trades_filename(self._datadir, pair, trading_mode) self.create_dir_if_needed(filename) - data.reset_index(drop=True).to_feather(filename, compression_level=9, compression='lz4') + data.reset_index(drop=True).to_feather(filename, compression_level=9, compression="lz4") def trades_append(self, pair: str, data: DataFrame): """ @@ -104,7 +108,7 @@ class FeatherDataHandler(IDataHandler): raise NotImplementedError() def _trades_load( - self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None + self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None ) -> DataFrame: """ Load a pair from file, either .json.gz or .json diff --git a/freqtrade/data/history/datahandlers/hdf5datahandler.py b/freqtrade/data/history/datahandlers/hdf5datahandler.py index cb2cdd884..99d0a98a6 100644 --- a/freqtrade/data/history/datahandlers/hdf5datahandler.py +++ b/freqtrade/data/history/datahandlers/hdf5datahandler.py @@ -15,11 +15,11 @@ logger = logging.getLogger(__name__) class HDF5DataHandler(IDataHandler): - _columns = DEFAULT_DATAFRAME_COLUMNS def ohlcv_store( - self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType) -> None: + self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType + ) -> None: """ Store data in hdf5 file. :param pair: Pair - used to generate filename @@ -35,13 +35,18 @@ class HDF5DataHandler(IDataHandler): self.create_dir_if_needed(filename) _data.loc[:, self._columns].to_hdf( - filename, key=key, mode='a', complevel=9, complib='blosc', - format='table', data_columns=['date'] + filename, + key=key, + mode="a", + complevel=9, + complib="blosc", + format="table", + data_columns=["date"], ) - def _ohlcv_load(self, pair: str, timeframe: str, - timerange: Optional[TimeRange], candle_type: CandleType - ) -> pd.DataFrame: + def _ohlcv_load( + self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType + ) -> pd.DataFrame: """ Internal method used to load data for one pair from disk. Implements the loading and conversion to a Pandas dataframe. 
@@ -55,41 +60,40 @@ class HDF5DataHandler(IDataHandler): :return: DataFrame with ohlcv data, or empty DataFrame """ key = self._pair_ohlcv_key(pair, timeframe) - filename = self._pair_data_filename( - self._datadir, - pair, - timeframe, - candle_type=candle_type - ) + filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type) if not filename.exists(): # Fallback mode for 1M files filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True) + self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True + ) if not filename.exists(): return pd.DataFrame(columns=self._columns) where = [] if timerange: - if timerange.starttype == 'date': + if timerange.starttype == "date": where.append(f"date >= Timestamp({timerange.startts * 1e9})") - if timerange.stoptype == 'date': + if timerange.stoptype == "date": where.append(f"date <= Timestamp({timerange.stopts * 1e9})") pairdata = pd.read_hdf(filename, key=key, mode="r", where=where) if list(pairdata.columns) != self._columns: raise ValueError("Wrong dataframe format") - pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float', - 'low': 'float', 'close': 'float', 'volume': 'float'}) + pairdata = pairdata.astype( + dtype={ + "open": "float", + "high": "float", + "low": "float", + "close": "float", + "volume": "float", + } + ) pairdata = pairdata.reset_index(drop=True) return pairdata def ohlcv_append( - self, - pair: str, - timeframe: str, - data: pd.DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, data: pd.DataFrame, candle_type: CandleType ) -> None: """ Append data to existing data structures @@ -111,9 +115,13 @@ class HDF5DataHandler(IDataHandler): key = self._pair_trades_key(pair) data.to_hdf( - self._pair_trades_filename(self._datadir, pair, trading_mode), key=key, - mode='a', complevel=9, complib='blosc', - format='table', data_columns=['timestamp'] + self._pair_trades_filename(self._datadir, pair, trading_mode), + key=key, + mode="a", + complevel=9, + complib="blosc", + format="table", + data_columns=["timestamp"], ) def trades_append(self, pair: str, data: pd.DataFrame): @@ -142,13 +150,13 @@ class HDF5DataHandler(IDataHandler): return pd.DataFrame(columns=DEFAULT_TRADES_COLUMNS) where = [] if timerange: - if timerange.starttype == 'date': + if timerange.starttype == "date": where.append(f"timestamp >= {timerange.startts * 1e3}") - if timerange.stoptype == 'date': + if timerange.stoptype == "date": where.append(f"timestamp < {timerange.stopts * 1e3}") trades: pd.DataFrame = pd.read_hdf(filename, key=key, mode="r", where=where) - trades[['id', 'type']] = trades[['id', 'type']].replace({np.nan: None}) + trades[["id", "type"]] = trades[["id", "type"]].replace({np.nan: None}) return trades @classmethod @@ -158,7 +166,7 @@ class HDF5DataHandler(IDataHandler): @classmethod def _pair_ohlcv_key(cls, pair: str, timeframe: str) -> str: # Escape futures pairs to avoid warnings - pair_esc = pair.replace(':', '_') + pair_esc = pair.replace(":", "_") return f"{pair_esc}/ohlcv/tf_{timeframe}" @classmethod diff --git a/freqtrade/data/history/datahandlers/idatahandler.py b/freqtrade/data/history/datahandlers/idatahandler.py index cff26760f..e335ea770 100644 --- a/freqtrade/data/history/datahandlers/idatahandler.py +++ b/freqtrade/data/history/datahandlers/idatahandler.py @@ -3,6 +3,7 @@ Abstract datahandler interface. It's subclasses handle and storing data from disk. 
""" + import logging import re from abc import ABC, abstractmethod @@ -16,8 +17,12 @@ from pandas import DataFrame from freqtrade import misc from freqtrade.configuration import TimeRange from freqtrade.constants import DEFAULT_TRADES_COLUMNS, ListPairsWithTimeframes -from freqtrade.data.converter import (clean_ohlcv_dataframe, trades_convert_types, - trades_df_remove_duplicates, trim_dataframe) +from freqtrade.data.converter import ( + clean_ohlcv_dataframe, + trades_convert_types, + trades_df_remove_duplicates, + trim_dataframe, +) from freqtrade.enums import CandleType, TradingMode from freqtrade.exchange import timeframe_to_seconds @@ -26,8 +31,7 @@ logger = logging.getLogger(__name__) class IDataHandler(ABC): - - _OHLCV_REGEX = r'^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)' + _OHLCV_REGEX = r"^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)" def __init__(self, datadir: Path) -> None: self._datadir = datadir @@ -41,7 +45,8 @@ class IDataHandler(ABC): @classmethod def ohlcv_get_available_data( - cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes: + cls, datadir: Path, trading_mode: TradingMode + ) -> ListPairsWithTimeframes: """ Returns a list of all pairs with ohlcv data available in this datadir :param datadir: Directory to search for ohlcv files @@ -49,17 +54,20 @@ class IDataHandler(ABC): :return: List of Tuples of (pair, timeframe, CandleType) """ if trading_mode == TradingMode.FUTURES: - datadir = datadir.joinpath('futures') + datadir = datadir.joinpath("futures") _tmp = [ - re.search( - cls._OHLCV_REGEX, p.name - ) for p in datadir.glob(f"*.{cls._get_file_extension()}")] + re.search(cls._OHLCV_REGEX, p.name) + for p in datadir.glob(f"*.{cls._get_file_extension()}") + ] return [ ( cls.rebuild_pair_from_filename(match[1]), cls.rebuild_timeframe_from_filename(match[2]), - CandleType.from_string(match[3]) - ) for match in _tmp if match and len(match.groups()) > 1] + CandleType.from_string(match[3]), + ) + for match in _tmp + if match and len(match.groups()) > 1 + ] @classmethod def ohlcv_get_pairs(cls, datadir: Path, timeframe: str, candle_type: CandleType) -> List[str]: @@ -73,17 +81,20 @@ class IDataHandler(ABC): """ candle = "" if candle_type != CandleType.SPOT: - datadir = datadir.joinpath('futures') + datadir = datadir.joinpath("futures") candle = f"-{candle_type}" ext = cls._get_file_extension() - _tmp = [re.search(r'^(\S+)(?=\-' + timeframe + candle + f'.{ext})', p.name) - for p in datadir.glob(f"*{timeframe}{candle}.{ext}")] + _tmp = [ + re.search(r"^(\S+)(?=\-" + timeframe + candle + f".{ext})", p.name) + for p in datadir.glob(f"*{timeframe}{candle}.{ext}") + ] # Check if regex found something and only return these results return [cls.rebuild_pair_from_filename(match[0]) for match in _tmp if match] @abstractmethod def ohlcv_store( - self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None: + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType + ) -> None: """ Store ohlcv data. :param pair: Pair - used to generate filename @@ -93,8 +104,9 @@ class IDataHandler(ABC): :return: None """ - def ohlcv_data_min_max(self, pair: str, timeframe: str, - candle_type: CandleType) -> Tuple[datetime, datetime, int]: + def ohlcv_data_min_max( + self, pair: str, timeframe: str, candle_type: CandleType + ) -> Tuple[datetime, datetime, int]: """ Returns the min and max timestamp for the given pair and timeframe. 
:param pair: Pair to get min/max for @@ -109,12 +121,12 @@ class IDataHandler(ABC): datetime.fromtimestamp(0, tz=timezone.utc), 0, ) - return df.iloc[0]['date'].to_pydatetime(), df.iloc[-1]['date'].to_pydatetime(), len(df) + return df.iloc[0]["date"].to_pydatetime(), df.iloc[-1]["date"].to_pydatetime(), len(df) @abstractmethod - def _ohlcv_load(self, pair: str, timeframe: str, timerange: Optional[TimeRange], - candle_type: CandleType - ) -> DataFrame: + def _ohlcv_load( + self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType + ) -> DataFrame: """ Internal method used to load data for one pair from disk. Implements the loading and conversion to a Pandas dataframe. @@ -144,11 +156,7 @@ class IDataHandler(ABC): @abstractmethod def ohlcv_append( - self, - pair: str, - timeframe: str, - data: DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType ) -> None: """ Append data to existing data structures @@ -166,8 +174,10 @@ class IDataHandler(ABC): :return: List of Pairs """ _ext = cls._get_file_extension() - _tmp = [re.search(r'^(\S+)(?=\-trades.' + _ext + ')', p.name) - for p in datadir.glob(f"*trades.{_ext}")] + _tmp = [ + re.search(r"^(\S+)(?=\-trades." + _ext + ")", p.name) + for p in datadir.glob(f"*trades.{_ext}") + ] # Check if regex found something and only return these results to avoid exceptions. return [cls.rebuild_pair_from_filename(match[0]) for match in _tmp if match] @@ -227,7 +237,7 @@ class IDataHandler(ABC): return False def trades_load( - self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None + self, pair: str, trading_mode: TradingMode, timerange: Optional[TimeRange] = None ) -> DataFrame: """ Load a pair from file, either .json.gz or .json @@ -260,7 +270,7 @@ class IDataHandler(ABC): pair: str, timeframe: str, candle_type: CandleType, - no_timeframe_modify: bool = False + no_timeframe_modify: bool = False, ) -> Path: pair_s = misc.pair_to_filename(pair) candle = "" @@ -268,10 +278,9 @@ class IDataHandler(ABC): timeframe = cls.timeframe_to_file(timeframe) if candle_type != CandleType.SPOT: - datadir = datadir.joinpath('futures') + datadir = datadir.joinpath("futures") candle = f"-{candle_type}" - filename = datadir.joinpath( - f'{pair_s}-{timeframe}{candle}.{cls._get_file_extension()}') + filename = datadir.joinpath(f"{pair_s}-{timeframe}{candle}.{cls._get_file_extension()}") return filename @classmethod @@ -279,14 +288,14 @@ class IDataHandler(ABC): pair_s = misc.pair_to_filename(pair) if trading_mode == TradingMode.FUTURES: # Futures pair ... - datadir = datadir.joinpath('futures') + datadir = datadir.joinpath("futures") - filename = datadir.joinpath(f'{pair_s}-trades.{cls._get_file_extension()}') + filename = datadir.joinpath(f"{pair_s}-trades.{cls._get_file_extension()}") return filename @staticmethod def timeframe_to_file(timeframe: str): - return timeframe.replace('M', 'Mo') + return timeframe.replace("M", "Mo") @staticmethod def rebuild_timeframe_from_filename(timeframe: str) -> str: @@ -294,7 +303,7 @@ class IDataHandler(ABC): converts timeframe from disk to file Replaces mo with M (to avoid problems on case-insensitive filesystems) """ - return re.sub('1mo', '1M', timeframe, flags=re.IGNORECASE) + return re.sub("1mo", "1M", timeframe, flags=re.IGNORECASE) @staticmethod def rebuild_pair_from_filename(pair: str) -> str: @@ -302,18 +311,22 @@ class IDataHandler(ABC): Rebuild pair name from filename Assumes a asset name of max. 
7 length to also support BTC-PERP and BTC-PERP:USD names. """ - res = re.sub(r'^(([A-Za-z\d]{1,10})|^([A-Za-z\-]{1,6}))(_)', r'\g<1>/', pair, count=1) - res = re.sub('_', ':', res, count=1) + res = re.sub(r"^(([A-Za-z\d]{1,10})|^([A-Za-z\-]{1,6}))(_)", r"\g<1>/", pair, count=1) + res = re.sub("_", ":", res, count=1) return res - def ohlcv_load(self, pair, timeframe: str, - candle_type: CandleType, *, - timerange: Optional[TimeRange] = None, - fill_missing: bool = True, - drop_incomplete: bool = False, - startup_candles: int = 0, - warn_no_data: bool = True, - ) -> DataFrame: + def ohlcv_load( + self, + pair, + timeframe: str, + candle_type: CandleType, + *, + timerange: Optional[TimeRange] = None, + fill_missing: bool = True, + drop_incomplete: bool = False, + startup_candles: int = 0, + warn_no_data: bool = True, + ) -> DataFrame: """ Load cached candle (OHLCV) data for the given pair. @@ -333,15 +346,12 @@ class IDataHandler(ABC): timerange_startup.subtract_start(timeframe_to_seconds(timeframe) * startup_candles) pairdf = self._ohlcv_load( - pair, - timeframe, - timerange=timerange_startup, - candle_type=candle_type + pair, timeframe, timerange=timerange_startup, candle_type=candle_type ) if self._check_empty_df(pairdf, pair, timeframe, candle_type, warn_no_data): return pairdf else: - enddate = pairdf.iloc[-1]['date'] + enddate = pairdf.iloc[-1]["date"] if timerange_startup: self._validate_pairdata(pair, pairdf, timeframe, candle_type, timerange_startup) @@ -350,17 +360,25 @@ class IDataHandler(ABC): return pairdf # incomplete candles should only be dropped if we didn't trim the end beforehand. - pairdf = clean_ohlcv_dataframe(pairdf, timeframe, - pair=pair, - fill_missing=fill_missing, - drop_incomplete=(drop_incomplete and - enddate == pairdf.iloc[-1]['date'])) + pairdf = clean_ohlcv_dataframe( + pairdf, + timeframe, + pair=pair, + fill_missing=fill_missing, + drop_incomplete=(drop_incomplete and enddate == pairdf.iloc[-1]["date"]), + ) self._check_empty_df(pairdf, pair, timeframe, candle_type, warn_no_data) return pairdf def _check_empty_df( - self, pairdf: DataFrame, pair: str, timeframe: str, candle_type: CandleType, - warn_no_data: bool, warn_price: bool = False) -> bool: + self, + pairdf: DataFrame, + pair: str, + timeframe: str, + candle_type: CandleType, + warn_no_data: bool, + warn_price: bool = False, + ) -> bool: """ Warn on empty dataframe """ @@ -373,39 +391,55 @@ class IDataHandler(ABC): return True elif warn_price: candle_price_gap = 0 - if (candle_type in (CandleType.SPOT, CandleType.FUTURES) and - not pairdf.empty - and 'close' in pairdf.columns and 'open' in pairdf.columns): + if ( + candle_type in (CandleType.SPOT, CandleType.FUTURES) + and not pairdf.empty + and "close" in pairdf.columns + and "open" in pairdf.columns + ): # Detect gaps between prior close and open - gaps = ((pairdf['open'] - pairdf['close'].shift(1)) / pairdf['close'].shift(1)) + gaps = (pairdf["open"] - pairdf["close"].shift(1)) / pairdf["close"].shift(1) gaps = gaps.dropna() if len(gaps): candle_price_gap = max(abs(gaps)) if candle_price_gap > 0.1: - logger.info(f"Price jump in {pair}, {timeframe}, {candle_type} between two candles " - f"of {candle_price_gap:.2%} detected.") + logger.info( + f"Price jump in {pair}, {timeframe}, {candle_type} between two candles " + f"of {candle_price_gap:.2%} detected." 
+ ) return False - def _validate_pairdata(self, pair, pairdata: DataFrame, timeframe: str, - candle_type: CandleType, timerange: TimeRange): + def _validate_pairdata( + self, + pair, + pairdata: DataFrame, + timeframe: str, + candle_type: CandleType, + timerange: TimeRange, + ): """ Validates pairdata for missing data at start end end and logs warnings. :param pairdata: Dataframe to validate :param timerange: Timerange specified for start and end dates """ - if timerange.starttype == 'date': - if pairdata.iloc[0]['date'] > timerange.startdt: - logger.warning(f"{pair}, {candle_type}, {timeframe}, " - f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}") - if timerange.stoptype == 'date': - if pairdata.iloc[-1]['date'] < timerange.stopdt: - logger.warning(f"{pair}, {candle_type}, {timeframe}, " - f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}") + if timerange.starttype == "date": + if pairdata.iloc[0]["date"] > timerange.startdt: + logger.warning( + f"{pair}, {candle_type}, {timeframe}, " + f"data starts at {pairdata.iloc[0]['date']:%Y-%m-%d %H:%M:%S}" + ) + if timerange.stoptype == "date": + if pairdata.iloc[-1]["date"] < timerange.stopdt: + logger.warning( + f"{pair}, {candle_type}, {timeframe}, " + f"data ends at {pairdata.iloc[-1]['date']:%Y-%m-%d %H:%M:%S}" + ) def rename_futures_data( - self, pair: str, new_pair: str, timeframe: str, candle_type: CandleType): + self, pair: str, new_pair: str, timeframe: str, candle_type: CandleType + ): """ Temporary method to migrate data from old naming to new naming (BTC/USDT -> BTC/USDT:USDT) Only used for binance to support the binance futures naming unification. @@ -431,18 +465,19 @@ class IDataHandler(ABC): if funding_rate_combs: logger.warning( - f'Migrating {len(funding_rate_combs)} funding fees to correct timeframe.') + f"Migrating {len(funding_rate_combs)} funding fees to correct timeframe." 
+ ) for pair, timeframe, candletype in funding_rate_combs: old_name = self._pair_data_filename(self._datadir, pair, timeframe, candletype) new_name = self._pair_data_filename(self._datadir, pair, ff_timeframe, candletype) if not Path(old_name).exists(): - logger.warning(f'{old_name} does not exist, skipping.') + logger.warning(f"{old_name} does not exist, skipping.") continue if Path(new_name).exists(): - logger.warning(f'{new_name} already exists, Removing.') + logger.warning(f"{new_name} already exists, Removing.") Path(new_name).unlink() Path(old_name).rename(new_name) @@ -457,27 +492,33 @@ def get_datahandlerclass(datatype: str) -> Type[IDataHandler]: :return: Datahandler class """ - if datatype == 'json': + if datatype == "json": from .jsondatahandler import JsonDataHandler + return JsonDataHandler - elif datatype == 'jsongz': + elif datatype == "jsongz": from .jsondatahandler import JsonGzDataHandler + return JsonGzDataHandler - elif datatype == 'hdf5': + elif datatype == "hdf5": from .hdf5datahandler import HDF5DataHandler + return HDF5DataHandler - elif datatype == 'feather': + elif datatype == "feather": from .featherdatahandler import FeatherDataHandler + return FeatherDataHandler - elif datatype == 'parquet': + elif datatype == "parquet": from .parquetdatahandler import ParquetDataHandler + return ParquetDataHandler else: raise ValueError(f"No datahandler for datatype {datatype} available.") -def get_datahandler(datadir: Path, data_format: Optional[str] = None, - data_handler: Optional[IDataHandler] = None) -> IDataHandler: +def get_datahandler( + datadir: Path, data_format: Optional[str] = None, data_handler: Optional[IDataHandler] = None +) -> IDataHandler: """ :param datadir: Folder to save data :param data_format: dataformat to use @@ -485,6 +526,6 @@ def get_datahandler(datadir: Path, data_format: Optional[str] = None, """ if not data_handler: - HandlerClass = get_datahandlerclass(data_format or 'feather') + HandlerClass = get_datahandlerclass(data_format or "feather") data_handler = HandlerClass(datadir) return data_handler diff --git a/freqtrade/data/history/datahandlers/jsondatahandler.py b/freqtrade/data/history/datahandlers/jsondatahandler.py index 2d0333fed..b97b4b867 100644 --- a/freqtrade/data/history/datahandlers/jsondatahandler.py +++ b/freqtrade/data/history/datahandlers/jsondatahandler.py @@ -17,12 +17,12 @@ logger = logging.getLogger(__name__) class JsonDataHandler(IDataHandler): - _use_zip = False _columns = DEFAULT_DATAFRAME_COLUMNS def ohlcv_store( - self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None: + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType + ) -> None: """ Store data in json format "values". 
format looks as follows: @@ -37,16 +37,16 @@ class JsonDataHandler(IDataHandler): self.create_dir_if_needed(filename) _data = data.copy() # Convert date to int - _data['date'] = _data['date'].astype(np.int64) // 1000 // 1000 + _data["date"] = _data["date"].astype(np.int64) // 1000 // 1000 # Reset index, select only appropriate columns and save as json _data.reset_index(drop=True).loc[:, self._columns].to_json( - filename, orient="values", - compression='gzip' if self._use_zip else None) + filename, orient="values", compression="gzip" if self._use_zip else None + ) - def _ohlcv_load(self, pair: str, timeframe: str, - timerange: Optional[TimeRange], candle_type: CandleType - ) -> DataFrame: + def _ohlcv_load( + self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType + ) -> DataFrame: """ Internal method used to load data for one pair from disk. Implements the loading and conversion to a Pandas dataframe. @@ -59,31 +59,34 @@ class JsonDataHandler(IDataHandler): :param candle_type: Any of the enum CandleType (must match trading mode!) :return: DataFrame with ohlcv data, or empty DataFrame """ - filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type) + filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type) if not filename.exists(): # Fallback mode for 1M files filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True) + self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True + ) if not filename.exists(): return DataFrame(columns=self._columns) try: - pairdata = read_json(filename, orient='values') + pairdata = read_json(filename, orient="values") pairdata.columns = self._columns except ValueError: logger.error(f"Could not load data for {pair}.") return DataFrame(columns=self._columns) - pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float', - 'low': 'float', 'close': 'float', 'volume': 'float'}) - pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True) + pairdata = pairdata.astype( + dtype={ + "open": "float", + "high": "float", + "low": "float", + "close": "float", + "volume": "float", + } + ) + pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True) return pairdata def ohlcv_append( - self, - pair: str, - timeframe: str, - data: DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType ) -> None: """ Append data to existing data structures @@ -145,5 +148,4 @@ class JsonDataHandler(IDataHandler): class JsonGzDataHandler(JsonDataHandler): - _use_zip = True diff --git a/freqtrade/data/history/datahandlers/parquetdatahandler.py b/freqtrade/data/history/datahandlers/parquetdatahandler.py index 01becdc84..e226d4749 100644 --- a/freqtrade/data/history/datahandlers/parquetdatahandler.py +++ b/freqtrade/data/history/datahandlers/parquetdatahandler.py @@ -14,11 +14,11 @@ logger = logging.getLogger(__name__) class ParquetDataHandler(IDataHandler): - _columns = DEFAULT_DATAFRAME_COLUMNS def ohlcv_store( - self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType) -> None: + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType + ) -> None: """ Store data in json format "values". 
format looks as follows: @@ -34,9 +34,9 @@ class ParquetDataHandler(IDataHandler): data.reset_index(drop=True).loc[:, self._columns].to_parquet(filename) - def _ohlcv_load(self, pair: str, timeframe: str, - timerange: Optional[TimeRange], candle_type: CandleType - ) -> DataFrame: + def _ohlcv_load( + self, pair: str, timeframe: str, timerange: Optional[TimeRange], candle_type: CandleType + ) -> DataFrame: """ Internal method used to load data for one pair from disk. Implements the loading and conversion to a Pandas dataframe. @@ -49,28 +49,31 @@ class ParquetDataHandler(IDataHandler): :param candle_type: Any of the enum CandleType (must match trading mode!) :return: DataFrame with ohlcv data, or empty DataFrame """ - filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type) + filename = self._pair_data_filename(self._datadir, pair, timeframe, candle_type=candle_type) if not filename.exists(): # Fallback mode for 1M files filename = self._pair_data_filename( - self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True) + self._datadir, pair, timeframe, candle_type=candle_type, no_timeframe_modify=True + ) if not filename.exists(): return DataFrame(columns=self._columns) pairdata = read_parquet(filename) pairdata.columns = self._columns - pairdata = pairdata.astype(dtype={'open': 'float', 'high': 'float', - 'low': 'float', 'close': 'float', 'volume': 'float'}) - pairdata['date'] = to_datetime(pairdata['date'], unit='ms', utc=True) + pairdata = pairdata.astype( + dtype={ + "open": "float", + "high": "float", + "low": "float", + "close": "float", + "volume": "float", + } + ) + pairdata["date"] = to_datetime(pairdata["date"], unit="ms", utc=True) return pairdata def ohlcv_append( - self, - pair: str, - timeframe: str, - data: DataFrame, - candle_type: CandleType + self, pair: str, timeframe: str, data: DataFrame, candle_type: CandleType ) -> None: """ Append data to existing data structures diff --git a/freqtrade/data/history/history_utils.py b/freqtrade/data/history/history_utils.py index e9ff2b2df..bdca599c6 100644 --- a/freqtrade/data/history/history_utils.py +++ b/freqtrade/data/history/history_utils.py @@ -7,11 +7,20 @@ from typing import Dict, List, Optional, Tuple from pandas import DataFrame, concat from freqtrade.configuration import TimeRange -from freqtrade.constants import (DATETIME_PRINT_FORMAT, DEFAULT_DATAFRAME_COLUMNS, - DL_DATA_TIMEFRAMES, DOCS_LINK, Config) -from freqtrade.data.converter import (clean_ohlcv_dataframe, convert_trades_to_ohlcv, - ohlcv_to_dataframe, trades_df_remove_duplicates, - trades_list_to_df) +from freqtrade.constants import ( + DATETIME_PRINT_FORMAT, + DEFAULT_DATAFRAME_COLUMNS, + DL_DATA_TIMEFRAMES, + DOCS_LINK, + Config, +) +from freqtrade.data.converter import ( + clean_ohlcv_dataframe, + convert_trades_to_ohlcv, + ohlcv_to_dataframe, + trades_df_remove_duplicates, + trades_list_to_df, +) from freqtrade.data.history.datahandlers import IDataHandler, get_datahandler from freqtrade.enums import CandleType, TradingMode from freqtrade.exceptions import OperationalException @@ -25,17 +34,19 @@ from freqtrade.util.migrations import migrate_data logger = logging.getLogger(__name__) -def load_pair_history(pair: str, - timeframe: str, - datadir: Path, *, - timerange: Optional[TimeRange] = None, - fill_up_missing: bool = True, - drop_incomplete: bool = False, - startup_candles: int = 0, - data_format: Optional[str] = None, - data_handler: Optional[IDataHandler] = None, - candle_type: CandleType = 
CandleType.SPOT - ) -> DataFrame: +def load_pair_history( + pair: str, + timeframe: str, + datadir: Path, + *, + timerange: Optional[TimeRange] = None, + fill_up_missing: bool = True, + drop_incomplete: bool = False, + startup_candles: int = 0, + data_format: Optional[str] = None, + data_handler: Optional[IDataHandler] = None, + candle_type: CandleType = CandleType.SPOT, +) -> DataFrame: """ Load cached ohlcv history for the given pair. @@ -54,27 +65,30 @@ def load_pair_history(pair: str, """ data_handler = get_datahandler(datadir, data_format, data_handler) - return data_handler.ohlcv_load(pair=pair, - timeframe=timeframe, - timerange=timerange, - fill_missing=fill_up_missing, - drop_incomplete=drop_incomplete, - startup_candles=startup_candles, - candle_type=candle_type, - ) + return data_handler.ohlcv_load( + pair=pair, + timeframe=timeframe, + timerange=timerange, + fill_missing=fill_up_missing, + drop_incomplete=drop_incomplete, + startup_candles=startup_candles, + candle_type=candle_type, + ) -def load_data(datadir: Path, - timeframe: str, - pairs: List[str], *, - timerange: Optional[TimeRange] = None, - fill_up_missing: bool = True, - startup_candles: int = 0, - fail_without_data: bool = False, - data_format: str = 'feather', - candle_type: CandleType = CandleType.SPOT, - user_futures_funding_rate: Optional[int] = None, - ) -> Dict[str, DataFrame]: +def load_data( + datadir: Path, + timeframe: str, + pairs: List[str], + *, + timerange: Optional[TimeRange] = None, + fill_up_missing: bool = True, + startup_candles: int = 0, + fail_without_data: bool = False, + data_format: str = "feather", + candle_type: CandleType = CandleType.SPOT, + user_futures_funding_rate: Optional[int] = None, +) -> Dict[str, DataFrame]: """ Load ohlcv history data for a list of pairs. @@ -91,18 +105,21 @@ def load_data(datadir: Path, """ result: Dict[str, DataFrame] = {} if startup_candles > 0 and timerange: - logger.info(f'Using indicator startup period: {startup_candles} ...') + logger.info(f"Using indicator startup period: {startup_candles} ...") data_handler = get_datahandler(datadir, data_format) for pair in pairs: - hist = load_pair_history(pair=pair, timeframe=timeframe, - datadir=datadir, timerange=timerange, - fill_up_missing=fill_up_missing, - startup_candles=startup_candles, - data_handler=data_handler, - candle_type=candle_type, - ) + hist = load_pair_history( + pair=pair, + timeframe=timeframe, + datadir=datadir, + timerange=timerange, + fill_up_missing=fill_up_missing, + startup_candles=startup_candles, + data_handler=data_handler, + candle_type=candle_type, + ) if not hist.empty: result[pair] = hist else: @@ -116,14 +133,16 @@ def load_data(datadir: Path, return result -def refresh_data(*, datadir: Path, - timeframe: str, - pairs: List[str], - exchange: Exchange, - data_format: Optional[str] = None, - timerange: Optional[TimeRange] = None, - candle_type: CandleType, - ) -> None: +def refresh_data( + *, + datadir: Path, + timeframe: str, + pairs: List[str], + exchange: Exchange, + data_format: Optional[str] = None, + timerange: Optional[TimeRange] = None, + candle_type: CandleType, +) -> None: """ Refresh ohlcv history data for a list of pairs. 
@@ -137,11 +156,17 @@ def refresh_data(*, datadir: Path, """ data_handler = get_datahandler(datadir, data_format) for idx, pair in enumerate(pairs): - process = f'{idx}/{len(pairs)}' - _download_pair_history(pair=pair, process=process, - timeframe=timeframe, datadir=datadir, - timerange=timerange, exchange=exchange, data_handler=data_handler, - candle_type=candle_type) + process = f"{idx}/{len(pairs)}" + _download_pair_history( + pair=pair, + process=process, + timeframe=timeframe, + datadir=datadir, + timerange=timerange, + exchange=exchange, + data_handler=data_handler, + candle_type=candle_type, + ) def _load_cached_data_for_updating( @@ -163,42 +188,49 @@ def _load_cached_data_for_updating( start = None end = None if timerange: - if timerange.starttype == 'date': + if timerange.starttype == "date": start = timerange.startdt - if timerange.stoptype == 'date': + if timerange.stoptype == "date": end = timerange.stopdt # Intentionally don't pass timerange in - since we need to load the full dataset. - data = data_handler.ohlcv_load(pair, timeframe=timeframe, - timerange=None, fill_missing=False, - drop_incomplete=True, warn_no_data=False, - candle_type=candle_type) + data = data_handler.ohlcv_load( + pair, + timeframe=timeframe, + timerange=None, + fill_missing=False, + drop_incomplete=True, + warn_no_data=False, + candle_type=candle_type, + ) if not data.empty: - if not prepend and start and start < data.iloc[0]['date']: + if not prepend and start and start < data.iloc[0]["date"]: # Earlier data than existing data requested, redownload all data = DataFrame(columns=DEFAULT_DATAFRAME_COLUMNS) else: if prepend: - end = data.iloc[0]['date'] + end = data.iloc[0]["date"] else: - start = data.iloc[-1]['date'] + start = data.iloc[-1]["date"] start_ms = int(start.timestamp() * 1000) if start else None end_ms = int(end.timestamp() * 1000) if end else None return data, start_ms, end_ms -def _download_pair_history(pair: str, *, - datadir: Path, - exchange: Exchange, - timeframe: str = '5m', - process: str = '', - new_pairs_days: int = 30, - data_handler: Optional[IDataHandler] = None, - timerange: Optional[TimeRange] = None, - candle_type: CandleType, - erase: bool = False, - prepend: bool = False, - ) -> bool: +def _download_pair_history( + pair: str, + *, + datadir: Path, + exchange: Exchange, + timeframe: str = "5m", + process: str = "", + new_pairs_days: int = 30, + data_handler: Optional[IDataHandler] = None, + timerange: Optional[TimeRange] = None, + candle_type: CandleType, + erase: bool = False, + prepend: bool = False, +) -> bool: """ Download latest candles from the exchange for the pair and timeframe passed in parameters The data is downloaded starting from the last correct data that @@ -217,54 +249,71 @@ def _download_pair_history(pair: str, *, try: if erase: if data_handler.ohlcv_purge(pair, timeframe, candle_type=candle_type): - logger.info(f'Deleting existing data for pair {pair}, {timeframe}, {candle_type}.') + logger.info(f"Deleting existing data for pair {pair}, {timeframe}, {candle_type}.") data, since_ms, until_ms = _load_cached_data_for_updating( - pair, timeframe, timerange, + pair, + timeframe, + timerange, data_handler=data_handler, candle_type=candle_type, - prepend=prepend) + prepend=prepend, + ) - logger.info(f'({process}) - Download history data for "{pair}", {timeframe}, ' - f'{candle_type} and store in {datadir}. 
' - f'From {format_ms_time(since_ms) if since_ms else "start"} to ' - f'{format_ms_time(until_ms) if until_ms else "now"}' - ) + logger.info( + f'({process}) - Download history data for "{pair}", {timeframe}, ' + f"{candle_type} and store in {datadir}. " + f'From {format_ms_time(since_ms) if since_ms else "start"} to ' + f'{format_ms_time(until_ms) if until_ms else "now"}' + ) - logger.debug("Current Start: %s", - f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" - if not data.empty else 'None') - logger.debug("Current End: %s", - f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" - if not data.empty else 'None') + logger.debug( + "Current Start: %s", + f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None", + ) + logger.debug( + "Current End: %s", + f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None", + ) # Default since_ms to 30 days if nothing is given - new_data = exchange.get_historic_ohlcv(pair=pair, - timeframe=timeframe, - since_ms=since_ms if since_ms else - int((datetime.now() - timedelta(days=new_pairs_days) - ).timestamp()) * 1000, - is_new_pair=data.empty, - candle_type=candle_type, - until_ms=until_ms if until_ms else None - ) + new_data = exchange.get_historic_ohlcv( + pair=pair, + timeframe=timeframe, + since_ms=( + since_ms + if since_ms + else int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000 + ), + is_new_pair=data.empty, + candle_type=candle_type, + until_ms=until_ms if until_ms else None, + ) # TODO: Maybe move parsing to exchange class (?) - new_dataframe = ohlcv_to_dataframe(new_data, timeframe, pair, - fill_missing=False, drop_incomplete=True) + new_dataframe = ohlcv_to_dataframe( + new_data, timeframe, pair, fill_missing=False, drop_incomplete=True + ) if data.empty: data = new_dataframe else: # Run cleaning again to ensure there were no duplicate candles # Especially between existing and new data. - data = clean_ohlcv_dataframe(concat([data, new_dataframe], axis=0), timeframe, pair, - fill_missing=False, drop_incomplete=False) + data = clean_ohlcv_dataframe( + concat([data, new_dataframe], axis=0), + timeframe, + pair, + fill_missing=False, + drop_incomplete=False, + ) - logger.debug("New Start: %s", - f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" - if not data.empty else 'None') - logger.debug("New End: %s", - f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" - if not data.empty else 'None') + logger.debug( + "New Start: %s", + f"{data.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None", + ) + logger.debug( + "New End: %s", + f"{data.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}" if not data.empty else "None", + ) data_handler.ohlcv_store(pair, timeframe, data=data, candle_type=candle_type) return True @@ -276,13 +325,18 @@ def _download_pair_history(pair: str, *, return False -def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes: List[str], - datadir: Path, trading_mode: str, - timerange: Optional[TimeRange] = None, - new_pairs_days: int = 30, erase: bool = False, - data_format: Optional[str] = None, - prepend: bool = False, - ) -> List[str]: +def refresh_backtest_ohlcv_data( + exchange: Exchange, + pairs: List[str], + timeframes: List[str], + datadir: Path, + trading_mode: str, + timerange: Optional[TimeRange] = None, + new_pairs_days: int = 30, + erase: bool = False, + data_format: Optional[str] = None, + prepend: bool = False, +) -> List[str]: """ Refresh stored ohlcv data for backtesting and hyperopt operations. 
Used by freqtrade download-data subcommand. @@ -291,63 +345,77 @@ def refresh_backtest_ohlcv_data(exchange: Exchange, pairs: List[str], timeframes pairs_not_available = [] data_handler = get_datahandler(datadir, data_format) candle_type = CandleType.get_default(trading_mode) - process = '' + process = "" for idx, pair in enumerate(pairs, start=1): if pair not in exchange.markets: pairs_not_available.append(pair) logger.info(f"Skipping pair {pair}...") continue for timeframe in timeframes: - - logger.debug(f'Downloading pair {pair}, {candle_type}, interval {timeframe}.') - process = f'{idx}/{len(pairs)}' - _download_pair_history(pair=pair, process=process, - datadir=datadir, exchange=exchange, - timerange=timerange, data_handler=data_handler, - timeframe=str(timeframe), new_pairs_days=new_pairs_days, - candle_type=candle_type, - erase=erase, prepend=prepend) - if trading_mode == 'futures': + logger.debug(f"Downloading pair {pair}, {candle_type}, interval {timeframe}.") + process = f"{idx}/{len(pairs)}" + _download_pair_history( + pair=pair, + process=process, + datadir=datadir, + exchange=exchange, + timerange=timerange, + data_handler=data_handler, + timeframe=str(timeframe), + new_pairs_days=new_pairs_days, + candle_type=candle_type, + erase=erase, + prepend=prepend, + ) + if trading_mode == "futures": # Predefined candletype (and timeframe) depending on exchange # Downloads what is necessary to backtest based on futures data. - tf_mark = exchange.get_option('mark_ohlcv_timeframe') - tf_funding_rate = exchange.get_option('funding_fee_timeframe') + tf_mark = exchange.get_option("mark_ohlcv_timeframe") + tf_funding_rate = exchange.get_option("funding_fee_timeframe") - fr_candle_type = CandleType.from_string(exchange.get_option('mark_ohlcv_price')) + fr_candle_type = CandleType.from_string(exchange.get_option("mark_ohlcv_price")) # All exchanges need FundingRate for futures trading. # The timeframe is aligned to the mark-price timeframe. combs = ((CandleType.FUNDING_RATE, tf_funding_rate), (fr_candle_type, tf_mark)) for candle_type_f, tf in combs: - logger.debug(f'Downloading pair {pair}, {candle_type_f}, interval {tf}.') - _download_pair_history(pair=pair, process=process, - datadir=datadir, exchange=exchange, - timerange=timerange, data_handler=data_handler, - timeframe=str(tf), new_pairs_days=new_pairs_days, - candle_type=candle_type_f, - erase=erase, prepend=prepend) + logger.debug(f"Downloading pair {pair}, {candle_type_f}, interval {tf}.") + _download_pair_history( + pair=pair, + process=process, + datadir=datadir, + exchange=exchange, + timerange=timerange, + data_handler=data_handler, + timeframe=str(tf), + new_pairs_days=new_pairs_days, + candle_type=candle_type_f, + erase=erase, + prepend=prepend, + ) return pairs_not_available -def _download_trades_history(exchange: Exchange, - pair: str, *, - new_pairs_days: int = 30, - timerange: Optional[TimeRange] = None, - data_handler: IDataHandler, - trading_mode: TradingMode, - ) -> bool: +def _download_trades_history( + exchange: Exchange, + pair: str, + *, + new_pairs_days: int = 30, + timerange: Optional[TimeRange] = None, + data_handler: IDataHandler, + trading_mode: TradingMode, +) -> bool: """ Download trade history from the exchange. Appends to previously downloaded trades data. 
""" try: - until = None since = 0 if timerange: - if timerange.starttype == 'date': + if timerange.starttype == "date": since = timerange.startts * 1000 - if timerange.stoptype == 'date': + if timerange.stoptype == "date": until = timerange.stopts * 1000 trades = data_handler.trades_load(pair, trading_mode) @@ -356,60 +424,76 @@ def _download_trades_history(exchange: Exchange, # DEFAULT_TRADES_COLUMNS: 0 -> timestamp # DEFAULT_TRADES_COLUMNS: 1 -> id - if not trades.empty and since > 0 and since < trades.iloc[0]['timestamp']: + if not trades.empty and since > 0 and since < trades.iloc[0]["timestamp"]: # since is before the first trade - logger.info(f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than " - f"available data. Redownloading trades for {pair}...") + logger.info( + f"Start ({trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}) earlier than " + f"available data. Redownloading trades for {pair}..." + ) trades = trades_list_to_df([]) - from_id = trades.iloc[-1]['id'] if not trades.empty else None - if not trades.empty and since < trades.iloc[-1]['timestamp']: + from_id = trades.iloc[-1]["id"] if not trades.empty else None + if not trades.empty and since < trades.iloc[-1]["timestamp"]: # Reset since to the last available point # - 5 seconds (to ensure we're getting all trades) - since = trades.iloc[-1]['timestamp'] - (5 * 1000) - logger.info(f"Using last trade date -5s - Downloading trades for {pair} " - f"since: {format_ms_time(since)}.") + since = trades.iloc[-1]["timestamp"] - (5 * 1000) + logger.info( + f"Using last trade date -5s - Downloading trades for {pair} " + f"since: {format_ms_time(since)}." + ) if not since: since = dt_ts(dt_now() - timedelta(days=new_pairs_days)) - logger.debug("Current Start: %s", 'None' if trades.empty else - f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}") - logger.debug("Current End: %s", 'None' if trades.empty else - f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}") + logger.debug( + "Current Start: %s", + "None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}", + ) + logger.debug( + "Current End: %s", + "None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}", + ) logger.info(f"Current Amount of trades: {len(trades)}") # Default since_ms to 30 days if nothing is given - new_trades = exchange.get_historic_trades(pair=pair, - since=since, - until=until, - from_id=from_id, - ) + new_trades = exchange.get_historic_trades( + pair=pair, + since=since, + until=until, + from_id=from_id, + ) new_trades_df = trades_list_to_df(new_trades[1]) trades = concat([trades, new_trades_df], axis=0) # Remove duplicates to make sure we're not storing data we don't need trades = trades_df_remove_duplicates(trades) data_handler.trades_store(pair, trades, trading_mode) - logger.debug("New Start: %s", 'None' if trades.empty else - f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}") - logger.debug("New End: %s", 'None' if trades.empty else - f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}") + logger.debug( + "New Start: %s", + "None" if trades.empty else f"{trades.iloc[0]['date']:{DATETIME_PRINT_FORMAT}}", + ) + logger.debug( + "New End: %s", + "None" if trades.empty else f"{trades.iloc[-1]['date']:{DATETIME_PRINT_FORMAT}}", + ) logger.info(f"New Amount of trades: {len(trades)}") return True except Exception: - logger.exception( - f'Failed to download historic trades for pair: "{pair}". ' - ) + logger.exception(f'Failed to download historic trades for pair: "{pair}". 
') return False -def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: Path, - timerange: TimeRange, trading_mode: TradingMode, - new_pairs_days: int = 30, - erase: bool = False, data_format: str = 'feather', - ) -> List[str]: +def refresh_backtest_trades_data( + exchange: Exchange, + pairs: List[str], + datadir: Path, + timerange: TimeRange, + trading_mode: TradingMode, + new_pairs_days: int = 30, + erase: bool = False, + data_format: str = "feather", +) -> List[str]: """ Refresh stored trades data for backtesting and hyperopt operations. Used by freqtrade download-data subcommand. @@ -425,15 +509,17 @@ def refresh_backtest_trades_data(exchange: Exchange, pairs: List[str], datadir: if erase: if data_handler.trades_purge(pair, trading_mode): - logger.info(f'Deleting existing data for pair {pair}.') + logger.info(f"Deleting existing data for pair {pair}.") - logger.info(f'Downloading trades for pair {pair}.') - _download_trades_history(exchange=exchange, - pair=pair, - new_pairs_days=new_pairs_days, - timerange=timerange, - data_handler=data_handler, - trading_mode=trading_mode) + logger.info(f"Downloading trades for pair {pair}.") + _download_trades_history( + exchange=exchange, + pair=pair, + new_pairs_days=new_pairs_days, + timerange=timerange, + data_handler=data_handler, + trading_mode=trading_mode, + ) return pairs_not_available @@ -445,15 +531,18 @@ def get_timerange(data: Dict[str, DataFrame]) -> Tuple[datetime, datetime]: :return: tuple containing min_date, max_date """ timeranges = [ - (frame['date'].min().to_pydatetime(), frame['date'].max().to_pydatetime()) + (frame["date"].min().to_pydatetime(), frame["date"].max().to_pydatetime()) for frame in data.values() ] - return (min(timeranges, key=operator.itemgetter(0))[0], - max(timeranges, key=operator.itemgetter(1))[1]) + return ( + min(timeranges, key=operator.itemgetter(0))[0], + max(timeranges, key=operator.itemgetter(1))[1], + ) -def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime, - max_date: datetime, timeframe_min: int) -> bool: +def validate_backtest_data( + data: DataFrame, pair: str, min_date: datetime, max_date: datetime, timeframe_min: int +) -> bool: """ Validates preprocessed backtesting data for missing values and shows warnings about it that. 
@@ -469,89 +558,114 @@ def validate_backtest_data(data: DataFrame, pair: str, min_date: datetime, dflen = len(data) if dflen < expected_frames: found_missing = True - logger.warning("%s has missing frames: expected %s, got %s, that's %s missing values", - pair, expected_frames, dflen, expected_frames - dflen) + logger.warning( + "%s has missing frames: expected %s, got %s, that's %s missing values", + pair, + expected_frames, + dflen, + expected_frames - dflen, + ) return found_missing def download_data_main(config: Config) -> None: - timerange = TimeRange() - if 'days' in config: - time_since = (datetime.now() - timedelta(days=config['days'])).strftime("%Y%m%d") - timerange = TimeRange.parse_timerange(f'{time_since}-') + if "days" in config: + time_since = (datetime.now() - timedelta(days=config["days"])).strftime("%Y%m%d") + timerange = TimeRange.parse_timerange(f"{time_since}-") - if 'timerange' in config: - timerange = timerange.parse_timerange(config['timerange']) + if "timerange" in config: + timerange = timerange.parse_timerange(config["timerange"]) # Remove stake-currency to skip checks which are not relevant for datadownload - config['stake_currency'] = '' + config["stake_currency"] = "" pairs_not_available: List[str] = [] # Init exchange from freqtrade.resolvers.exchange_resolver import ExchangeResolver + exchange = ExchangeResolver.load_exchange(config, validate=False) available_pairs = [ - p for p in exchange.get_markets( - tradable_only=True, active_only=not config.get('include_inactive') - ).keys() + p + for p in exchange.get_markets( + tradable_only=True, active_only=not config.get("include_inactive") + ).keys() ] expanded_pairs = dynamic_expand_pairlist(config, available_pairs) - if 'timeframes' not in config: - config['timeframes'] = DL_DATA_TIMEFRAMES + if "timeframes" not in config: + config["timeframes"] = DL_DATA_TIMEFRAMES # Manual validations of relevant settings - if not config['exchange'].get('skip_pair_validation', False): + if not config["exchange"].get("skip_pair_validation", False): exchange.validate_pairs(expanded_pairs) - logger.info(f"About to download pairs: {expanded_pairs}, " - f"intervals: {config['timeframes']} to {config['datadir']}") + logger.info( + f"About to download pairs: {expanded_pairs}, " + f"intervals: {config['timeframes']} to {config['datadir']}" + ) if len(expanded_pairs) == 0: logger.warning( "No pairs available for download. " "Please make sure you're using the correct Pair naming for your selected trade mode. 
\n" - f"More info: {DOCS_LINK}/bot-basics/#pair-naming") + f"More info: {DOCS_LINK}/bot-basics/#pair-naming" + ) - for timeframe in config['timeframes']: + for timeframe in config["timeframes"]: exchange.validate_timeframes(timeframe) # Start downloading try: - if config.get('download_trades'): + if config.get("download_trades"): pairs_not_available = refresh_backtest_trades_data( - exchange, pairs=expanded_pairs, datadir=config['datadir'], - timerange=timerange, new_pairs_days=config['new_pairs_days'], - erase=bool(config.get('erase')), data_format=config['dataformat_trades'], - trading_mode=config.get('trading_mode', TradingMode.SPOT), - ) - - # Convert downloaded trade data to different timeframes - convert_trades_to_ohlcv( - pairs=expanded_pairs, timeframes=config['timeframes'], - datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')), - data_format_ohlcv=config['dataformat_ohlcv'], - data_format_trades=config['dataformat_trades'], - candle_type=config.get('candle_type_def', CandleType.SPOT), + exchange, + pairs=expanded_pairs, + datadir=config["datadir"], + timerange=timerange, + new_pairs_days=config["new_pairs_days"], + erase=bool(config.get("erase")), + data_format=config["dataformat_trades"], + trading_mode=config.get("trading_mode", TradingMode.SPOT), ) + + if config.get("convert_trades") or not exchange.get_option("ohlcv_has_history", True): + # Convert downloaded trade data to different timeframes + # Only auto-convert for exchanges without historic klines + + convert_trades_to_ohlcv( + pairs=expanded_pairs, + timeframes=config["timeframes"], + datadir=config["datadir"], + timerange=timerange, + erase=bool(config.get("erase")), + data_format_ohlcv=config["dataformat_ohlcv"], + data_format_trades=config["dataformat_trades"], + candle_type=config.get("candle_type_def", CandleType.SPOT), + ) else: - if not exchange.get_option('ohlcv_has_history', True): + if not exchange.get_option("ohlcv_has_history", True): raise OperationalException( f"Historic klines not available for {exchange.name}. " "Please use `--dl-trades` instead for this exchange " "(will unfortunately take a long time)." - ) + ) migrate_data(config, exchange) pairs_not_available = refresh_backtest_ohlcv_data( - exchange, pairs=expanded_pairs, timeframes=config['timeframes'], - datadir=config['datadir'], timerange=timerange, - new_pairs_days=config['new_pairs_days'], - erase=bool(config.get('erase')), data_format=config['dataformat_ohlcv'], - trading_mode=config.get('trading_mode', 'spot'), - prepend=config.get('prepend_data', False) + exchange, + pairs=expanded_pairs, + timeframes=config["timeframes"], + datadir=config["datadir"], + timerange=timerange, + new_pairs_days=config["new_pairs_days"], + erase=bool(config.get("erase")), + data_format=config["dataformat_ohlcv"], + trading_mode=config.get("trading_mode", "spot"), + prepend=config.get("prepend_data", False), ) finally: if pairs_not_available: - logger.info(f"Pairs [{','.join(pairs_not_available)}] not available " - f"on exchange {exchange.name}.") + logger.info( + f"Pairs [{','.join(pairs_not_available)}] not available " + f"on exchange {exchange.name}." 
+ ) diff --git a/freqtrade/data/metrics.py b/freqtrade/data/metrics.py index 43a33fa0d..2e4673fb5 100644 --- a/freqtrade/data/metrics.py +++ b/freqtrade/data/metrics.py @@ -1,5 +1,6 @@ import logging import math +from dataclasses import dataclass from datetime import datetime from typing import Dict, Tuple @@ -31,7 +32,8 @@ def calculate_market_change(data: Dict[str, pd.DataFrame], column: str = "close" def combine_dataframes_by_column( - data: Dict[str, pd.DataFrame], column: str = "close") -> pd.DataFrame: + data: Dict[str, pd.DataFrame], column: str = "close" +) -> pd.DataFrame: """ Combine multiple dataframes "column" :param data: Dict of Dataframes, dict key should be pair. @@ -41,14 +43,15 @@ def combine_dataframes_by_column( """ if not data: raise ValueError("No data provided.") - df_comb = pd.concat([data[pair].set_index('date').rename( - {column: pair}, axis=1)[pair] for pair in data], axis=1) + df_comb = pd.concat( + [data[pair].set_index("date").rename({column: pair}, axis=1)[pair] for pair in data], axis=1 + ) return df_comb def combined_dataframes_with_rel_mean( - data: Dict[str, pd.DataFrame], fromdt: datetime, todt: datetime, - column: str = "close") -> pd.DataFrame: + data: Dict[str, pd.DataFrame], fromdt: datetime, todt: datetime, column: str = "close" +) -> pd.DataFrame: """ Combine multiple dataframes "column" :param data: Dict of Dataframes, dict key should be pair. @@ -60,14 +63,15 @@ def combined_dataframes_with_rel_mean( df_comb = combine_dataframes_by_column(data, column) # Trim dataframes to the given timeframe df_comb = df_comb.iloc[(df_comb.index >= fromdt) & (df_comb.index < todt)] - df_comb['count'] = df_comb.count(axis=1) - df_comb['mean'] = df_comb.mean(axis=1) - df_comb['rel_mean'] = df_comb['mean'].pct_change().fillna(0).cumsum() - return df_comb[['mean', 'rel_mean', 'count']] + df_comb["count"] = df_comb.count(axis=1) + df_comb["mean"] = df_comb.mean(axis=1) + df_comb["rel_mean"] = df_comb["mean"].pct_change().fillna(0).cumsum() + return df_comb[["mean", "rel_mean", "count"]] def combine_dataframes_with_mean( - data: Dict[str, pd.DataFrame], column: str = "close") -> pd.DataFrame: + data: Dict[str, pd.DataFrame], column: str = "close" +) -> pd.DataFrame: """ Combine multiple dataframes "column" :param data: Dict of Dataframes, dict key should be pair. @@ -78,13 +82,14 @@ def combine_dataframes_with_mean( """ df_comb = combine_dataframes_by_column(data, column) - df_comb['mean'] = df_comb.mean(axis=1) + df_comb["mean"] = df_comb.mean(axis=1) return df_comb -def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str, - timeframe: str) -> pd.DataFrame: +def create_cum_profit( + df: pd.DataFrame, trades: pd.DataFrame, col_name: str, timeframe: str +) -> pd.DataFrame: """ Adds a column `col_name` with the cumulative profit for the given trades array. 
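The reformatted metrics helpers above (`combine_dataframes_by_column` and `combined_dataframes_with_rel_mean`) build one wide frame with a column per pair and then derive per-row `count`, `mean` and `rel_mean`. A small self-contained illustration with two hypothetical pairs and made-up close prices:

```python
import pandas as pd

dates = pd.date_range("2024-01-01", periods=3, freq="D")
data = {
    "BTC/USDT": pd.DataFrame({"date": dates, "close": [100.0, 110.0, 121.0]}),
    "ETH/USDT": pd.DataFrame({"date": dates, "close": [10.0, 9.0, 9.9]}),
}

# One column per pair, indexed by date (mirrors combine_dataframes_by_column).
df_comb = pd.concat(
    [data[pair].set_index("date").rename({"close": pair}, axis=1)[pair] for pair in data],
    axis=1,
)

# Per-row statistics, in the same order as combined_dataframes_with_rel_mean.
df_comb["count"] = df_comb.count(axis=1)
df_comb["mean"] = df_comb.mean(axis=1)
df_comb["rel_mean"] = df_comb["mean"].pct_change().fillna(0).cumsum()
print(df_comb[["mean", "rel_mean", "count"]])
```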
:param df: DataFrame with date index @@ -97,11 +102,11 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str, if len(trades) == 0: raise ValueError("Trade dataframe empty.") from freqtrade.exchange import timeframe_to_resample_freq + timeframe_freq = timeframe_to_resample_freq(timeframe) # Resample to timeframe to make sure trades match candles - _trades_sum = trades.resample(timeframe_freq, on='close_date' - )[['profit_abs']].sum() - df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum() + _trades_sum = trades.resample(timeframe_freq, on="close_date")[["profit_abs"]].sum() + df.loc[:, col_name] = _trades_sum["profit_abs"].cumsum() # Set first value to 0 df.loc[df.iloc[0].name, col_name] = 0 # FFill to get continuous @@ -109,29 +114,34 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str, return df -def _calc_drawdown_series(profit_results: pd.DataFrame, *, date_col: str, value_col: str, - starting_balance: float) -> pd.DataFrame: +def _calc_drawdown_series( + profit_results: pd.DataFrame, *, date_col: str, value_col: str, starting_balance: float +) -> pd.DataFrame: max_drawdown_df = pd.DataFrame() - max_drawdown_df['cumulative'] = profit_results[value_col].cumsum() - max_drawdown_df['high_value'] = max_drawdown_df['cumulative'].cummax() - max_drawdown_df['drawdown'] = max_drawdown_df['cumulative'] - max_drawdown_df['high_value'] - max_drawdown_df['date'] = profit_results.loc[:, date_col] + max_drawdown_df["cumulative"] = profit_results[value_col].cumsum() + max_drawdown_df["high_value"] = max_drawdown_df["cumulative"].cummax() + max_drawdown_df["drawdown"] = max_drawdown_df["cumulative"] - max_drawdown_df["high_value"] + max_drawdown_df["date"] = profit_results.loc[:, date_col] if starting_balance: - cumulative_balance = starting_balance + max_drawdown_df['cumulative'] - max_balance = starting_balance + max_drawdown_df['high_value'] - max_drawdown_df['drawdown_relative'] = ((max_balance - cumulative_balance) / max_balance) + cumulative_balance = starting_balance + max_drawdown_df["cumulative"] + max_balance = starting_balance + max_drawdown_df["high_value"] + max_drawdown_df["drawdown_relative"] = (max_balance - cumulative_balance) / max_balance else: # NOTE: This is not completely accurate, # but might good enough if starting_balance is not available - max_drawdown_df['drawdown_relative'] = ( - (max_drawdown_df['high_value'] - max_drawdown_df['cumulative']) - / max_drawdown_df['high_value']) + max_drawdown_df["drawdown_relative"] = ( + max_drawdown_df["high_value"] - max_drawdown_df["cumulative"] + ) / max_drawdown_df["high_value"] return max_drawdown_df -def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date', - value_col: str = 'profit_ratio', starting_balance: float = 0.0 - ): +def calculate_underwater( + trades: pd.DataFrame, + *, + date_col: str = "close_date", + value_col: str = "profit_ratio", + starting_balance: float = 0.0, +): """ Calculate max drawdown and the corresponding close dates :param trades: DataFrame containing trades (requires columns close_date and profit_ratio) @@ -145,25 +155,37 @@ def calculate_underwater(trades: pd.DataFrame, *, date_col: str = 'close_date', raise ValueError("Trade dataframe empty.") profit_results = trades.sort_values(date_col).reset_index(drop=True) max_drawdown_df = _calc_drawdown_series( - profit_results, - date_col=date_col, - value_col=value_col, - starting_balance=starting_balance) + profit_results, date_col=date_col, value_col=value_col, 
starting_balance=starting_balance + ) return max_drawdown_df -def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date', - value_col: str = 'profit_abs', starting_balance: float = 0, - relative: bool = False - ) -> Tuple[float, pd.Timestamp, pd.Timestamp, float, float, float]: +@dataclass() +class DrawDownResult: + drawdown_abs: float = 0.0 + high_date: pd.Timestamp = None + low_date: pd.Timestamp = None + high_value: float = 0.0 + low_value: float = 0.0 + relative_account_drawdown: float = 0.0 + + +def calculate_max_drawdown( + trades: pd.DataFrame, + *, + date_col: str = "close_date", + value_col: str = "profit_abs", + starting_balance: float = 0, + relative: bool = False, +) -> DrawDownResult: """ Calculate max drawdown and the corresponding close dates :param trades: DataFrame containing trades (requires columns close_date and profit_ratio) :param date_col: Column in DataFrame to use for dates (defaults to 'close_date') :param value_col: Column in DataFrame to use for values (defaults to 'profit_abs') :param starting_balance: Portfolio starting balance - properly calculate relative drawdown. - :return: Tuple (float, highdate, lowdate, highvalue, lowvalue, relative_drawdown) + :return: DrawDownResult object with absolute max drawdown, high and low time and high and low value, and the relative account drawdown :raise: ValueError if trade-dataframe was found empty. @@ -172,32 +194,31 @@ def calculate_max_drawdown(trades: pd.DataFrame, *, date_col: str = 'close_date' raise ValueError("Trade dataframe empty.") profit_results = trades.sort_values(date_col).reset_index(drop=True) max_drawdown_df = _calc_drawdown_series( - profit_results, - date_col=date_col, - value_col=value_col, - starting_balance=starting_balance + profit_results, date_col=date_col, value_col=value_col, starting_balance=starting_balance ) idxmin = ( - max_drawdown_df['drawdown_relative'].idxmax() - if relative else max_drawdown_df['drawdown'].idxmin() + max_drawdown_df["drawdown_relative"].idxmax() + if relative + else max_drawdown_df["drawdown"].idxmin() ) if idxmin == 0: raise ValueError("No losing trade, therefore no drawdown.") - high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]['high_value'].idxmax(), date_col] + high_date = profit_results.loc[max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), date_col] low_date = profit_results.loc[idxmin, date_col] - high_val = max_drawdown_df.loc[max_drawdown_df.iloc[:idxmin] - ['high_value'].idxmax(), 'cumulative'] - low_val = max_drawdown_df.loc[idxmin, 'cumulative'] - max_drawdown_rel = max_drawdown_df.loc[idxmin, 'drawdown_relative'] + high_val = max_drawdown_df.loc[ + max_drawdown_df.iloc[:idxmin]["high_value"].idxmax(), "cumulative" + ] + low_val = max_drawdown_df.loc[idxmin, "cumulative"] + max_drawdown_rel = max_drawdown_df.loc[idxmin, "drawdown_relative"] - return ( - abs(max_drawdown_df.loc[idxmin, 'drawdown']), - high_date, - low_date, - high_val, - low_val, - max_drawdown_rel + return DrawDownResult( + drawdown_abs=abs(max_drawdown_df.loc[idxmin, "drawdown"]), + high_date=high_date, + low_date=low_date, + high_value=high_val, + low_value=low_val, + relative_account_drawdown=max_drawdown_rel, ) @@ -213,9 +234,9 @@ def calculate_csum(trades: pd.DataFrame, starting_balance: float = 0) -> Tuple[f raise ValueError("Trade dataframe empty.") csum_df = pd.DataFrame() - csum_df['sum'] = trades['profit_abs'].cumsum() - csum_min = csum_df['sum'].min() + starting_balance - csum_max = csum_df['sum'].max() + starting_balance + csum_df["sum"] = 
trades["profit_abs"].cumsum() + csum_min = csum_df["sum"].min() + starting_balance + csum_max = csum_df["sum"].max() + starting_balance return csum_min, csum_max @@ -245,28 +266,29 @@ def calculate_expectancy(trades: pd.DataFrame) -> Tuple[float, float]: expectancy_ratio = 100 if len(trades) > 0: - winning_trades = trades.loc[trades['profit_abs'] > 0] - losing_trades = trades.loc[trades['profit_abs'] < 0] - profit_sum = winning_trades['profit_abs'].sum() - loss_sum = abs(losing_trades['profit_abs'].sum()) + winning_trades = trades.loc[trades["profit_abs"] > 0] + losing_trades = trades.loc[trades["profit_abs"] < 0] + profit_sum = winning_trades["profit_abs"].sum() + loss_sum = abs(losing_trades["profit_abs"].sum()) nb_win_trades = len(winning_trades) nb_loss_trades = len(losing_trades) average_win = (profit_sum / nb_win_trades) if nb_win_trades > 0 else 0 average_loss = (loss_sum / nb_loss_trades) if nb_loss_trades > 0 else 0 - winrate = (nb_win_trades / len(trades)) - loserate = (nb_loss_trades / len(trades)) + winrate = nb_win_trades / len(trades) + loserate = nb_loss_trades / len(trades) expectancy = (winrate * average_win) - (loserate * average_loss) - if (average_loss > 0): + if average_loss > 0: risk_reward_ratio = average_win / average_loss expectancy_ratio = ((1 + risk_reward_ratio) * winrate) - 1 return expectancy, expectancy_ratio -def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: datetime, - starting_balance: float) -> float: +def calculate_sortino( + trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float +) -> float: """ Calculate sortino :param trades: DataFrame containing trades (requires columns profit_abs) @@ -275,12 +297,12 @@ def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: dateti if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date): return 0 - total_profit = trades['profit_abs'] / starting_balance + total_profit = trades["profit_abs"] / starting_balance days_period = max(1, (max_date - min_date).days) expected_returns_mean = total_profit.sum() / days_period - down_stdev = np.std(trades.loc[trades['profit_abs'] < 0, 'profit_abs'] / starting_balance) + down_stdev = np.std(trades.loc[trades["profit_abs"] < 0, "profit_abs"] / starting_balance) if down_stdev != 0 and not np.isnan(down_stdev): sortino_ratio = expected_returns_mean / down_stdev * np.sqrt(365) @@ -292,8 +314,9 @@ def calculate_sortino(trades: pd.DataFrame, min_date: datetime, max_date: dateti return sortino_ratio -def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetime, - starting_balance: float) -> float: +def calculate_sharpe( + trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float +) -> float: """ Calculate sharpe :param trades: DataFrame containing trades (requires column profit_abs) @@ -302,7 +325,7 @@ def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetim if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date): return 0 - total_profit = trades['profit_abs'] / starting_balance + total_profit = trades["profit_abs"] / starting_balance days_period = max(1, (max_date - min_date).days) expected_returns_mean = total_profit.sum() / days_period @@ -318,8 +341,9 @@ def calculate_sharpe(trades: pd.DataFrame, min_date: datetime, max_date: datetim return sharp_ratio -def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetime, - starting_balance: float) -> float: 
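`calculate_max_drawdown` above now returns the new `DrawDownResult` dataclass instead of a six-element tuple; the `calculate_calmar` hunk that follows shows the corresponding caller change. A sketch of how caller code migrates, using a hypothetical trades frame:

```python
import pandas as pd

from freqtrade.data.metrics import calculate_max_drawdown

# Hypothetical trades frame with the columns calculate_max_drawdown expects.
trades = pd.DataFrame(
    {
        "close_date": pd.to_datetime(["2024-01-01", "2024-01-02", "2024-01-03"]),
        "profit_abs": [50.0, -80.0, 30.0],
    }
)

# Before this change: positional unpacking of the returned tuple, e.g.
#   _, high_date, low_date, _, _, max_drawdown = calculate_max_drawdown(...)
# After: named attribute access on the DrawDownResult dataclass.
drawdown = calculate_max_drawdown(trades, value_col="profit_abs", starting_balance=1000)
print(drawdown.drawdown_abs)               # absolute drawdown (80.0 for this data)
print(drawdown.high_date, drawdown.low_date)
print(drawdown.relative_account_drawdown)  # replaces the old sixth tuple element
```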
+def calculate_calmar( + trades: pd.DataFrame, min_date: datetime, max_date: datetime, starting_balance: float +) -> float: """ Calculate calmar :param trades: DataFrame containing trades (requires columns close_date and profit_abs) @@ -328,7 +352,7 @@ def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetim if (len(trades) == 0) or (min_date is None) or (max_date is None) or (min_date == max_date): return 0 - total_profit = trades['profit_abs'].sum() / starting_balance + total_profit = trades["profit_abs"].sum() / starting_balance days_period = max(1, (max_date - min_date).days) # adding slippage of 0.1% per trade @@ -337,9 +361,10 @@ def calculate_calmar(trades: pd.DataFrame, min_date: datetime, max_date: datetim # calculate max drawdown try: - _, _, _, _, _, max_drawdown = calculate_max_drawdown( + drawdown = calculate_max_drawdown( trades, value_col="profit_abs", starting_balance=starting_balance ) + max_drawdown = drawdown.relative_account_drawdown except ValueError: max_drawdown = 0 diff --git a/freqtrade/edge/edge_positioning.py b/freqtrade/edge/edge_positioning.py index d863be03b..b6cc1a7df 100644 --- a/freqtrade/edge/edge_positioning.py +++ b/freqtrade/edge/edge_positioning.py @@ -1,5 +1,6 @@ # pragma pylint: disable=W0603 -""" Edge positioning package """ +"""Edge positioning package""" + import logging from collections import defaultdict from copy import deepcopy @@ -46,48 +47,49 @@ class Edge: _cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs def __init__(self, config: Config, exchange, strategy) -> None: - self.config = config self.exchange = exchange self.strategy: IStrategy = strategy - self.edge_config = self.config.get('edge', {}) + self.edge_config = self.config.get("edge", {}) self._cached_pairs: Dict[str, Any] = {} # Keeps a list of pairs self._final_pairs: list = [] # checking max_open_trades. 
it should be -1 as with Edge # the number of trades is determined by position size - if self.config['max_open_trades'] != float('inf'): - logger.critical('max_open_trades should be -1 in config !') + if self.config["max_open_trades"] != float("inf"): + logger.critical("max_open_trades should be -1 in config !") - if self.config['stake_amount'] != UNLIMITED_STAKE_AMOUNT: - raise OperationalException('Edge works only with unlimited stake amount') + if self.config["stake_amount"] != UNLIMITED_STAKE_AMOUNT: + raise OperationalException("Edge works only with unlimited stake amount") - self._capital_ratio: float = self.config['tradable_balance_ratio'] - self._allowed_risk: float = self.edge_config.get('allowed_risk') - self._since_number_of_days: int = self.edge_config.get('calculate_since_number_of_days', 14) + self._capital_ratio: float = self.config["tradable_balance_ratio"] + self._allowed_risk: float = self.edge_config.get("allowed_risk") + self._since_number_of_days: int = self.edge_config.get("calculate_since_number_of_days", 14) self._last_updated: int = 0 # Timestamp of pairs last updated time self._refresh_pairs = True - self._stoploss_range_min = float(self.edge_config.get('stoploss_range_min', -0.01)) - self._stoploss_range_max = float(self.edge_config.get('stoploss_range_max', -0.05)) - self._stoploss_range_step = float(self.edge_config.get('stoploss_range_step', -0.001)) + self._stoploss_range_min = float(self.edge_config.get("stoploss_range_min", -0.01)) + self._stoploss_range_max = float(self.edge_config.get("stoploss_range_max", -0.05)) + self._stoploss_range_step = float(self.edge_config.get("stoploss_range_step", -0.001)) # calculating stoploss range self._stoploss_range = np.arange( - self._stoploss_range_min, - self._stoploss_range_max, - self._stoploss_range_step + self._stoploss_range_min, self._stoploss_range_max, self._stoploss_range_step ) self._timerange: TimeRange = TimeRange.parse_timerange( - f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-") - if config.get('fee'): - self.fee = config['fee'] + f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-" + ) + if config.get("fee"): + self.fee = config["fee"] else: try: - self.fee = self.exchange.get_fee(symbol=expand_pairlist( - self.config['exchange']['pair_whitelist'], list(self.exchange.markets))[0]) + self.fee = self.exchange.get_fee( + symbol=expand_pairlist( + self.config["exchange"]["pair_whitelist"], list(self.exchange.markets) + )[0] + ) except IndexError: self.fee = None @@ -95,28 +97,30 @@ class Edge: if self.fee is None and pairs: self.fee = self.exchange.get_fee(pairs[0]) - heartbeat = self.edge_config.get('process_throttle_secs') + heartbeat = self.edge_config.get("process_throttle_secs") if (self._last_updated > 0) and ( - self._last_updated + heartbeat > int(dt_now().timestamp())): + self._last_updated + heartbeat > int(dt_now().timestamp()) + ): return False data: Dict[str, Any] = {} - logger.info('Using stake_currency: %s ...', self.config['stake_currency']) - logger.info('Using local backtesting data (using whitelist in given config) ...') + logger.info("Using stake_currency: %s ...", self.config["stake_currency"]) + logger.info("Using local backtesting data (using whitelist in given config) ...") if self._refresh_pairs: timerange_startup = deepcopy(self._timerange) - timerange_startup.subtract_start(timeframe_to_seconds( - self.strategy.timeframe) * self.strategy.startup_candle_count) + timerange_startup.subtract_start( + 
timeframe_to_seconds(self.strategy.timeframe) * self.strategy.startup_candle_count + ) refresh_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=pairs, exchange=self.exchange, timeframe=self.strategy.timeframe, timerange=timerange_startup, - data_format=self.config['dataformat_ohlcv'], - candle_type=self.config.get('candle_type_def', CandleType.SPOT), + data_format=self.config["dataformat_ohlcv"], + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) # Download informative pairs too res = defaultdict(list) @@ -124,26 +128,27 @@ class Edge: res[timeframe].append(pair) for timeframe, inf_pairs in res.items(): timerange_startup = deepcopy(self._timerange) - timerange_startup.subtract_start(timeframe_to_seconds( - timeframe) * self.strategy.startup_candle_count) + timerange_startup.subtract_start( + timeframe_to_seconds(timeframe) * self.strategy.startup_candle_count + ) refresh_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=inf_pairs, exchange=self.exchange, timeframe=timeframe, timerange=timerange_startup, - data_format=self.config['dataformat_ohlcv'], - candle_type=self.config.get('candle_type_def', CandleType.SPOT), + data_format=self.config["dataformat_ohlcv"], + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) data = load_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=pairs, timeframe=self.strategy.timeframe, timerange=self._timerange, startup_candles=self.strategy.startup_candle_count, - data_format=self.config['dataformat_ohlcv'], - candle_type=self.config.get('candle_type_def', CandleType.SPOT), + data_format=self.config["dataformat_ohlcv"], + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) if not data: @@ -152,27 +157,29 @@ class Edge: logger.critical("No data found. Edge is stopped ...") return False # Fake run-mode to Edge - prior_rm = self.config['runmode'] - self.config['runmode'] = RunMode.EDGE + prior_rm = self.config["runmode"] + self.config["runmode"] = RunMode.EDGE preprocessed = self.strategy.advise_all_indicators(data) - self.config['runmode'] = prior_rm + self.config["runmode"] = prior_rm # Print timeframe min_date, max_date = get_timerange(preprocessed) - logger.info(f'Measuring data from {min_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'({(max_date - min_date).days} days)..') + logger.info( + f"Measuring data from {min_date.strftime(DATETIME_PRINT_FORMAT)} " + f"up to {max_date.strftime(DATETIME_PRINT_FORMAT)} " + f"({(max_date - min_date).days} days).." + ) # TODO: Should edge support shorts? 
needs to be investigated further # * (add enter_short exit_short) - headers = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long'] + headers = ["date", "open", "high", "low", "close", "enter_long", "exit_long"] trades: list = [] for pair, pair_data in preprocessed.items(): # Sorting dataframe by date and reset index - pair_data = pair_data.sort_values(by=['date']) + pair_data = pair_data.sort_values(by=["date"]) pair_data = pair_data.reset_index(drop=True) - df_analyzed = self.strategy.ft_advise_signals(pair_data, {'pair': pair})[headers].copy() + df_analyzed = self.strategy.ft_advise_signals(pair_data, {"pair": pair})[headers].copy() trades += self._find_trades_for_stoploss_range(df_analyzed, pair, self._stoploss_range) @@ -188,8 +195,9 @@ class Edge: return True - def stake_amount(self, pair: str, free_capital: float, - total_capital: float, capital_in_trade: float) -> float: + def stake_amount( + self, pair: str, free_capital: float, total_capital: float, capital_in_trade: float + ) -> float: stoploss = self.get_stoploss(pair) available_capital = (total_capital + capital_in_trade) * self._capital_ratio allowed_capital_at_risk = available_capital * self._allowed_risk @@ -198,14 +206,18 @@ class Edge: position_size = min(min(max_position_size, free_capital), available_capital) if pair in self._cached_pairs: logger.info( - 'winrate: %s, expectancy: %s, position size: %s, pair: %s,' - ' capital in trade: %s, free capital: %s, total capital: %s,' - ' stoploss: %s, available capital: %s.', + "winrate: %s, expectancy: %s, position size: %s, pair: %s," + " capital in trade: %s, free capital: %s, total capital: %s," + " stoploss: %s, available capital: %s.", self._cached_pairs[pair].winrate, self._cached_pairs[pair].expectancy, - position_size, pair, - capital_in_trade, free_capital, total_capital, - stoploss, available_capital + position_size, + pair, + capital_in_trade, + free_capital, + total_capital, + stoploss, + available_capital, ) return round(position_size, 15) @@ -213,8 +225,10 @@ class Edge: if pair in self._cached_pairs: return self._cached_pairs[pair].stoploss else: - logger.warning(f'Tried to access stoploss of non-existing pair {pair}, ' - 'strategy stoploss is returned instead.') + logger.warning( + f"Tried to access stoploss of non-existing pair {pair}, " + "strategy stoploss is returned instead." + ) return self.strategy.stoploss def adjust(self, pairs: List[str]) -> list: @@ -224,8 +238,8 @@ class Edge: final = [] for pair, info in self._cached_pairs.items(): if ( - info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) - and info.winrate > float(self.edge_config.get('minimum_winrate', 0.60)) + info.expectancy > float(self.edge_config.get("minimum_expectancy", 0.2)) + and info.winrate > float(self.edge_config.get("minimum_winrate", 0.60)) and pair in pairs ): final.append(pair) @@ -234,14 +248,14 @@ class Edge: self._final_pairs = final if self._final_pairs: logger.info( - 'Minimum expectancy and minimum winrate are met only for %s,' - ' so other pairs are filtered out.', - self._final_pairs + "Minimum expectancy and minimum winrate are met only for %s," + " so other pairs are filtered out.", + self._final_pairs, ) else: logger.info( - 'Edge removed all pairs as no pair with minimum expectancy ' - 'and minimum winrate was found !' + "Edge removed all pairs as no pair with minimum expectancy " + "and minimum winrate was found !" 
) return self._final_pairs @@ -252,14 +266,17 @@ class Edge: """ final = [] for pair, info in self._cached_pairs.items(): - if (info.expectancy > float(self.edge_config.get('minimum_expectancy', 0.2)) and - info.winrate > float(self.edge_config.get('minimum_winrate', 0.60))): - final.append({ - 'Pair': pair, - 'Winrate': info.winrate, - 'Expectancy': info.expectancy, - 'Stoploss': info.stoploss, - }) + if info.expectancy > float( + self.edge_config.get("minimum_expectancy", 0.2) + ) and info.winrate > float(self.edge_config.get("minimum_winrate", 0.60)): + final.append( + { + "Pair": pair, + "Winrate": info.winrate, + "Expectancy": info.expectancy, + "Stoploss": info.stoploss, + } + ) return final def _fill_calculable_fields(self, result: DataFrame) -> DataFrame: @@ -279,28 +296,29 @@ class Edge: # All returned values are relative, they are defined as ratios. stake = 0.015 - result['trade_duration'] = result['close_date'] - result['open_date'] + result["trade_duration"] = result["close_date"] - result["open_date"] - result['trade_duration'] = result['trade_duration'].map( - lambda x: int(x.total_seconds() / 60)) + result["trade_duration"] = result["trade_duration"].map( + lambda x: int(x.total_seconds() / 60) + ) # Spends, Takes, Profit, Absolute Profit # Buy Price - result['buy_vol'] = stake / result['open_rate'] # How many target are we buying - result['buy_fee'] = stake * self.fee - result['buy_spend'] = stake + result['buy_fee'] # How much we're spending + result["buy_vol"] = stake / result["open_rate"] # How many target are we buying + result["buy_fee"] = stake * self.fee + result["buy_spend"] = stake + result["buy_fee"] # How much we're spending # Sell price - result['sell_sum'] = result['buy_vol'] * result['close_rate'] - result['sell_fee'] = result['sell_sum'] * self.fee - result['sell_take'] = result['sell_sum'] - result['sell_fee'] + result["sell_sum"] = result["buy_vol"] * result["close_rate"] + result["sell_fee"] = result["sell_sum"] * self.fee + result["sell_take"] = result["sell_sum"] - result["sell_fee"] # profit_ratio - result['profit_ratio'] = (result['sell_take'] - result['buy_spend']) / result['buy_spend'] + result["profit_ratio"] = (result["sell_take"] - result["buy_spend"]) / result["buy_spend"] # Absolute profit - result['profit_abs'] = result['sell_take'] - result['buy_spend'] + result["profit_abs"] = result["sell_take"] - result["buy_spend"] return result @@ -310,8 +328,8 @@ class Edge: The calculation will be done per pair and per strategy. 
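The Edge hunks around this point reduce each (pair, stoploss) group to a winrate, a risk/reward ratio and an expectancy, and `adjust` keeps only pairs above `minimum_expectancy` / `minimum_winrate`. A worked example with hypothetical trade statistics, following the formulas spelled out in the aggregation code below:

```python
# Hypothetical aggregates for one (pair, stoploss) group.
nb_trades = 20
nb_win_trades = 13
average_win = 0.03   # average profit of winning trades (relative)
average_loss = 0.02  # average loss of losing trades (relative, positive)

winrate = nb_win_trades / nb_trades                       # 0.65
risk_reward_ratio = average_win / average_loss            # 1.5
required_risk_reward = (1 / winrate) - 1                  # ~0.538
expectancy = risk_reward_ratio * winrate - (1 - winrate)  # 1.5 * 0.65 - 0.35 = 0.625

# With the defaults shown above (minimum_expectancy=0.2, minimum_winrate=0.60),
# this pair would pass the adjust() filter.
print(winrate, risk_reward_ratio, required_risk_reward, expectancy)
```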
""" # Removing pairs having less than min_trades_number - min_trades_number = self.edge_config.get('min_trade_number', 10) - results = results.groupby(['pair', 'stoploss']).filter(lambda x: len(x) > min_trades_number) + min_trades_number = self.edge_config.get("min_trade_number", 10) + results = results.groupby(["pair", "stoploss"]).filter(lambda x: len(x) > min_trades_number) ################################### # Removing outliers (Only Pumps) from the dataset @@ -319,13 +337,15 @@ class Edge: # Then every value more than (standard deviation + 2*average) is out (pump) # # Removing Pumps - if self.edge_config.get('remove_pumps', False): - results = results[results['profit_abs'] < 2 * results['profit_abs'].std() - + results['profit_abs'].mean()] + if self.edge_config.get("remove_pumps", False): + results = results[ + results["profit_abs"] + < 2 * results["profit_abs"].std() + results["profit_abs"].mean() + ] ########################################################################## # Removing trades having a duration more than X minutes (set in config) - max_trade_duration = self.edge_config.get('max_trade_duration_minute', 1440) + max_trade_duration = self.edge_config.get("max_trade_duration_minute", 1440) results = results[results.trade_duration < max_trade_duration] ####################################################################### @@ -333,44 +353,54 @@ class Edge: return {} groupby_aggregator = { - 'profit_abs': [ - ('nb_trades', 'count'), # number of all trades - ('profit_sum', lambda x: x[x > 0].sum()), # cumulative profit of all winning trades - ('loss_sum', lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades - ('nb_win_trades', lambda x: x[x > 0].count()) # number of winning trades + "profit_abs": [ + ("nb_trades", "count"), # number of all trades + ("profit_sum", lambda x: x[x > 0].sum()), # cumulative profit of all winning trades + ("loss_sum", lambda x: abs(x[x < 0].sum())), # cumulative loss of all losing trades + ("nb_win_trades", lambda x: x[x > 0].count()), # number of winning trades ], - 'trade_duration': [('avg_trade_duration', 'mean')] + "trade_duration": [("avg_trade_duration", "mean")], } # Group by (pair and stoploss) by applying above aggregator - df = results.groupby(['pair', 'stoploss'])[['profit_abs', 'trade_duration']].agg( - groupby_aggregator).reset_index(col_level=1) + df = ( + results.groupby(["pair", "stoploss"])[["profit_abs", "trade_duration"]] + .agg(groupby_aggregator) + .reset_index(col_level=1) + ) # Dropping level 0 as we don't need it df.columns = df.columns.droplevel(0) # Calculating number of losing trades, average win and average loss - df['nb_loss_trades'] = df['nb_trades'] - df['nb_win_trades'] - df['average_win'] = np.where(df['nb_win_trades'] == 0, 0.0, - df['profit_sum'] / df['nb_win_trades']) - df['average_loss'] = np.where(df['nb_loss_trades'] == 0, 0.0, - df['loss_sum'] / df['nb_loss_trades']) + df["nb_loss_trades"] = df["nb_trades"] - df["nb_win_trades"] + df["average_win"] = np.where( + df["nb_win_trades"] == 0, 0.0, df["profit_sum"] / df["nb_win_trades"] + ) + df["average_loss"] = np.where( + df["nb_loss_trades"] == 0, 0.0, df["loss_sum"] / df["nb_loss_trades"] + ) # Win rate = number of profitable trades / number of trades - df['winrate'] = df['nb_win_trades'] / df['nb_trades'] + df["winrate"] = df["nb_win_trades"] / df["nb_trades"] # risk_reward_ratio = average win / average loss - df['risk_reward_ratio'] = df['average_win'] / df['average_loss'] + df["risk_reward_ratio"] = df["average_win"] / df["average_loss"] # 
required_risk_reward = (1 / winrate) - 1 - df['required_risk_reward'] = (1 / df['winrate']) - 1 + df["required_risk_reward"] = (1 / df["winrate"]) - 1 # expectancy = (risk_reward_ratio * winrate) - (lossrate) - df['expectancy'] = (df['risk_reward_ratio'] * df['winrate']) - (1 - df['winrate']) + df["expectancy"] = (df["risk_reward_ratio"] * df["winrate"]) - (1 - df["winrate"]) # sort by expectancy and stoploss - df = df.sort_values(by=['expectancy', 'stoploss'], ascending=False).groupby( - 'pair').first().sort_values(by=['expectancy'], ascending=False).reset_index() + df = ( + df.sort_values(by=["expectancy", "stoploss"], ascending=False) + .groupby("pair") + .first() + .sort_values(by=["expectancy"], ascending=False) + .reset_index() + ) final = {} for x in df.itertuples(): @@ -381,17 +411,17 @@ class Edge: x.required_risk_reward, x.expectancy, x.nb_trades, - x.avg_trade_duration + x.avg_trade_duration, ) # Returning a list of pairs in order of "expectancy" return final def _find_trades_for_stoploss_range(self, df, pair: str, stoploss_range) -> list: - buy_column = df['enter_long'].values - sell_column = df['exit_long'].values - date_column = df['date'].values - ohlc_columns = df[['open', 'high', 'low', 'close']].values + buy_column = df["enter_long"].values + sell_column = df["exit_long"].values + date_column = df["date"].values + ohlc_columns = df[["open", "high", "low", "close"]].values result: list = [] for stoploss in stoploss_range: @@ -401,8 +431,9 @@ class Edge: return result - def _detect_next_stop_or_sell_point(self, buy_column, sell_column, date_column, - ohlc_columns, stoploss, pair: str): + def _detect_next_stop_or_sell_point( + self, buy_column, sell_column, date_column, ohlc_columns, stoploss, pair: str + ): """ Iterate through ohlc_columns in order to find the next trade Next trade opens from the first buy signal noticed to @@ -429,27 +460,28 @@ class Edge: open_trade_index += 1 open_price = ohlc_columns[open_trade_index, 0] - stop_price = (open_price * (stoploss + 1)) + stop_price = open_price * (stoploss + 1) # Searching for the index where stoploss is hit stop_index = utf1st.find_1st( - ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller) + ohlc_columns[open_trade_index:, 2], stop_price, utf1st.cmp_smaller + ) # If we don't find it then we assume stop_index will be far in future (infinite number) if stop_index == -1: - stop_index = float('inf') + stop_index = float("inf") # Searching for the index where sell is hit sell_index = utf1st.find_1st(sell_column[open_trade_index:], 1, utf1st.cmp_equal) # If we don't find it then we assume sell_index will be far in future (infinite number) if sell_index == -1: - sell_index = float('inf') + sell_index = float("inf") # Check if we don't find any stop or sell point (in that case trade remains open) # It is not interesting for Edge to consider it so we simply ignore the trade # And stop iterating there is no more entry - if stop_index == sell_index == float('inf'): + if stop_index == sell_index == float("inf"): break if stop_index <= sell_index: @@ -467,17 +499,18 @@ class Edge: exit_type = ExitType.EXIT_SIGNAL exit_price = ohlc_columns[exit_index, 0] - trade = {'pair': pair, - 'stoploss': stoploss, - 'profit_ratio': '', - 'profit_abs': '', - 'open_date': date_column[open_trade_index], - 'close_date': date_column[exit_index], - 'trade_duration': '', - 'open_rate': round(open_price, 15), - 'close_rate': round(exit_price, 15), - 'exit_type': exit_type - } + trade = { + "pair": pair, + "stoploss": stoploss, + 
"profit_ratio": "", + "profit_abs": "", + "open_date": date_column[open_trade_index], + "close_date": date_column[exit_index], + "trade_duration": "", + "open_rate": round(open_price, 15), + "close_rate": round(exit_price, 15), + "exit_type": exit_type, + } result.append(trade) diff --git a/freqtrade/enums/backteststate.py b/freqtrade/enums/backteststate.py index 490814497..f9c8d0ab9 100644 --- a/freqtrade/enums/backteststate.py +++ b/freqtrade/enums/backteststate.py @@ -5,6 +5,7 @@ class BacktestState(Enum): """ Bot application states """ + STARTUP = 1 DATALOAD = 2 ANALYZE = 3 diff --git a/freqtrade/enums/candletype.py b/freqtrade/enums/candletype.py index dcb9f1448..eb7ae50b8 100644 --- a/freqtrade/enums/candletype.py +++ b/freqtrade/enums/candletype.py @@ -3,6 +3,7 @@ from enum import Enum class CandleType(str, Enum): """Enum to distinguish candle types""" + SPOT = "spot" FUTURES = "futures" MARK = "mark" @@ -17,14 +18,14 @@ class CandleType(str, Enum): return f"{self.name.lower()}" @staticmethod - def from_string(value: str) -> 'CandleType': + def from_string(value: str) -> "CandleType": if not value: # Default to spot return CandleType.SPOT return CandleType(value) @staticmethod - def get_default(trading_mode: str) -> 'CandleType': - if trading_mode == 'futures': + def get_default(trading_mode: str) -> "CandleType": + if trading_mode == "futures": return CandleType.FUTURES return CandleType.SPOT diff --git a/freqtrade/enums/exitchecktuple.py b/freqtrade/enums/exitchecktuple.py index cb6411caf..686b1742c 100644 --- a/freqtrade/enums/exitchecktuple.py +++ b/freqtrade/enums/exitchecktuple.py @@ -5,10 +5,11 @@ class ExitCheckTuple: """ NamedTuple for Exit type + reason """ - exit_type: ExitType - exit_reason: str = '' - def __init__(self, exit_type: ExitType, exit_reason: str = ''): + exit_type: ExitType + exit_reason: str = "" + + def __init__(self, exit_type: ExitType, exit_reason: str = ""): self.exit_type = exit_type self.exit_reason = exit_reason or exit_type.value diff --git a/freqtrade/enums/exittype.py b/freqtrade/enums/exittype.py index c21b62667..630cb3dd1 100644 --- a/freqtrade/enums/exittype.py +++ b/freqtrade/enums/exittype.py @@ -5,6 +5,7 @@ class ExitType(Enum): """ Enum to distinguish between exit reasons """ + ROI = "roi" STOP_LOSS = "stop_loss" STOPLOSS_ON_EXCHANGE = "stoploss_on_exchange" diff --git a/freqtrade/enums/hyperoptstate.py b/freqtrade/enums/hyperoptstate.py index 6716e123a..68326505e 100644 --- a/freqtrade/enums/hyperoptstate.py +++ b/freqtrade/enums/hyperoptstate.py @@ -2,7 +2,8 @@ from enum import Enum class HyperoptState(Enum): - """ Hyperopt states """ + """Hyperopt states""" + STARTUP = 1 DATALOAD = 2 INDICATORS = 3 diff --git a/freqtrade/enums/marginmode.py b/freqtrade/enums/marginmode.py index 7fd749b29..0e8887a9a 100644 --- a/freqtrade/enums/marginmode.py +++ b/freqtrade/enums/marginmode.py @@ -7,6 +7,7 @@ class MarginMode(str, Enum): cross margin/futures margin_mode and isolated margin/futures margin_mode """ + CROSS = "cross" ISOLATED = "isolated" - NONE = '' + NONE = "" diff --git a/freqtrade/enums/marketstatetype.py b/freqtrade/enums/marketstatetype.py index 5cede32c2..92eb75377 100644 --- a/freqtrade/enums/marketstatetype.py +++ b/freqtrade/enums/marketstatetype.py @@ -5,6 +5,7 @@ class MarketDirection(Enum): """ Enum for various market directions. 
""" + LONG = "long" SHORT = "short" EVEN = "even" diff --git a/freqtrade/enums/ordertypevalue.py b/freqtrade/enums/ordertypevalue.py index 9bb716171..3a253166d 100644 --- a/freqtrade/enums/ordertypevalue.py +++ b/freqtrade/enums/ordertypevalue.py @@ -2,5 +2,5 @@ from enum import Enum class OrderTypeValues(str, Enum): - limit = 'limit' - market = 'market' + limit = "limit" + market = "market" diff --git a/freqtrade/enums/pricetype.py b/freqtrade/enums/pricetype.py index bf0922b9f..f2e3bef23 100644 --- a/freqtrade/enums/pricetype.py +++ b/freqtrade/enums/pricetype.py @@ -3,6 +3,7 @@ from enum import Enum class PriceType(str, Enum): """Enum to distinguish possible trigger prices for stoplosses""" + LAST = "last" MARK = "mark" INDEX = "index" diff --git a/freqtrade/enums/rpcmessagetype.py b/freqtrade/enums/rpcmessagetype.py index 16d81b1d8..6fdd788e8 100644 --- a/freqtrade/enums/rpcmessagetype.py +++ b/freqtrade/enums/rpcmessagetype.py @@ -2,27 +2,27 @@ from enum import Enum class RPCMessageType(str, Enum): - STATUS = 'status' - WARNING = 'warning' - EXCEPTION = 'exception' - STARTUP = 'startup' + STATUS = "status" + WARNING = "warning" + EXCEPTION = "exception" + STARTUP = "startup" - ENTRY = 'entry' - ENTRY_FILL = 'entry_fill' - ENTRY_CANCEL = 'entry_cancel' + ENTRY = "entry" + ENTRY_FILL = "entry_fill" + ENTRY_CANCEL = "entry_cancel" - EXIT = 'exit' - EXIT_FILL = 'exit_fill' - EXIT_CANCEL = 'exit_cancel' + EXIT = "exit" + EXIT_FILL = "exit_fill" + EXIT_CANCEL = "exit_cancel" - PROTECTION_TRIGGER = 'protection_trigger' - PROTECTION_TRIGGER_GLOBAL = 'protection_trigger_global' + PROTECTION_TRIGGER = "protection_trigger" + PROTECTION_TRIGGER_GLOBAL = "protection_trigger_global" - STRATEGY_MSG = 'strategy_msg' + STRATEGY_MSG = "strategy_msg" - WHITELIST = 'whitelist' - ANALYZED_DF = 'analyzed_df' - NEW_CANDLE = 'new_candle' + WHITELIST = "whitelist" + ANALYZED_DF = "analyzed_df" + NEW_CANDLE = "new_candle" def __repr__(self): return self.value @@ -33,10 +33,10 @@ class RPCMessageType(str, Enum): # Enum for parsing requests from ws consumers class RPCRequestType(str, Enum): - SUBSCRIBE = 'subscribe' + SUBSCRIBE = "subscribe" - WHITELIST = 'whitelist' - ANALYZED_DF = 'analyzed_df' + WHITELIST = "whitelist" + ANALYZED_DF = "analyzed_df" def __str__(self): return self.value diff --git a/freqtrade/enums/runmode.py b/freqtrade/enums/runmode.py index 17caea466..61409abf6 100644 --- a/freqtrade/enums/runmode.py +++ b/freqtrade/enums/runmode.py @@ -1,11 +1,12 @@ -from enum import Enum +from enum import StrEnum -class RunMode(Enum): +class RunMode(StrEnum): """ Bot running mode (backtest, hyperopt, ...) can be "live", "dry-run", "backtest", "edge", "hyperopt". 
""" + LIVE = "live" DRY_RUN = "dry_run" BACKTEST = "backtest" diff --git a/freqtrade/enums/signaltype.py b/freqtrade/enums/signaltype.py index b5af1f1b2..267a37ab3 100644 --- a/freqtrade/enums/signaltype.py +++ b/freqtrade/enums/signaltype.py @@ -5,6 +5,7 @@ class SignalType(Enum): """ Enum to distinguish between enter and exit signals """ + ENTER_LONG = "enter_long" EXIT_LONG = "exit_long" ENTER_SHORT = "enter_short" @@ -18,6 +19,7 @@ class SignalTagType(Enum): """ Enum for signal columns """ + ENTER_TAG = "enter_tag" EXIT_TAG = "exit_tag" @@ -26,8 +28,8 @@ class SignalTagType(Enum): class SignalDirection(str, Enum): - LONG = 'long' - SHORT = 'short' + LONG = "long" + SHORT = "short" def __str__(self): return f"{self.name.lower()}" diff --git a/freqtrade/enums/state.py b/freqtrade/enums/state.py index 572e2299f..1ce486920 100644 --- a/freqtrade/enums/state.py +++ b/freqtrade/enums/state.py @@ -5,6 +5,7 @@ class State(Enum): """ Bot application states """ + RUNNING = 1 STOPPED = 2 RELOAD_CONFIG = 3 diff --git a/freqtrade/enums/tradingmode.py b/freqtrade/enums/tradingmode.py index 2f838b7c6..62f9b4255 100644 --- a/freqtrade/enums/tradingmode.py +++ b/freqtrade/enums/tradingmode.py @@ -6,6 +6,7 @@ class TradingMode(str, Enum): Enum to distinguish between spot, margin, futures or any other trading method """ + SPOT = "spot" MARGIN = "margin" FUTURES = "futures" diff --git a/freqtrade/exchange/__init__.py b/freqtrade/exchange/__init__.py index 109f3c1e8..6510e8d4f 100644 --- a/freqtrade/exchange/__init__.py +++ b/freqtrade/exchange/__init__.py @@ -2,6 +2,7 @@ # isort: off from freqtrade.exchange.common import remove_exchange_credentials, MAP_EXCHANGE_CHILDCLASS from freqtrade.exchange.exchange import Exchange + # isort: on from freqtrade.exchange.binance import Binance from freqtrade.exchange.bingx import Bingx @@ -10,18 +11,30 @@ from freqtrade.exchange.bitpanda import Bitpanda from freqtrade.exchange.bitvavo import Bitvavo from freqtrade.exchange.bybit import Bybit from freqtrade.exchange.coinbasepro import Coinbasepro -from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision, - amount_to_contracts, amount_to_precision, - available_exchanges, ccxt_exchanges, - contracts_to_amount, date_minus_candles, - is_exchange_known_ccxt, list_available_exchanges, - market_is_active, price_to_precision, - validate_exchange) -from freqtrade.exchange.exchange_utils_timeframe import (timeframe_to_minutes, timeframe_to_msecs, - timeframe_to_next_date, - timeframe_to_prev_date, - timeframe_to_resample_freq, - timeframe_to_seconds) +from freqtrade.exchange.exchange_utils import ( + ROUND_DOWN, + ROUND_UP, + amount_to_contract_precision, + amount_to_contracts, + amount_to_precision, + available_exchanges, + ccxt_exchanges, + contracts_to_amount, + date_minus_candles, + is_exchange_known_ccxt, + list_available_exchanges, + market_is_active, + price_to_precision, + validate_exchange, +) +from freqtrade.exchange.exchange_utils_timeframe import ( + timeframe_to_minutes, + timeframe_to_msecs, + timeframe_to_next_date, + timeframe_to_prev_date, + timeframe_to_resample_freq, + timeframe_to_seconds, +) from freqtrade.exchange.gate import Gate from freqtrade.exchange.hitbtc import Hitbtc from freqtrade.exchange.htx import Htx diff --git a/freqtrade/exchange/binance.py b/freqtrade/exchange/binance.py index 8cfe52d51..07c4f9286 100644 --- a/freqtrade/exchange/binance.py +++ b/freqtrade/exchange/binance.py @@ -1,4 +1,5 @@ -""" Binance exchange subclass """ +"""Binance exchange 
subclass""" + import logging from datetime import datetime, timezone from pathlib import Path @@ -18,7 +19,6 @@ logger = logging.getLogger(__name__) class Binance(Exchange): - _ft_has: Dict = { "stoploss_on_exchange": True, "stop_price_param": "stopPrice", @@ -36,7 +36,7 @@ class Binance(Exchange): "tickers_have_price": False, "floor_leverage": True, "stop_price_type_field": "workingType", - "order_props_in_contracts": ['amount', 'cost', 'filled', 'remaining'], + "order_props_in_contracts": ["amount", "cost", "filled", "remaining"], "stop_price_type_value_mapping": { PriceType.LAST: "CONTRACT_PRICE", PriceType.MARK: "MARK_PRICE", @@ -67,36 +67,44 @@ class Binance(Exchange): Must be overridden in child methods if required. """ try: - if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']: + if self.trading_mode == TradingMode.FUTURES and not self._config["dry_run"]: position_side = self._api.fapiPrivateGetPositionSideDual() - self._log_exchange_response('position_side_setting', position_side) + self._log_exchange_response("position_side_setting", position_side) assets_margin = self._api.fapiPrivateGetMultiAssetsMargin() - self._log_exchange_response('multi_asset_margin', assets_margin) + self._log_exchange_response("multi_asset_margin", assets_margin) msg = "" - if position_side.get('dualSidePosition') is True: + if position_side.get("dualSidePosition") is True: msg += ( "\nHedge Mode is not supported by freqtrade. " - "Please change 'Position Mode' on your binance futures account.") - if assets_margin.get('multiAssetsMargin') is True: - msg += ("\nMulti-Asset Mode is not supported by freqtrade. " - "Please change 'Asset Mode' on your binance futures account.") + "Please change 'Position Mode' on your binance futures account." + ) + if assets_margin.get("multiAssetsMargin") is True: + msg += ( + "\nMulti-Asset Mode is not supported by freqtrade. " + "Please change 'Asset Mode' on your binance futures account." + ) if msg: raise OperationalException(msg) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}' - ) from e + f"Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e - async def _async_get_historic_ohlcv(self, pair: str, timeframe: str, - since_ms: int, candle_type: CandleType, - is_new_pair: bool = False, raise_: bool = False, - until_ms: Optional[int] = None - ) -> OHLCVResponse: + async def _async_get_historic_ohlcv( + self, + pair: str, + timeframe: str, + since_ms: int, + candle_type: CandleType, + is_new_pair: bool = False, + raise_: bool = False, + until_ms: Optional[int] = None, + ) -> OHLCVResponse: """ Overwrite to introduce "fast new pair" functionality by detecting the pair's listing date Does not work for other exchanges, which don't return the earliest data when called with "0" @@ -109,7 +117,8 @@ class Binance(Exchange): since_ms = x[3][0][0] logger.info( f"Candle-data for {pair} available starting with " - f"{datetime.fromtimestamp(since_ms // 1000, tz=timezone.utc).isoformat()}.") + f"{datetime.fromtimestamp(since_ms // 1000, tz=timezone.utc).isoformat()}." 
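The `_async_get_historic_ohlcv` override above relies on Binance returning its earliest candles when queried with a start time of 0, and uses the first returned timestamp as the pair's listing date so the download can start there. A rough synchronous sketch of the same idea using plain ccxt (pair and timeframe are arbitrary examples):

```python
import ccxt

exchange = ccxt.binance()
# On Binance, since=0 yields the oldest available candles; the first timestamp
# therefore approximates the pair's listing date.
candles = exchange.fetch_ohlcv("BTC/USDT", timeframe="1d", since=0, limit=1)
listing_ms = candles[0][0]
print(f"Earliest candle timestamp (ms): {listing_ms}")
```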
+ ) return await super()._async_get_historic_ohlcv( pair=pair, @@ -135,7 +144,7 @@ class Binance(Exchange): def dry_run_liquidation_price( self, pair: str, - open_rate: float, # Entry price of position + open_rate: float, # Entry price of position is_short: bool, amount: float, stake_amount: float, @@ -177,7 +186,7 @@ class Binance(Exchange): # maintenance_amt: (CUM) Maintenance Amount of position mm_ratio, maintenance_amt = self.get_maintenance_ratio_and_amt(pair, stake_amount) - if (maintenance_amt is None): + if maintenance_amt is None: raise OperationalException( "Parameter maintenance_amt is required by Binance.liquidation_price" f"for {self.trading_mode.value}" @@ -185,35 +194,20 @@ class Binance(Exchange): if self.trading_mode == TradingMode.FUTURES: return ( - ( - (wallet_balance + cross_vars + maintenance_amt) - - (side_1 * amount * open_rate) - ) / ( - (amount * mm_ratio) - (side_1 * amount) - ) - ) + (wallet_balance + cross_vars + maintenance_amt) - (side_1 * amount * open_rate) + ) / ((amount * mm_ratio) - (side_1 * amount)) else: raise OperationalException( - "Freqtrade only supports isolated futures for leverage trading") + "Freqtrade only supports isolated futures for leverage trading" + ) - @retrier def load_leverage_tiers(self) -> Dict[str, List[Dict]]: if self.trading_mode == TradingMode.FUTURES: - if self._config['dry_run']: - leverage_tiers_path = ( - Path(__file__).parent / 'binance_leverage_tiers.json' - ) + if self._config["dry_run"]: + leverage_tiers_path = Path(__file__).parent / "binance_leverage_tiers.json" with leverage_tiers_path.open() as json_file: return json_load(json_file) else: - try: - return self._api.fetch_leverage_tiers() - except ccxt.DDoSProtection as e: - raise DDosProtection(e) from e - except (ccxt.OperationFailed, ccxt.ExchangeError) as e: - raise TemporaryError(f'Could not fetch leverage amounts due to' - f'{e.__class__.__name__}. 
Message: {e}') from e - except ccxt.BaseError as e: - raise OperationalException(e) from e + return self.get_leverage_tiers() else: return {} diff --git a/freqtrade/exchange/binance_leverage_tiers.json b/freqtrade/exchange/binance_leverage_tiers.json index abc135395..71744bf36 100644 --- a/freqtrade/exchange/binance_leverage_tiers.json +++ b/freqtrade/exchange/binance_leverage_tiers.json @@ -1,4 +1,134 @@ { + "1000BONK/USDC:USDC": [ + { + "tier": 1.0, + "currency": "USDC", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDC", + "minNotional": 5000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, + "info": { + "bracket": "2", + "initialLeverage": "25", + "notionalCap": "50000", + "notionalFloor": "5000", + "maintMarginRatio": "0.02", + "cum": "50.0" + } + }, + { + "tier": 3.0, + "currency": "USDC", + "minNotional": 50000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "3", + "initialLeverage": "20", + "notionalCap": "600000", + "notionalFloor": "50000", + "maintMarginRatio": "0.025", + "cum": "300.0" + } + }, + { + "tier": 4.0, + "currency": "USDC", + "minNotional": 600000.0, + "maxNotional": 1200000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "4", + "initialLeverage": "10", + "notionalCap": "1200000", + "notionalFloor": "600000", + "maintMarginRatio": "0.05", + "cum": "15300.0" + } + }, + { + "tier": 5.0, + "currency": "USDC", + "minNotional": 1200000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "5", + "initialLeverage": "5", + "notionalCap": "3000000", + "notionalFloor": "1200000", + "maintMarginRatio": "0.1", + "cum": "75300.0" + } + }, + { + "tier": 6.0, + "currency": "USDC", + "minNotional": 3000000.0, + "maxNotional": 4000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "6", + "initialLeverage": "4", + "notionalCap": "4000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.125", + "cum": "150300.0" + } + }, + { + "tier": 7.0, + "currency": "USDC", + "minNotional": 4000000.0, + "maxNotional": 6000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "6000000", + "notionalFloor": "4000000", + "maintMarginRatio": "0.25", + "cum": "650300.0" + } + }, + { + "tier": 8.0, + "currency": "USDC", + "minNotional": 6000000.0, + "maxNotional": 10000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "8", + "initialLeverage": "1", + "notionalCap": "10000000", + "notionalFloor": "6000000", + "maintMarginRatio": "0.5", + "cum": "2150300.0" + } + } + ], "1000BONK/USDT:USDT": [ { "tier": 1.0, @@ -508,13 +638,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 10000.0, + "maxNotional": 50000.0, "maintenanceMarginRate": 0.02, "maxLeverage": 25.0, "info": { "bracket": "2", "initialLeverage": "25", - "notionalCap": "10000", + "notionalCap": "50000", "notionalFloor": "5000", "maintMarginRatio": "0.02", "cum": "25.0" @@ -523,97 +653,97 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 10000.0, - "maxNotional": 50000.0, + "minNotional": 50000.0, + "maxNotional": 200000.0, 
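`dry_run_liquidation_price` above plugs the matching tier's maintenance margin rate (`mm_ratio`) and cumulative maintenance amount (`maintenance_amt`, the `cum` value carried in the tier entries) into the isolated-futures formula. A worked example with hypothetical position values, assuming `side_1` is +1 for a long and `cross_vars` is 0 in isolated mode (both definitions sit in context not shown here):

```python
# Hypothetical isolated long sized to stay inside the first 1000BONK/USDC:USDC tier
# shown above (notional 0-5000, maintenanceMarginRate 0.01, cum 0.0).
wallet_balance = 800.0    # margin posted for the isolated position
amount = 200_000.0        # contracts
open_rate = 0.02          # entry price -> notional of 4000, inside tier 1
mm_ratio = 0.01           # maintenanceMarginRate of the matching tier
maintenance_amt = 0.0     # "cum" of the matching tier
side_1 = 1                # assumption: +1 for a long position
cross_vars = 0.0          # assumption: 0 when margin mode is isolated

liquidation_price = (
    (wallet_balance + cross_vars + maintenance_amt) - (side_1 * amount * open_rate)
) / ((amount * mm_ratio) - (side_1 * amount))

# ~0.016162: at this price the remaining margin equals the maintenance requirement.
print(round(liquidation_price, 6))
```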
"maintenanceMarginRate": 0.025, "maxLeverage": 20.0, "info": { "bracket": "3", "initialLeverage": "20", - "notionalCap": "50000", - "notionalFloor": "10000", + "notionalCap": "200000", + "notionalFloor": "50000", "maintMarginRatio": "0.025", - "cum": "75.0" + "cum": "275.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 50000.0, - "maxNotional": 1200000.0, + "minNotional": 200000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.05, "maxLeverage": 10.0, "info": { "bracket": "4", "initialLeverage": "10", - "notionalCap": "1200000", - "notionalFloor": "50000", + "notionalCap": "2000000", + "notionalFloor": "200000", "maintMarginRatio": "0.05", - "cum": "1325.0" + "cum": "5275.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 1200000.0, - "maxNotional": 3000000.0, + "minNotional": 2000000.0, + "maxNotional": 4000000.0, "maintenanceMarginRate": 0.1, "maxLeverage": 5.0, "info": { "bracket": "5", "initialLeverage": "5", - "notionalCap": "3000000", - "notionalFloor": "1200000", + "notionalCap": "4000000", + "notionalFloor": "2000000", "maintMarginRatio": "0.1", - "cum": "61325.0" + "cum": "105275.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 3000000.0, - "maxNotional": 3600000.0, + "minNotional": 4000000.0, + "maxNotional": 5000000.0, "maintenanceMarginRate": 0.125, "maxLeverage": 4.0, "info": { "bracket": "6", "initialLeverage": "4", - "notionalCap": "3600000", - "notionalFloor": "3000000", + "notionalCap": "5000000", + "notionalFloor": "4000000", "maintMarginRatio": "0.125", - "cum": "136325.0" + "cum": "205275.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 3600000.0, - "maxNotional": 9000000.0, + "minNotional": 5000000.0, + "maxNotional": 10000000.0, "maintenanceMarginRate": 0.25, "maxLeverage": 2.0, "info": { "bracket": "7", "initialLeverage": "2", - "notionalCap": "9000000", - "notionalFloor": "3600000", + "notionalCap": "10000000", + "notionalFloor": "5000000", "maintMarginRatio": "0.25", - "cum": "586325.0" + "cum": "830275.0" } }, { "tier": 8.0, "currency": "USDT", - "minNotional": 9000000.0, - "maxNotional": 15000000.0, + "minNotional": 10000000.0, + "maxNotional": 20000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "8", "initialLeverage": "1", - "notionalCap": "15000000", - "notionalFloor": "9000000", + "notionalCap": "20000000", + "notionalFloor": "10000000", "maintMarginRatio": "0.5", - "cum": "2836325.0" + "cum": "3330275.0" } } ], @@ -3041,104 +3171,6 @@ } } ], - "ANT/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.015, - "maxLeverage": 21.0, - "info": { - "bracket": "1", - "initialLeverage": "21", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.015", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "50.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "675.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - 
"maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5675.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11925.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "1500000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386925.0" - } - } - ], "APE/USDT:USDT": [ { "tier": 1.0, @@ -4701,104 +4733,6 @@ } } ], - "AUDIO/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 20.0, - "info": { - "bracket": "1", - "initialLeverage": "20", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 10.0, - "info": { - "bracket": "2", - "initialLeverage": "10", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "25.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 8.0, - "info": { - "bracket": "3", - "initialLeverage": "8", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5650.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11900.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 3000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "3000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386900.0" - } - } - ], "AVAX/USDC:USDC": [ { "tier": 1.0, @@ -5921,6 +5855,120 @@ } } ], + "BB/USDT:USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.015", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "2", + "initialLeverage": "20", + "notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": 
"50.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "675.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "200000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5675.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 200000.0, + "maxNotional": 500000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "500000", + "notionalFloor": "200000", + "maintMarginRatio": "0.125", + "cum": "10675.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 500000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "500000", + "maintMarginRatio": "0.25", + "cum": "73175.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 2000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "2000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "323175.0" + } + } + ], "BCH/USDC:USDC": [ { "tier": 1.0, @@ -6685,104 +6733,6 @@ } } ], - "BLUEBIRD/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 11.0, - "info": { - "bracket": "1", - "initialLeverage": "11", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.01", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 10.0, - "info": { - "bracket": "2", - "initialLeverage": "10", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 8.0, - "info": { - "bracket": "3", - "initialLeverage": "8", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "1500000", - "notionalFloor": "1000000", 
- "maintMarginRatio": "0.5", - "cum": "386950.0" - } - } - ], "BLUR/USDT:USDT": [ { "tier": 1.0, @@ -8218,13 +8168,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 500000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.005, "maxLeverage": 100.0, "info": { "bracket": "2", "initialLeverage": "100", - "notionalCap": "500000", + "notionalCap": "600000", "notionalFloor": "50000", "maintMarginRatio": "0.005", "cum": "50.0" @@ -8233,129 +8183,161 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 10000000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, + "minNotional": 600000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.0065, + "maxLeverage": 75.0, "info": { "bracket": "3", - "initialLeverage": "50", - "notionalCap": "10000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.01", - "cum": "2550.0" + "initialLeverage": "75", + "notionalCap": "3000000", + "notionalFloor": "600000", + "maintMarginRatio": "0.0065", + "cum": "950.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 10000000.0, - "maxNotional": 80000000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "minNotional": 3000000.0, + "maxNotional": 12000000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50.0, "info": { "bracket": "4", - "initialLeverage": "20", - "notionalCap": "80000000", - "notionalFloor": "10000000", - "maintMarginRatio": "0.025", - "cum": "152550.0" + "initialLeverage": "50", + "notionalCap": "12000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.01", + "cum": "11450.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 80000000.0, - "maxNotional": 150000000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "minNotional": 12000000.0, + "maxNotional": 70000000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, "info": { "bracket": "5", - "initialLeverage": "10", - "notionalCap": "150000000", - "notionalFloor": "80000000", - "maintMarginRatio": "0.05", - "cum": "2152550.0" + "initialLeverage": "25", + "notionalCap": "70000000", + "notionalFloor": "12000000", + "maintMarginRatio": "0.02", + "cum": "131450.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 150000000.0, - "maxNotional": 300000000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 70000000.0, + "maxNotional": 100000000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "6", - "initialLeverage": "5", - "notionalCap": "300000000", - "notionalFloor": "150000000", - "maintMarginRatio": "0.1", - "cum": "9652550.0" + "initialLeverage": "20", + "notionalCap": "100000000", + "notionalFloor": "70000000", + "maintMarginRatio": "0.025", + "cum": "481450.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 300000000.0, - "maxNotional": 450000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "minNotional": 100000000.0, + "maxNotional": 230000000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "7", - "initialLeverage": "4", - "notionalCap": "450000000", - "notionalFloor": "300000000", - "maintMarginRatio": "0.125", - "cum": "17152550.0" + "initialLeverage": "10", + "notionalCap": "230000000", + "notionalFloor": "100000000", + "maintMarginRatio": "0.05", + "cum": "2981450.0" } }, { "tier": 8.0, "currency": "USDT", - "minNotional": 450000000.0, - "maxNotional": 600000000.0, - "maintenanceMarginRate": 0.15, - "maxLeverage": 3.0, + "minNotional": 230000000.0, + "maxNotional": 480000000.0, + 
"maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "8", - "initialLeverage": "3", - "notionalCap": "600000000", - "notionalFloor": "450000000", - "maintMarginRatio": "0.15", - "cum": "28402550.0" + "initialLeverage": "5", + "notionalCap": "480000000", + "notionalFloor": "230000000", + "maintMarginRatio": "0.1", + "cum": "14481450.0" } }, { "tier": 9.0, "currency": "USDT", - "minNotional": 600000000.0, - "maxNotional": 800000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "minNotional": 480000000.0, + "maxNotional": 600000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "9", - "initialLeverage": "2", - "notionalCap": "800000000", - "notionalFloor": "600000000", - "maintMarginRatio": "0.25", - "cum": "88402550.0" + "initialLeverage": "4", + "notionalCap": "600000000", + "notionalFloor": "480000000", + "maintMarginRatio": "0.125", + "cum": "26481450.0" } }, { "tier": 10.0, "currency": "USDT", + "minNotional": 600000000.0, + "maxNotional": 800000000.0, + "maintenanceMarginRate": 0.15, + "maxLeverage": 3.0, + "info": { + "bracket": "10", + "initialLeverage": "3", + "notionalCap": "800000000", + "notionalFloor": "600000000", + "maintMarginRatio": "0.15", + "cum": "41481450.0" + } + }, + { + "tier": 11.0, + "currency": "USDT", "minNotional": 800000000.0, - "maxNotional": 1000000000.0, + "maxNotional": 1200000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "11", + "initialLeverage": "2", + "notionalCap": "1200000000", + "notionalFloor": "800000000", + "maintMarginRatio": "0.25", + "cum": "121481450.0" + } + }, + { + "tier": 12.0, + "currency": "USDT", + "minNotional": 1200000000.0, + "maxNotional": 1800000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "10", + "bracket": "12", "initialLeverage": "1", - "notionalCap": "1000000000", - "notionalFloor": "800000000", + "notionalCap": "1800000000", + "notionalFloor": "1200000000", "maintMarginRatio": "0.5", - "cum": "288402550.0" + "cum": "421481450.0" } } ], @@ -8815,104 +8797,6 @@ } } ], - "BTS/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50.0, - "info": { - "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.01", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "75.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "700.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5700.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - 
"bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11950.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386950.0" - } - } - ], "C98/USDT:USDT": [ { "tier": 1.0, @@ -9809,104 +9693,6 @@ } } ], - "COCOS/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 8.0, - "info": { - "bracket": "1", - "initialLeverage": "8", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 7.0, - "info": { - "bracket": "2", - "initialLeverage": "7", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "25.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 6.0, - "info": { - "bracket": "3", - "initialLeverage": "6", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5650.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11900.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "1500000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386900.0" - } - } - ], "COMBO/USDT:USDT": [ { "tier": 1.0, @@ -10710,13 +10496,13 @@ "tier": 5.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "5", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "2000000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "386875.0" @@ -12545,6 +12331,136 @@ } } ], + "ENA/USDC:USDC": [ + { + "tier": 1.0, + "currency": "USDC", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDC", + "minNotional": 5000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, + "info": { + "bracket": "2", + "initialLeverage": "25", + "notionalCap": "50000", + "notionalFloor": "5000", + "maintMarginRatio": "0.02", + "cum": "50.0" + } + }, + { + 
"tier": 3.0, + "currency": "USDC", + "minNotional": 50000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "3", + "initialLeverage": "20", + "notionalCap": "600000", + "notionalFloor": "50000", + "maintMarginRatio": "0.025", + "cum": "300.0" + } + }, + { + "tier": 4.0, + "currency": "USDC", + "minNotional": 600000.0, + "maxNotional": 1200000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "4", + "initialLeverage": "10", + "notionalCap": "1200000", + "notionalFloor": "600000", + "maintMarginRatio": "0.05", + "cum": "15300.0" + } + }, + { + "tier": 5.0, + "currency": "USDC", + "minNotional": 1200000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "5", + "initialLeverage": "5", + "notionalCap": "3000000", + "notionalFloor": "1200000", + "maintMarginRatio": "0.1", + "cum": "75300.0" + } + }, + { + "tier": 6.0, + "currency": "USDC", + "minNotional": 3000000.0, + "maxNotional": 4000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "6", + "initialLeverage": "4", + "notionalCap": "4000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.125", + "cum": "150300.0" + } + }, + { + "tier": 7.0, + "currency": "USDC", + "minNotional": 4000000.0, + "maxNotional": 6000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "6000000", + "notionalFloor": "4000000", + "maintMarginRatio": "0.25", + "cum": "650300.0" + } + }, + { + "tier": 8.0, + "currency": "USDC", + "minNotional": 6000000.0, + "maxNotional": 10000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "8", + "initialLeverage": "1", + "notionalCap": "10000000", + "notionalFloor": "6000000", + "maintMarginRatio": "0.5", + "cum": "2150300.0" + } + } + ], "ENA/USDT:USDT": [ { "tier": 1.0, @@ -13588,13 +13504,13 @@ "tier": 2.0, "currency": "USDT", "minNotional": 50000.0, - "maxNotional": 500000.0, + "maxNotional": 600000.0, "maintenanceMarginRate": 0.005, "maxLeverage": 100.0, "info": { "bracket": "2", "initialLeverage": "100", - "notionalCap": "500000", + "notionalCap": "600000", "notionalFloor": "50000", "maintMarginRatio": "0.005", "cum": "50.0" @@ -13603,145 +13519,161 @@ { "tier": 3.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, + "minNotional": 600000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.0065, "maxLeverage": 75.0, "info": { "bracket": "3", "initialLeverage": "75", - "notionalCap": "1000000", - "notionalFloor": "500000", + "notionalCap": "3000000", + "notionalFloor": "600000", "maintMarginRatio": "0.0065", - "cum": "800.0" + "cum": "950.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 5000000.0, + "minNotional": 3000000.0, + "maxNotional": 12000000.0, "maintenanceMarginRate": 0.01, "maxLeverage": 50.0, "info": { "bracket": "4", "initialLeverage": "50", - "notionalCap": "5000000", - "notionalFloor": "1000000", + "notionalCap": "12000000", + "notionalFloor": "3000000", "maintMarginRatio": "0.01", - "cum": "4300.0" + "cum": "11450.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 5000000.0, + "minNotional": 12000000.0, "maxNotional": 50000000.0, "maintenanceMarginRate": 0.02, - "maxLeverage": 20.0, + "maxLeverage": 25.0, "info": { "bracket": "5", - "initialLeverage": "20", + "initialLeverage": "25", "notionalCap": "50000000", - 
"notionalFloor": "5000000", + "notionalFloor": "12000000", "maintMarginRatio": "0.02", - "cum": "54300.0" + "cum": "131450.0" } }, { "tier": 6.0, "currency": "USDT", "minNotional": 50000000.0, - "maxNotional": 100000000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "maxNotional": 65000000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "6", - "initialLeverage": "10", - "notionalCap": "100000000", + "initialLeverage": "20", + "notionalCap": "65000000", "notionalFloor": "50000000", - "maintMarginRatio": "0.05", - "cum": "1554300.0" + "maintMarginRatio": "0.025", + "cum": "381450.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 100000000.0, + "minNotional": 65000000.0, "maxNotional": 150000000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "7", - "initialLeverage": "5", + "initialLeverage": "10", "notionalCap": "150000000", - "notionalFloor": "100000000", - "maintMarginRatio": "0.1", - "cum": "6554300.0" + "notionalFloor": "65000000", + "maintMarginRatio": "0.05", + "cum": "2006450.0" } }, { "tier": 8.0, "currency": "USDT", "minNotional": 150000000.0, - "maxNotional": 300000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "maxNotional": 320000000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "8", - "initialLeverage": "4", - "notionalCap": "300000000", + "initialLeverage": "5", + "notionalCap": "320000000", "notionalFloor": "150000000", - "maintMarginRatio": "0.125", - "cum": "10304300.0" + "maintMarginRatio": "0.1", + "cum": "9506450.0" } }, { "tier": 9.0, "currency": "USDT", - "minNotional": 300000000.0, + "minNotional": 320000000.0, "maxNotional": 400000000.0, - "maintenanceMarginRate": 0.15, - "maxLeverage": 3.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "9", - "initialLeverage": "3", + "initialLeverage": "4", "notionalCap": "400000000", - "notionalFloor": "300000000", - "maintMarginRatio": "0.15", - "cum": "17804300.0" + "notionalFloor": "320000000", + "maintMarginRatio": "0.125", + "cum": "17506450.0" } }, { "tier": 10.0, "currency": "USDT", "minNotional": 400000000.0, - "maxNotional": 500000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "maxNotional": 530000000.0, + "maintenanceMarginRate": 0.15, + "maxLeverage": 3.0, "info": { "bracket": "10", - "initialLeverage": "2", - "notionalCap": "500000000", + "initialLeverage": "3", + "notionalCap": "530000000", "notionalFloor": "400000000", - "maintMarginRatio": "0.25", - "cum": "57804300.0" + "maintMarginRatio": "0.15", + "cum": "27506450.0" } }, { "tier": 11.0, "currency": "USDT", - "minNotional": 500000000.0, + "minNotional": 530000000.0, "maxNotional": 800000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "11", + "initialLeverage": "2", + "notionalCap": "800000000", + "notionalFloor": "530000000", + "maintMarginRatio": "0.25", + "cum": "80506450.0" + } + }, + { + "tier": 12.0, + "currency": "USDT", + "minNotional": 800000000.0, + "maxNotional": 1200000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "11", + "bracket": "12", "initialLeverage": "1", - "notionalCap": "800000000", - "notionalFloor": "500000000", + "notionalCap": "1200000000", + "notionalFloor": "800000000", "maintMarginRatio": "0.5", - "cum": "182804300.0" + "cum": "280506450.0" } } ], @@ -14005,6 +13937,136 @@ } } ], + "ETHFI/USDC:USDC": [ + { + "tier": 1.0, + 
"currency": "USDC", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.01", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDC", + "minNotional": 5000.0, + "maxNotional": 50000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, + "info": { + "bracket": "2", + "initialLeverage": "25", + "notionalCap": "50000", + "notionalFloor": "5000", + "maintMarginRatio": "0.02", + "cum": "50.0" + } + }, + { + "tier": 3.0, + "currency": "USDC", + "minNotional": 50000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "3", + "initialLeverage": "20", + "notionalCap": "600000", + "notionalFloor": "50000", + "maintMarginRatio": "0.025", + "cum": "300.0" + } + }, + { + "tier": 4.0, + "currency": "USDC", + "minNotional": 600000.0, + "maxNotional": 1200000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "4", + "initialLeverage": "10", + "notionalCap": "1200000", + "notionalFloor": "600000", + "maintMarginRatio": "0.05", + "cum": "15300.0" + } + }, + { + "tier": 5.0, + "currency": "USDC", + "minNotional": 1200000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "5", + "initialLeverage": "5", + "notionalCap": "3000000", + "notionalFloor": "1200000", + "maintMarginRatio": "0.1", + "cum": "75300.0" + } + }, + { + "tier": 6.0, + "currency": "USDC", + "minNotional": 3000000.0, + "maxNotional": 4000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "6", + "initialLeverage": "4", + "notionalCap": "4000000", + "notionalFloor": "3000000", + "maintMarginRatio": "0.125", + "cum": "150300.0" + } + }, + { + "tier": 7.0, + "currency": "USDC", + "minNotional": 4000000.0, + "maxNotional": 6000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "6000000", + "notionalFloor": "4000000", + "maintMarginRatio": "0.25", + "cum": "650300.0" + } + }, + { + "tier": 8.0, + "currency": "USDC", + "minNotional": 6000000.0, + "maxNotional": 10000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "8", + "initialLeverage": "1", + "notionalCap": "10000000", + "notionalFloor": "6000000", + "maintMarginRatio": "0.5", + "cum": "2150300.0" + } + } + ], "ETHFI/USDT:USDT": [ { "tier": 1.0, @@ -14255,14 +14317,14 @@ "currency": "USDT", "minNotional": 0.0, "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 25.0, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50.0, "info": { "bracket": "1", - "initialLeverage": "25", + "initialLeverage": "50", "notionalCap": "5000", "notionalFloor": "0", - "maintMarginRatio": "0.02", + "maintMarginRatio": "0.015", "cum": "0.0" } }, @@ -14271,14 +14333,14 @@ "currency": "USDT", "minNotional": 5000.0, "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, "info": { "bracket": "2", - "initialLeverage": "20", + "initialLeverage": "25", "notionalCap": "25000", "notionalFloor": "5000", - "maintMarginRatio": "0.025", + "maintMarginRatio": "0.02", "cum": "25.0" } }, @@ -14286,80 +14348,96 @@ "tier": 3.0, "currency": "USDT", "minNotional": 25000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.05, - 
"maxLeverage": 10.0, + "maxNotional": 80000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "200000", + "initialLeverage": "20", + "notionalCap": "80000", "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" + "maintMarginRatio": "0.025", + "cum": "150.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 80000.0, + "maxNotional": 800000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "500000", - "notionalFloor": "200000", - "maintMarginRatio": "0.1", - "cum": "10650.0" + "initialLeverage": "10", + "notionalCap": "800000", + "notionalFloor": "80000", + "maintMarginRatio": "0.05", + "cum": "2150.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "minNotional": 800000.0, + "maxNotional": 1600000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "5", - "initialLeverage": "4", - "notionalCap": "1000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.125", - "cum": "23150.0" + "initialLeverage": "5", + "notionalCap": "1600000", + "notionalFloor": "800000", + "maintMarginRatio": "0.1", + "cum": "42150.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 3000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "minNotional": 1600000.0, + "maxNotional": 2000000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "6", - "initialLeverage": "2", - "notionalCap": "3000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.25", - "cum": "148150.0" + "initialLeverage": "4", + "notionalCap": "2000000", + "notionalFloor": "1600000", + "maintMarginRatio": "0.125", + "cum": "82150.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 3000000.0, - "maxNotional": 5000000.0, + "minNotional": 2000000.0, + "maxNotional": 4000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "4000000", + "notionalFloor": "2000000", + "maintMarginRatio": "0.25", + "cum": "332150.0" + } + }, + { + "tier": 8.0, + "currency": "USDT", + "minNotional": 4000000.0, + "maxNotional": 8000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "7", + "bracket": "8", "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "3000000", + "notionalCap": "8000000", + "notionalFloor": "4000000", "maintMarginRatio": "0.5", - "cum": "898150.0" + "cum": "1332150.0" } } ], @@ -14851,104 +14929,6 @@ } } ], - "FOOTBALL/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 11.0, - "info": { - "bracket": "1", - "initialLeverage": "11", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 10.0, - "info": { - "bracket": "2", - "initialLeverage": "10", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "25.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - 
"maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 8.0, - "info": { - "bracket": "3", - "initialLeverage": "8", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 250000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "250000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5650.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 250000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "250000", - "maintMarginRatio": "0.125", - "cum": "11900.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "1500000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "386900.0" - } - } - ], "FRONT/USDT:USDT": [ { "tier": 1.0, @@ -15736,96 +15716,112 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxNotional": 20000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", + "initialLeverage": "25", + "notionalCap": "20000", "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "50.0" + "maintMarginRatio": "0.02", + "cum": "25.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "minNotional": 20000.0, + "maxNotional": 30000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "675.0" + "initialLeverage": "20", + "notionalCap": "30000", + "notionalFloor": "20000", + "maintMarginRatio": "0.025", + "cum": "125.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 30000.0, + "maxNotional": 300000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "200000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5675.0" + "initialLeverage": "10", + "notionalCap": "300000", + "notionalFloor": "30000", + "maintMarginRatio": "0.05", + "cum": "875.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "minNotional": 300000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "5", - "initialLeverage": "4", - "notionalCap": "500000", - "notionalFloor": "200000", - "maintMarginRatio": "0.125", - "cum": "10675.0" + "initialLeverage": "5", + "notionalCap": "600000", + "notionalFloor": "300000", + "maintMarginRatio": "0.1", + "cum": "15875.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "minNotional": 600000.0, 
+ "maxNotional": 750000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "6", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.25", - "cum": "73175.0" + "initialLeverage": "4", + "notionalCap": "750000", + "notionalFloor": "600000", + "maintMarginRatio": "0.125", + "cum": "30875.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 750000.0, + "maxNotional": 1500000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "1500000", + "notionalFloor": "750000", + "maintMarginRatio": "0.25", + "cum": "124625.0" + } + }, + { + "tier": 8.0, + "currency": "USDT", + "minNotional": 1500000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "7", + "bracket": "8", "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "3000000", + "notionalFloor": "1500000", "maintMarginRatio": "0.5", - "cum": "323175.0" + "cum": "499625.0" } } ], @@ -16823,120 +16819,6 @@ } } ], - "HNT/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.02, - "maxLeverage": 10.0, - "info": { - "bracket": "1", - "initialLeverage": "10", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.02", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 8.0, - "info": { - "bracket": "2", - "initialLeverage": "8", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "25.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 300000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 6.0, - "info": { - "bracket": "3", - "initialLeverage": "6", - "notionalCap": "300000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "650.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 300000.0, - "maxNotional": 800000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "800000", - "notionalFloor": "300000", - "maintMarginRatio": "0.1", - "cum": "15650.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 800000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "5", - "initialLeverage": "4", - "notionalCap": "1000000", - "notionalFloor": "800000", - "maintMarginRatio": "0.125", - "cum": "35650.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "6", - "initialLeverage": "2", - "notionalCap": "1500000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.25", - "cum": "160650.0" - } - }, - { - "tier": 7.0, - "currency": "USDT", - "minNotional": 1500000.0, - "maxNotional": 2000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "7", - "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1500000", - "maintMarginRatio": "0.5", - "cum": "535650.0" - } - } - ], "HOOK/USDT:USDT": [ { "tier": 1.0, @@ -17560,13 +17442,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 
3000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3500000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "898150.0" @@ -22113,120 +21995,6 @@ } } ], - "MBL/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 5000.0, - "maintenanceMarginRate": 0.015, - "maxLeverage": 50.0, - "info": { - "bracket": "1", - "initialLeverage": "50", - "notionalCap": "5000", - "notionalFloor": "0", - "maintMarginRatio": "0.015", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, - "info": { - "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", - "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "50.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, - "info": { - "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "675.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "4", - "initialLeverage": "5", - "notionalCap": "200000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5675.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "5", - "initialLeverage": "4", - "notionalCap": "500000", - "notionalFloor": "200000", - "maintMarginRatio": "0.125", - "cum": "10675.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "6", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.25", - "cum": "73175.0" - } - }, - { - "tier": 7.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "7", - "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "323175.0" - } - } - ], "MDT/USDT:USDT": [ { "tier": 1.0, @@ -22328,13 +22096,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 3000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3500000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "898150.0" @@ -24033,6 +23801,120 @@ } } ], + "NOT/USDT:USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.015", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 25000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "2", + "initialLeverage": "20", + 
"notionalCap": "25000", + "notionalFloor": "5000", + "maintMarginRatio": "0.025", + "cum": "50.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 25000.0, + "maxNotional": 100000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "3", + "initialLeverage": "10", + "notionalCap": "100000", + "notionalFloor": "25000", + "maintMarginRatio": "0.05", + "cum": "675.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 100000.0, + "maxNotional": 200000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "4", + "initialLeverage": "5", + "notionalCap": "200000", + "notionalFloor": "100000", + "maintMarginRatio": "0.1", + "cum": "5675.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 200000.0, + "maxNotional": 500000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "5", + "initialLeverage": "4", + "notionalCap": "500000", + "notionalFloor": "200000", + "maintMarginRatio": "0.125", + "cum": "10675.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 500000.0, + "maxNotional": 1000000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "6", + "initialLeverage": "2", + "notionalCap": "1000000", + "notionalFloor": "500000", + "maintMarginRatio": "0.25", + "cum": "73175.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 1000000.0, + "maxNotional": 2000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "7", + "initialLeverage": "1", + "notionalCap": "2000000", + "notionalFloor": "1000000", + "maintMarginRatio": "0.5", + "cum": "323175.0" + } + } + ], "NTRN/USDT:USDT": [ { "tier": 1.0, @@ -24592,96 +24474,112 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxNotional": 20000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", + "initialLeverage": "25", + "notionalCap": "20000", "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "50.0" + "maintMarginRatio": "0.02", + "cum": "25.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "minNotional": 20000.0, + "maxNotional": 30000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "675.0" + "initialLeverage": "20", + "notionalCap": "30000", + "notionalFloor": "20000", + "maintMarginRatio": "0.025", + "cum": "125.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 30000.0, + "maxNotional": 300000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "200000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5675.0" + "initialLeverage": "10", + "notionalCap": "300000", + "notionalFloor": "30000", + "maintMarginRatio": "0.05", + "cum": "875.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "minNotional": 300000.0, + "maxNotional": 600000.0, + 
"maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "5", - "initialLeverage": "4", - "notionalCap": "500000", - "notionalFloor": "200000", - "maintMarginRatio": "0.125", - "cum": "10675.0" + "initialLeverage": "5", + "notionalCap": "600000", + "notionalFloor": "300000", + "maintMarginRatio": "0.1", + "cum": "15875.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "minNotional": 600000.0, + "maxNotional": 750000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "6", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.25", - "cum": "73175.0" + "initialLeverage": "4", + "notionalCap": "750000", + "notionalFloor": "600000", + "maintMarginRatio": "0.125", + "cum": "30875.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 750000.0, + "maxNotional": 1500000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "1500000", + "notionalFloor": "750000", + "maintMarginRatio": "0.25", + "cum": "124625.0" + } + }, + { + "tier": 8.0, + "currency": "USDT", + "minNotional": 1500000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "7", + "bracket": "8", "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "3000000", + "notionalFloor": "1500000", "maintMarginRatio": "0.5", - "cum": "323175.0" + "cum": "499625.0" } } ], @@ -27210,13 +27108,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 3000000.0, - "maxNotional": 5000000.0, + "maxNotional": 3500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "5000000", + "notionalCap": "3500000", "notionalFloor": "3000000", "maintMarginRatio": "0.5", "cum": "898150.0" @@ -27647,6 +27545,136 @@ } } ], + "REZ/USDT:USDT": [ + { + "tier": 1.0, + "currency": "USDT", + "minNotional": 0.0, + "maxNotional": 5000.0, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50.0, + "info": { + "bracket": "1", + "initialLeverage": "50", + "notionalCap": "5000", + "notionalFloor": "0", + "maintMarginRatio": "0.015", + "cum": "0.0" + } + }, + { + "tier": 2.0, + "currency": "USDT", + "minNotional": 5000.0, + "maxNotional": 20000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, + "info": { + "bracket": "2", + "initialLeverage": "25", + "notionalCap": "20000", + "notionalFloor": "5000", + "maintMarginRatio": "0.02", + "cum": "25.0" + } + }, + { + "tier": 3.0, + "currency": "USDT", + "minNotional": 20000.0, + "maxNotional": 30000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, + "info": { + "bracket": "3", + "initialLeverage": "20", + "notionalCap": "30000", + "notionalFloor": "20000", + "maintMarginRatio": "0.025", + "cum": "125.0" + } + }, + { + "tier": 4.0, + "currency": "USDT", + "minNotional": 30000.0, + "maxNotional": 300000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, + "info": { + "bracket": "4", + "initialLeverage": "10", + "notionalCap": "300000", + "notionalFloor": "30000", + "maintMarginRatio": "0.05", + "cum": "875.0" + } + }, + { + "tier": 5.0, + "currency": "USDT", + "minNotional": 300000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, + "info": { + "bracket": "5", + "initialLeverage": 
"5", + "notionalCap": "600000", + "notionalFloor": "300000", + "maintMarginRatio": "0.1", + "cum": "15875.0" + } + }, + { + "tier": 6.0, + "currency": "USDT", + "minNotional": 600000.0, + "maxNotional": 750000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, + "info": { + "bracket": "6", + "initialLeverage": "4", + "notionalCap": "750000", + "notionalFloor": "600000", + "maintMarginRatio": "0.125", + "cum": "30875.0" + } + }, + { + "tier": 7.0, + "currency": "USDT", + "minNotional": 750000.0, + "maxNotional": 1500000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "1500000", + "notionalFloor": "750000", + "maintMarginRatio": "0.25", + "cum": "124625.0" + } + }, + { + "tier": 8.0, + "currency": "USDT", + "minNotional": 1500000.0, + "maxNotional": 3000000.0, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1.0, + "info": { + "bracket": "8", + "initialLeverage": "1", + "notionalCap": "3000000", + "notionalFloor": "1500000", + "maintMarginRatio": "0.5", + "cum": "499625.0" + } + } + ], "RIF/USDT:USDT": [ { "tier": 1.0, @@ -29250,10 +29278,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.015, - "maxLeverage": 50.0, + "maxLeverage": 21.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "21", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.015", @@ -29344,13 +29372,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "maxNotional": 1500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "2000000", + "notionalCap": "1500000", "notionalFloor": "1000000", "maintMarginRatio": "0.5", "cum": "323175.0" @@ -29364,10 +29392,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.015, - "maxLeverage": 50.0, + "maxLeverage": 21.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "21", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.015", @@ -30039,88 +30067,6 @@ } } ], - "SRM/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 15000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 8.0, - "info": { - "bracket": "1", - "initialLeverage": "8", - "notionalCap": "15000", - "notionalFloor": "0", - "maintMarginRatio": "0.025", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 15000.0, - "maxNotional": 50000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 6.0, - "info": { - "bracket": "2", - "initialLeverage": "6", - "notionalCap": "50000", - "notionalFloor": "15000", - "maintMarginRatio": "0.05", - "cum": "375.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 50000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "3", - "initialLeverage": "5", - "notionalCap": "200000", - "notionalFloor": "50000", - "maintMarginRatio": "0.1", - "cum": "2875.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 2.0, - "info": { - "bracket": "4", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "200000", - "maintMarginRatio": "0.125", - "cum": "7875.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 1500000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 
1.0, - "info": { - "bracket": "5", - "initialLeverage": "1", - "notionalCap": "1500000", - "notionalFloor": "1000000", - "maintMarginRatio": "0.5", - "cum": "382875.0" - } - } - ], "SSV/USDT:USDT": [ { "tier": 1.0, @@ -30682,10 +30628,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.015, - "maxLeverage": 50.0, + "maxLeverage": 21.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "21", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.015", @@ -30776,13 +30722,13 @@ "tier": 7.0, "currency": "USDT", "minNotional": 1500000.0, - "maxNotional": 3000000.0, + "maxNotional": 2000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "7", "initialLeverage": "1", - "notionalCap": "3000000", + "notionalCap": "2000000", "notionalFloor": "1500000", "maintMarginRatio": "0.5", "cum": "484425.0" @@ -32470,96 +32416,112 @@ "tier": 2.0, "currency": "USDT", "minNotional": 5000.0, - "maxNotional": 25000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20.0, + "maxNotional": 20000.0, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25.0, "info": { "bracket": "2", - "initialLeverage": "20", - "notionalCap": "25000", + "initialLeverage": "25", + "notionalCap": "20000", "notionalFloor": "5000", - "maintMarginRatio": "0.025", - "cum": "50.0" + "maintMarginRatio": "0.02", + "cum": "25.0" } }, { "tier": 3.0, "currency": "USDT", - "minNotional": 25000.0, - "maxNotional": 100000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10.0, + "minNotional": 20000.0, + "maxNotional": 30000.0, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20.0, "info": { "bracket": "3", - "initialLeverage": "10", - "notionalCap": "100000", - "notionalFloor": "25000", - "maintMarginRatio": "0.05", - "cum": "675.0" + "initialLeverage": "20", + "notionalCap": "30000", + "notionalFloor": "20000", + "maintMarginRatio": "0.025", + "cum": "125.0" } }, { "tier": 4.0, "currency": "USDT", - "minNotional": 100000.0, - "maxNotional": 200000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, + "minNotional": 30000.0, + "maxNotional": 300000.0, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10.0, "info": { "bracket": "4", - "initialLeverage": "5", - "notionalCap": "200000", - "notionalFloor": "100000", - "maintMarginRatio": "0.1", - "cum": "5675.0" + "initialLeverage": "10", + "notionalCap": "300000", + "notionalFloor": "30000", + "maintMarginRatio": "0.05", + "cum": "875.0" } }, { "tier": 5.0, "currency": "USDT", - "minNotional": 200000.0, - "maxNotional": 500000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, + "minNotional": 300000.0, + "maxNotional": 600000.0, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5.0, "info": { "bracket": "5", - "initialLeverage": "4", - "notionalCap": "500000", - "notionalFloor": "200000", - "maintMarginRatio": "0.125", - "cum": "10675.0" + "initialLeverage": "5", + "notionalCap": "600000", + "notionalFloor": "300000", + "maintMarginRatio": "0.1", + "cum": "15875.0" } }, { "tier": 6.0, "currency": "USDT", - "minNotional": 500000.0, - "maxNotional": 1000000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, + "minNotional": 600000.0, + "maxNotional": 750000.0, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4.0, "info": { "bracket": "6", - "initialLeverage": "2", - "notionalCap": "1000000", - "notionalFloor": "500000", - "maintMarginRatio": "0.25", - "cum": "73175.0" + "initialLeverage": "4", + "notionalCap": "750000", + "notionalFloor": "600000", + "maintMarginRatio": "0.125", + "cum": 
"30875.0" } }, { "tier": 7.0, "currency": "USDT", - "minNotional": 1000000.0, - "maxNotional": 2000000.0, + "minNotional": 750000.0, + "maxNotional": 1500000.0, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2.0, + "info": { + "bracket": "7", + "initialLeverage": "2", + "notionalCap": "1500000", + "notionalFloor": "750000", + "maintMarginRatio": "0.25", + "cum": "124625.0" + } + }, + { + "tier": 8.0, + "currency": "USDT", + "minNotional": 1500000.0, + "maxNotional": 3000000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { - "bracket": "7", + "bracket": "8", "initialLeverage": "1", - "notionalCap": "2000000", - "notionalFloor": "1000000", + "notionalCap": "3000000", + "notionalFloor": "1500000", "maintMarginRatio": "0.5", - "cum": "323175.0" + "cum": "499625.0" } } ], @@ -32677,104 +32639,6 @@ } } ], - "TOMO/USDT:USDT": [ - { - "tier": 1.0, - "currency": "USDT", - "minNotional": 0.0, - "maxNotional": 50000.0, - "maintenanceMarginRate": 0.025, - "maxLeverage": 8.0, - "info": { - "bracket": "1", - "initialLeverage": "8", - "notionalCap": "50000", - "notionalFloor": "0", - "maintMarginRatio": "0.025", - "cum": "0.0" - } - }, - { - "tier": 2.0, - "currency": "USDT", - "minNotional": 50000.0, - "maxNotional": 600000.0, - "maintenanceMarginRate": 0.05, - "maxLeverage": 6.0, - "info": { - "bracket": "2", - "initialLeverage": "6", - "notionalCap": "600000", - "notionalFloor": "50000", - "maintMarginRatio": "0.05", - "cum": "1250.0" - } - }, - { - "tier": 3.0, - "currency": "USDT", - "minNotional": 600000.0, - "maxNotional": 1280000.0, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5.0, - "info": { - "bracket": "3", - "initialLeverage": "5", - "notionalCap": "1280000", - "notionalFloor": "600000", - "maintMarginRatio": "0.1", - "cum": "31250.0" - } - }, - { - "tier": 4.0, - "currency": "USDT", - "minNotional": 1280000.0, - "maxNotional": 1600000.0, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4.0, - "info": { - "bracket": "4", - "initialLeverage": "4", - "notionalCap": "1600000", - "notionalFloor": "1280000", - "maintMarginRatio": "0.125", - "cum": "63250.0" - } - }, - { - "tier": 5.0, - "currency": "USDT", - "minNotional": 1600000.0, - "maxNotional": 4800000.0, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2.0, - "info": { - "bracket": "5", - "initialLeverage": "2", - "notionalCap": "4800000", - "notionalFloor": "1600000", - "maintMarginRatio": "0.25", - "cum": "263250.0" - } - }, - { - "tier": 6.0, - "currency": "USDT", - "minNotional": 4800000.0, - "maxNotional": 5000000.0, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1.0, - "info": { - "bracket": "6", - "initialLeverage": "1", - "notionalCap": "5000000", - "notionalFloor": "4800000", - "maintMarginRatio": "0.5", - "cum": "1463250.0" - } - } - ], "TON/USDT:USDT": [ { "tier": 1.0, @@ -32912,10 +32776,10 @@ "minNotional": 0.0, "maxNotional": 5000.0, "maintenanceMarginRate": 0.015, - "maxLeverage": 50.0, + "maxLeverage": 26.0, "info": { "bracket": "1", - "initialLeverage": "50", + "initialLeverage": "26", "notionalCap": "5000", "notionalFloor": "0", "maintMarginRatio": "0.015", @@ -33022,13 +32886,13 @@ "tier": 8.0, "currency": "USDT", "minNotional": 4000000.0, - "maxNotional": 8000000.0, + "maxNotional": 4500000.0, "maintenanceMarginRate": 0.5, "maxLeverage": 1.0, "info": { "bracket": "8", "initialLeverage": "1", - "notionalCap": "8000000", + "notionalCap": "4500000", "notionalFloor": "4000000", "maintMarginRatio": "0.5", "cum": "1297775.0" diff --git a/freqtrade/exchange/bingx.py b/freqtrade/exchange/bingx.py index 
0bbf4a19d..2d81643a1 100644 --- a/freqtrade/exchange/bingx.py +++ b/freqtrade/exchange/bingx.py @@ -1,4 +1,5 @@ -""" Bingx exchange subclass """ +"""Bingx exchange subclass""" + import logging from typing import Dict @@ -16,4 +17,7 @@ class Bingx(Exchange): _ft_has: Dict = { "ohlcv_candle_limit": 1000, + "stoploss_on_exchange": True, + "stoploss_order_types": {"limit": "limit", "market": "market"}, + "order_time_in_force": ["GTC", "IOC", "PO"], } diff --git a/freqtrade/exchange/bitmart.py b/freqtrade/exchange/bitmart.py index 5d792b153..ffc8ac67a 100644 --- a/freqtrade/exchange/bitmart.py +++ b/freqtrade/exchange/bitmart.py @@ -1,4 +1,5 @@ -""" Bitmart exchange subclass """ +"""Bitmart exchange subclass""" + import logging from typing import Dict diff --git a/freqtrade/exchange/bitpanda.py b/freqtrade/exchange/bitpanda.py index 4cac35ce8..1e93256e7 100644 --- a/freqtrade/exchange/bitpanda.py +++ b/freqtrade/exchange/bitpanda.py @@ -1,4 +1,5 @@ -""" Bitpanda exchange subclass """ +"""Bitpanda exchange subclass""" + import logging from datetime import datetime, timezone from typing import Dict, List, Optional @@ -15,8 +16,9 @@ class Bitpanda(Exchange): with this exchange. """ - def get_trades_for_order(self, order_id: str, pair: str, since: datetime, - params: Optional[Dict] = None) -> List: + def get_trades_for_order( + self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None + ) -> List: """ Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id. The "since" argument passed in is coming from the database and is in UTC, @@ -33,5 +35,5 @@ class Bitpanda(Exchange): :param pair: Pair the order is for :param since: datetime object of the order creation time. Assumes object is in UTC. """ - params = {'to': int(datetime.now(timezone.utc).timestamp() * 1000)} + params = {"to": int(datetime.now(timezone.utc).timestamp() * 1000)} return super().get_trades_for_order(order_id, pair, since, params) diff --git a/freqtrade/exchange/bitvavo.py b/freqtrade/exchange/bitvavo.py index ba1d355cc..d088e3435 100644 --- a/freqtrade/exchange/bitvavo.py +++ b/freqtrade/exchange/bitvavo.py @@ -1,4 +1,5 @@ """Kucoin exchange subclass.""" + import logging from typing import Dict diff --git a/freqtrade/exchange/bybit.py b/freqtrade/exchange/bybit.py index 1891902f5..c8b05d1de 100644 --- a/freqtrade/exchange/bybit.py +++ b/freqtrade/exchange/bybit.py @@ -1,4 +1,5 @@ -""" Bybit exchange subclass """ +"""Bybit exchange subclass""" + import logging from datetime import datetime, timedelta from typing import Any, Dict, List, Optional, Tuple @@ -25,6 +26,7 @@ class Bybit(Exchange): officially supported by the Freqtrade development team. So some features may still not work as expected. """ + unified_account = False _ft_has: Dict = { @@ -60,20 +62,14 @@ class Bybit(Exchange): # ccxt defaults to swap mode. config = {} if self.trading_mode == TradingMode.SPOT: - config.update({ - "options": { - "defaultType": "spot" - } - }) + config.update({"options": {"defaultType": "spot"}}) config.update(super()._ccxt_config) return config def market_is_future(self, market: Dict[str, Any]) -> bool: main = super().market_is_future(market) # For ByBit, we'll only support USDT markets for now. - return ( - main and market['settle'] == 'USDT' - ) + return main and market["settle"] == "USDT" @retrier def additional_exchange_init(self) -> None: @@ -83,17 +79,19 @@ class Bybit(Exchange): Must be overridden in child methods if required. 
""" try: - if not self._config['dry_run']: + if not self._config["dry_run"]: if self.trading_mode == TradingMode.FUTURES: position_mode = self._api.set_position_mode(False) - self._log_exchange_response('set_position_mode', position_mode) + self._log_exchange_response("set_position_mode", position_mode) is_unified = self._api.is_unified_enabled() # Returns a tuple of bools, first for margin, second for Account if is_unified and len(is_unified) > 1 and is_unified[1]: self.unified_account = True logger.info("Bybit: Unified account.") - raise OperationalException("Bybit: Unified account is not supported. " - "Please use a standard (sub)account.") + raise OperationalException( + "Bybit: Unified account is not supported. " + "Please use a standard (sub)account." + ) else: self.unified_account = False logger.info("Bybit: Standard account.") @@ -101,14 +99,14 @@ class Bybit(Exchange): raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}' - ) from e + f"Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def ohlcv_candle_limit( - self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: - + self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None + ) -> int: if candle_type in (CandleType.FUNDING_RATE): return 200 @@ -116,7 +114,7 @@ class Bybit(Exchange): def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False): if self.trading_mode != TradingMode.SPOT: - params = {'leverage': leverage} + params = {"leverage": leverage} self.set_margin_mode(pair, self.margin_mode, accept_fail=True, params=params) self._set_leverage(leverage, pair, accept_fail=True) @@ -126,7 +124,7 @@ class Bybit(Exchange): ordertype: str, leverage: float, reduceOnly: bool, - time_in_force: str = 'GTC', + time_in_force: str = "GTC", ) -> Dict: params = super()._get_params( side=side, @@ -136,13 +134,13 @@ class Bybit(Exchange): time_in_force=time_in_force, ) if self.trading_mode == TradingMode.FUTURES and self.margin_mode: - params['position_idx'] = 0 + params["position_idx"] = 0 return params def dry_run_liquidation_price( self, pair: str, - open_rate: float, # Entry price of position + open_rate: float, # Entry price of position is_short: bool, amount: float, stake_amount: float, @@ -185,10 +183,8 @@ class Bybit(Exchange): mm_ratio, _ = self.get_maintenance_ratio_and_amt(pair, stake_amount) if self.trading_mode == TradingMode.FUTURES and self.margin_mode == MarginMode.ISOLATED: - - if market['inverse']: - raise OperationalException( - "Freqtrade does not yet support inverse contracts") + if market["inverse"]: + raise OperationalException("Freqtrade does not yet support inverse contracts") initial_margin_rate = 1 / leverage # See docstring - ignores extra margin! 
@@ -199,10 +195,12 @@ class Bybit(Exchange): else: raise OperationalException( - "Freqtrade only supports isolated futures for leverage trading") + "Freqtrade only supports isolated futures for leverage trading" + ) def get_funding_fees( - self, pair: str, amount: float, is_short: bool, open_date: datetime) -> float: + self, pair: str, amount: float, is_short: bool, open_date: datetime + ) -> float: """ Fetch funding fees, either from the exchange (live) or calculates them based on funding rate/mark price history @@ -216,8 +214,7 @@ class Bybit(Exchange): # Bybit does not provide "applied" funding fees per position. if self.trading_mode == TradingMode.FUTURES: try: - return self._fetch_and_calculate_funding_fees( - pair, amount, is_short, open_date) + return self._fetch_and_calculate_funding_fees(pair, amount, is_short, open_date) except ExchangeError: logger.warning(f"Could not update funding fees for {pair}.") return 0.0 @@ -234,7 +231,7 @@ class Bybit(Exchange): while since < dt_now(): until = since + timedelta(days=7, minutes=-1) - orders += super().fetch_orders(pair, since, params={'until': dt_ts(until)}) + orders += super().fetch_orders(pair, since, params={"until": dt_ts(until)}) since = until return orders @@ -242,10 +239,30 @@ class Bybit(Exchange): def fetch_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: order = super().fetch_order(order_id, pair, params) if ( - order.get('status') == 'canceled' - and order.get('filled') == 0.0 - and order.get('remaining') == 0.0 + order.get("status") == "canceled" + and order.get("filled") == 0.0 + and order.get("remaining") == 0.0 ): # Canceled orders will have "remaining=0" on bybit. - order['remaining'] = None + order["remaining"] = None return order + + @retrier + def get_leverage_tiers(self) -> Dict[str, List[Dict]]: + """ + Cache leverage tiers for 1 day, since they are not expected to change often, and + bybit requires pagination to fetch all tiers. + """ + + # Load cached tiers + tiers_cached = self.load_cached_leverage_tiers( + self._config["stake_currency"], timedelta(days=1) + ) + if tiers_cached: + return tiers_cached + + # Fetch tiers from exchange + tiers = super().get_leverage_tiers() + + self.cache_leverage_tiers(tiers, self._config["stake_currency"]) + return tiers diff --git a/freqtrade/exchange/check_exchange.py b/freqtrade/exchange/check_exchange.py index 69330bcd0..73b1c8a97 100644 --- a/freqtrade/exchange/check_exchange.py +++ b/freqtrade/exchange/check_exchange.py @@ -21,45 +21,52 @@ def check_exchange(config: Config, check_for_bad: bool = True) -> bool: and thus is not known for the Freqtrade at all. """ - if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER] - and not config.get('exchange', {}).get('name')): + if config["runmode"] in [ + RunMode.PLOT, + RunMode.UTIL_NO_EXCHANGE, + RunMode.OTHER, + ] and not config.get("exchange", {}).get("name"): # Skip checking exchange in plot mode, since it requires no exchange return True logger.info("Checking exchange...") - exchange = config.get('exchange', {}).get('name', '').lower() + exchange = config.get("exchange", {}).get("name", "").lower() if not exchange: raise OperationalException( - f'This command requires a configured exchange. You should either use ' - f'`--exchange ` or specify a configuration file via `--config`.\n' - f'The following exchanges are available for Freqtrade: ' + f"This command requires a configured exchange. 
You should either use " + f"`--exchange ` or specify a configuration file via `--config`.\n" + f"The following exchanges are available for Freqtrade: " f'{", ".join(available_exchanges())}' ) if not is_exchange_known_ccxt(exchange): raise OperationalException( f'Exchange "{exchange}" is not known to the ccxt library ' - f'and therefore not available for the bot.\n' - f'The following exchanges are available for Freqtrade: ' + f"and therefore not available for the bot.\n" + f"The following exchanges are available for Freqtrade: " f'{", ".join(available_exchanges())}' ) valid, reason = validate_exchange(exchange) if not valid: if check_for_bad: - raise OperationalException(f'Exchange "{exchange}" will not work with Freqtrade. ' - f'Reason: {reason}') + raise OperationalException( + f'Exchange "{exchange}" will not work with Freqtrade. ' f"Reason: {reason}" + ) else: logger.warning(f'Exchange "{exchange}" will not work with Freqtrade. Reason: {reason}') if MAP_EXCHANGE_CHILDCLASS.get(exchange, exchange) in SUPPORTED_EXCHANGES: - logger.info(f'Exchange "{exchange}" is officially supported ' - f'by the Freqtrade development team.') + logger.info( + f'Exchange "{exchange}" is officially supported ' f"by the Freqtrade development team." + ) else: - logger.warning(f'Exchange "{exchange}" is known to the the ccxt library, ' - f'available for the bot, but not officially supported ' - f'by the Freqtrade development team. ' - f'It may work flawlessly (please report back) or have serious issues. ' - f'Use it at your own discretion.') + logger.warning( + f'Exchange "{exchange}" is known to the ccxt library, ' + f"available for the bot, but not officially supported " + f"by the Freqtrade development team. " + f"It may work flawlessly (please report back) or have serious issues. " + f"Use it at your own discretion." 
+ ) return True diff --git a/freqtrade/exchange/coinbasepro.py b/freqtrade/exchange/coinbasepro.py index 7dd9c80dc..e234002ad 100644 --- a/freqtrade/exchange/coinbasepro.py +++ b/freqtrade/exchange/coinbasepro.py @@ -1,4 +1,5 @@ -""" CoinbasePro exchange subclass """ +"""CoinbasePro exchange subclass""" + import logging from typing import Dict diff --git a/freqtrade/exchange/common.py b/freqtrade/exchange/common.py index 8909ef5ff..99f891836 100644 --- a/freqtrade/exchange/common.py +++ b/freqtrade/exchange/common.py @@ -43,46 +43,49 @@ BAD_EXCHANGES = { } MAP_EXCHANGE_CHILDCLASS = { - 'binanceus': 'binance', - 'binanceje': 'binance', - 'binanceusdm': 'binance', - 'okex': 'okx', - 'gateio': 'gate', - 'huboi': 'htx', + "binanceus": "binance", + "binanceje": "binance", + "binanceusdm": "binance", + "okex": "okx", + "gateio": "gate", + "huboi": "htx", } SUPPORTED_EXCHANGES = [ - 'binance', - 'bitmart', - 'gate', - 'htx', - 'kraken', - 'okx', + "binance", + "bingx", + "bitmart", + "gate", + "htx", + "kraken", + "okx", ] # either the main, or replacement methods (array) is required EXCHANGE_HAS_REQUIRED: Dict[str, List[str]] = { # Required / private - 'fetchOrder': ['fetchOpenOrder', 'fetchClosedOrder'], - 'cancelOrder': [], - 'createOrder': [], - 'fetchBalance': [], - + "fetchOrder": ["fetchOpenOrder", "fetchClosedOrder"], + "cancelOrder": [], + "createOrder": [], + "fetchBalance": [], # Public endpoints - 'fetchOHLCV': [], + "fetchOHLCV": [], } EXCHANGE_HAS_OPTIONAL = [ # Private - 'fetchMyTrades', # Trades for order - fee detection - 'createLimitOrder', 'createMarketOrder', # Either OR for orders + "fetchMyTrades", # Trades for order - fee detection + "createLimitOrder", + "createMarketOrder", # Either OR for orders # 'setLeverage', # Margin/Futures trading # 'setMarginMode', # Margin/Futures trading # 'fetchFundingHistory', # Futures trading # Public - 'fetchOrderBook', 'fetchL2OrderBook', 'fetchTicker', # OR for pricing - 'fetchTickers', # For volumepairlist? - 'fetchTrades', # Downloading trades data + "fetchOrderBook", + "fetchL2OrderBook", + "fetchTicker", # OR for pricing + "fetchTickers", # For volumepairlist? + "fetchTrades", # Downloading trades data # 'fetchFundingRateHistory', # Futures trading # 'fetchPositions', # Futures trading # 'fetchLeverageTiers', # Futures initialization @@ -99,11 +102,11 @@ def remove_exchange_credentials(exchange_config: ExchangeConfig, dry_run: bool) Modifies the input dict! """ if dry_run: - exchange_config['key'] = '' - exchange_config['apiKey'] = '' - exchange_config['secret'] = '' - exchange_config['password'] = '' - exchange_config['uid'] = '' + exchange_config["key"] = "" + exchange_config["apiKey"] = "" + exchange_config["secret"] = "" + exchange_config["password"] = "" + exchange_config["uid"] = "" def calculate_backoff(retrycount, max_retries): @@ -115,25 +118,27 @@ def calculate_backoff(retrycount, max_retries): def retrier_async(f): async def wrapper(*args, **kwargs): - count = kwargs.pop('count', API_RETRY_COUNT) + count = kwargs.pop("count", API_RETRY_COUNT) kucoin = args[0].name == "KuCoin" # Check if the exchange is KuCoin. try: return await f(*args, **kwargs) except TemporaryError as ex: msg = f'{f.__name__}() returned exception: "{ex}". ' if count > 0: - msg += f'Retrying still for {count} times.' + msg += f"Retrying still for {count} times." 
count -= 1 - kwargs['count'] = count + kwargs["count"] = count if isinstance(ex, DDosProtection): if kucoin and "429000" in str(ex): # Temporary fix for 429000 error on kucoin # see https://github.com/freqtrade/freqtrade/issues/5700 for details. _get_logging_mixin().log_once( f"Kucoin 429 error, avoid triggering DDosProtection backoff delay. " - f"{count} tries left before giving up", logmethod=logger.warning) + f"{count} tries left before giving up", + logmethod=logger.warning, + ) # Reset msg to avoid logging too many times. - msg = '' + msg = "" else: backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT) logger.info(f"Applying DDosProtection backoff delay: {backoff_delay}") @@ -142,38 +147,37 @@ def retrier_async(f): logger.warning(msg) return await wrapper(*args, **kwargs) else: - logger.warning(msg + 'Giving up.') + logger.warning(msg + "Giving up.") raise ex + return wrapper -F = TypeVar('F', bound=Callable[..., Any]) +F = TypeVar("F", bound=Callable[..., Any]) # Type shenanigans @overload -def retrier(_func: F) -> F: - ... +def retrier(_func: F) -> F: ... @overload -def retrier(*, retries=API_RETRY_COUNT) -> Callable[[F], F]: - ... +def retrier(*, retries=API_RETRY_COUNT) -> Callable[[F], F]: ... def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT): def decorator(f: F) -> F: @wraps(f) def wrapper(*args, **kwargs): - count = kwargs.pop('count', retries) + count = kwargs.pop("count", retries) try: return f(*args, **kwargs) except (TemporaryError, RetryableOrderError) as ex: msg = f'{f.__name__}() returned exception: "{ex}". ' if count > 0: - logger.warning(msg + f'Retrying still for {count} times.') + logger.warning(msg + f"Retrying still for {count} times.") count -= 1 - kwargs.update({'count': count}) + kwargs.update({"count": count}) if isinstance(ex, (DDosProtection, RetryableOrderError)): # increasing backoff backoff_delay = calculate_backoff(count + 1, retries) @@ -181,9 +185,11 @@ def retrier(_func: Optional[F] = None, *, retries=API_RETRY_COUNT): time.sleep(backoff_delay) return wrapper(*args, **kwargs) else: - logger.warning(msg + 'Giving up.') + logger.warning(msg + "Giving up.") raise ex + return cast(F, wrapper) + # Support both @retrier and @retrier(retries=2) syntax if _func is None: return decorator diff --git a/freqtrade/exchange/exchange.py b/freqtrade/exchange/exchange.py index eed852e7c..9d7c4eabb 100644 --- a/freqtrade/exchange/exchange.py +++ b/freqtrade/exchange/exchange.py @@ -2,6 +2,7 @@ """ Cryptocurrency Exchanges support """ + import asyncio import inspect import logging @@ -19,29 +20,67 @@ from ccxt import TICK_SIZE from dateutil import parser from pandas import DataFrame, concat -from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk, - BuySell, Config, EntryExit, ExchangeConfig, - ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe) +from freqtrade.constants import ( + DEFAULT_AMOUNT_RESERVE_PERCENT, + NON_OPEN_EXCHANGE_STATES, + BidAsk, + BuySell, + Config, + EntryExit, + ExchangeConfig, + ListPairsWithTimeframes, + MakerTaker, + OBLiteral, + PairWithTimeframe, +) from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, PriceType, RunMode, TradingMode -from freqtrade.exceptions import (ConfigurationError, DDosProtection, ExchangeError, - InsufficientFundsError, InvalidOrderException, - OperationalException, PricingError, RetryableOrderError, - TemporaryError) -from 
freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials, - retrier, retrier_async) -from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType, - amount_to_contract_precision, amount_to_contracts, - amount_to_precision, contracts_to_amount, - date_minus_candles, is_exchange_known_ccxt, - market_is_active, price_to_precision) -from freqtrade.exchange.exchange_utils_timeframe import (timeframe_to_minutes, timeframe_to_msecs, - timeframe_to_next_date, - timeframe_to_prev_date, - timeframe_to_seconds) +from freqtrade.exceptions import ( + ConfigurationError, + DDosProtection, + ExchangeError, + InsufficientFundsError, + InvalidOrderException, + OperationalException, + PricingError, + RetryableOrderError, + TemporaryError, +) +from freqtrade.exchange.common import ( + API_FETCH_ORDER_RETRY_COUNT, + remove_exchange_credentials, + retrier, + retrier_async, +) +from freqtrade.exchange.exchange_utils import ( + ROUND, + ROUND_DOWN, + ROUND_UP, + CcxtModuleType, + amount_to_contract_precision, + amount_to_contracts, + amount_to_precision, + contracts_to_amount, + date_minus_candles, + is_exchange_known_ccxt, + market_is_active, + price_to_precision, +) +from freqtrade.exchange.exchange_utils_timeframe import ( + timeframe_to_minutes, + timeframe_to_msecs, + timeframe_to_next_date, + timeframe_to_prev_date, + timeframe_to_seconds, +) from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers -from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json, - safe_value_fallback2) +from freqtrade.misc import ( + chunks, + deep_merge_dicts, + file_dump_json, + file_load_json, + safe_value_fallback2, +) from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.util import dt_from_ts, dt_now from freqtrade.util.datetime_helpers import dt_humanize_delta, dt_ts @@ -52,7 +91,6 @@ logger = logging.getLogger(__name__) class Exchange: - # Parameters to add directly to buy/sell calls (like agreeing to trading agreement) _params: Dict = {} @@ -86,7 +124,7 @@ class Exchange: "funding_fee_timeframe": "8h", "ccxt_futures_name": "swap", "needs_trading_fees": False, # use fetch_trading_fees to cache fees - "order_props_in_contracts": ['amount', 'filled', 'remaining'], + "order_props_in_contracts": ["amount", "filled", "remaining"], # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong "marketOrderRequiresPrice": False, "exchange_has_overrides": {}, # Dictionary overriding ccxt's "has". @@ -99,8 +137,14 @@ class Exchange: # TradingMode.SPOT always supported and not required in this list ] - def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None, - validate: bool = True, load_leverage_tiers: bool = False) -> None: + def __init__( + self, + config: Config, + *, + exchange_config: Optional[ExchangeConfig] = None, + validate: bool = True, + load_leverage_tiers: bool = False, + ) -> None: """ Initializes this module with the given config, it does basic validation whether the specified exchange and pairs are valid. 
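# Minimal sketch of the subclass pattern that _ft_has_default above supports: an
# exchange subclass declares only the keys that differ from the defaults, as the Bingx
# hunk earlier in this diff does, and they are deep-merged over the defaults in
# Exchange.__init__. "MyExchange" and its values are hypothetical placeholders.
from typing import Dict

from freqtrade.exchange import Exchange


class MyExchange(Exchange):
    _ft_has: Dict = {
        "ohlcv_candle_limit": 1000,
        "stoploss_on_exchange": True,
        "stoploss_order_types": {"limit": "limit", "market": "market"},
        "order_time_in_force": ["GTC", "IOC", "PO"],
    }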
@@ -141,51 +185,49 @@ class Exchange: # Holds all open sell orders for dry_run self._dry_run_open_orders: Dict[str, Any] = {} - if config['dry_run']: - logger.info('Instance is running with dry_run enabled') + if config["dry_run"]: + logger.info("Instance is running with dry_run enabled") logger.info(f"Using CCXT {ccxt.__version__}") - exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange'] - remove_exchange_credentials(exchange_conf, config.get('dry_run', False)) - self.log_responses = exchange_conf.get('log_responses', False) + exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config["exchange"] + remove_exchange_credentials(exchange_conf, config.get("dry_run", False)) + self.log_responses = exchange_conf.get("log_responses", False) # Leverage properties - self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) + self.trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT) self.margin_mode: MarginMode = ( - MarginMode(config.get('margin_mode')) - if config.get('margin_mode') - else MarginMode.NONE + MarginMode(config.get("margin_mode")) if config.get("margin_mode") else MarginMode.NONE ) - self.liquidation_buffer = config.get('liquidation_buffer', 0.05) + self.liquidation_buffer = config.get("liquidation_buffer", 0.05) # Deep merge ft_has with default ft_has options self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default)) if self.trading_mode == TradingMode.FUTURES: self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has) - if exchange_conf.get('_ft_has_params'): - self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'), - self._ft_has) + if exchange_conf.get("_ft_has_params"): + self._ft_has = deep_merge_dicts(exchange_conf.get("_ft_has_params"), self._ft_has) logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has) # Assign this directly for easy access - self._ohlcv_partial_candle = self._ft_has['ohlcv_partial_candle'] + self._ohlcv_partial_candle = self._ft_has["ohlcv_partial_candle"] - self._trades_pagination = self._ft_has['trades_pagination'] - self._trades_pagination_arg = self._ft_has['trades_pagination_arg'] + self._trades_pagination = self._ft_has["trades_pagination"] + self._trades_pagination_arg = self._ft_has["trades_pagination_arg"] # Initialize ccxt objects ccxt_config = self._ccxt_config - ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config) - ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config) + ccxt_config = deep_merge_dicts(exchange_conf.get("ccxt_config", {}), ccxt_config) + ccxt_config = deep_merge_dicts(exchange_conf.get("ccxt_sync_config", {}), ccxt_config) self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config) ccxt_async_config = self._ccxt_config - ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), - ccxt_async_config) - ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}), - ccxt_async_config) - self._api_async = self._init_ccxt( - exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config) + ccxt_async_config = deep_merge_dicts( + exchange_conf.get("ccxt_config", {}), ccxt_async_config + ) + ccxt_async_config = deep_merge_dicts( + exchange_conf.get("ccxt_async_config", {}), ccxt_async_config + ) + self._api_async = self._init_ccxt(exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config) logger.info(f'Using Exchange "{self.name}"') self.required_candle_call_count = 1 
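# Minimal sketch, assuming a freqtrade-style "exchange" configuration section: these
# are the keys consumed by the __init__ hunk above (log_responses, _ft_has_params,
# ccxt_config, ccxt_sync_config, ccxt_async_config). The concrete values are
# hypothetical; only the key names come from the code above.
exchange_conf = {
    "name": "binance",
    "log_responses": False,                          # enables _log_exchange_response output
    "_ft_has_params": {"ohlcv_candle_limit": 720},   # deep-merged over exchange._ft_has
    "ccxt_config": {"enableRateLimit": True},        # merged into both sync and async ccxt objects
    "ccxt_sync_config": {},                          # sync-only additions
    "ccxt_async_config": {},                         # async-only additions
}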
@@ -193,13 +235,15 @@ class Exchange: # Initial markets load self._load_markets() self.validate_config(config) - self._startup_candle_count: int = config.get('startup_candle_count', 0) + self._startup_candle_count: int = config.get("startup_candle_count", 0) self.required_candle_call_count = self.validate_required_startup_candles( - self._startup_candle_count, config.get('timeframe', '')) + self._startup_candle_count, config.get("timeframe", "") + ) # Converts the interval provided in minutes in config to seconds - self.markets_refresh_interval: int = exchange_conf.get( - "markets_refresh_interval", 60) * 60 * 1000 + self.markets_refresh_interval: int = ( + exchange_conf.get("markets_refresh_interval", 60) * 60 * 1000 + ) if self.trading_mode != TradingMode.SPOT and load_leverage_tiers: self.fill_leverage_tiers() @@ -213,8 +257,11 @@ class Exchange: def close(self): logger.debug("Exchange object destroyed, closing async loop") - if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) - and self._api_async.session): + if ( + self._api_async + and inspect.iscoroutinefunction(self._api_async.close) + and self._api_async.session + ): logger.debug("Closing async ccxt session.") self.loop.run_until_complete(self._api_async.close()) if self.loop and not self.loop.is_closed(): @@ -227,48 +274,52 @@ class Exchange: def validate_config(self, config): # Check if timeframe is available - self.validate_timeframes(config.get('timeframe')) + self.validate_timeframes(config.get("timeframe")) # Check if all pairs are available - self.validate_stakecurrency(config['stake_currency']) - if not config['exchange'].get('skip_pair_validation'): - self.validate_pairs(config['exchange']['pair_whitelist']) - self.validate_ordertypes(config.get('order_types', {})) - self.validate_order_time_in_force(config.get('order_time_in_force', {})) + self.validate_stakecurrency(config["stake_currency"]) + if not config["exchange"].get("skip_pair_validation"): + self.validate_pairs(config["exchange"]["pair_whitelist"]) + self.validate_ordertypes(config.get("order_types", {})) + self.validate_order_time_in_force(config.get("order_time_in_force", {})) self.validate_trading_mode_and_margin_mode(self.trading_mode, self.margin_mode) - self.validate_pricing(config['exit_pricing']) - self.validate_pricing(config['entry_pricing']) + self.validate_pricing(config["exit_pricing"]) + self.validate_pricing(config["entry_pricing"]) - def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt, *, - ccxt_kwargs: Dict) -> ccxt.Exchange: + def _init_ccxt( + self, + exchange_config: Dict[str, Any], + ccxt_module: CcxtModuleType = ccxt, + *, + ccxt_kwargs: Dict, + ) -> ccxt.Exchange: """ Initialize ccxt with given config and return valid ccxt instance. 
""" # Find matching class for the given exchange name - name = exchange_config['name'] + name = exchange_config["name"] if not is_exchange_known_ccxt(name, ccxt_module): - raise OperationalException(f'Exchange {name} is not supported by ccxt') + raise OperationalException(f"Exchange {name} is not supported by ccxt") ex_config = { - 'apiKey': exchange_config.get('key'), - 'secret': exchange_config.get('secret'), - 'password': exchange_config.get('password'), - 'uid': exchange_config.get('uid', ''), + "apiKey": exchange_config.get("key"), + "secret": exchange_config.get("secret"), + "password": exchange_config.get("password"), + "uid": exchange_config.get("uid", ""), } if ccxt_kwargs: - logger.info('Applying additional ccxt config: %s', ccxt_kwargs) + logger.info("Applying additional ccxt config: %s", ccxt_kwargs) if self._ccxt_params: # Inject static options after the above output to not confuse users. ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs) if ccxt_kwargs: ex_config.update(ccxt_kwargs) try: - api = getattr(ccxt_module, name.lower())(ex_config) except (KeyError, AttributeError) as e: - raise OperationalException(f'Exchange {name} is not supported') from e + raise OperationalException(f"Exchange {name} is not supported") from e except ccxt.BaseError as e: raise OperationalException(f"Initialization of ccxt failed. Reason: {e}") from e @@ -278,17 +329,9 @@ class Exchange: def _ccxt_config(self) -> Dict: # Parameters to add directly to ccxt sync/async initialization. if self.trading_mode == TradingMode.MARGIN: - return { - "options": { - "defaultType": "margin" - } - } + return {"options": {"defaultType": "margin"}} elif self.trading_mode == TradingMode.FUTURES: - return { - "options": { - "defaultType": self._ft_has["ccxt_futures_name"] - } - } + return {"options": {"defaultType": self._ft_has["ccxt_futures_name"]}} else: return {} @@ -328,13 +371,14 @@ class Exchange: pass def _log_exchange_response(self, endpoint: str, response, *, add_info=None) -> None: - """ Log exchange responses """ + """Log exchange responses""" if self.log_responses: add_info_str = "" if add_info is None else f" {add_info}: " logger.info(f"API {endpoint}: {add_info_str}{response}") def ohlcv_candle_limit( - self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: + self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None + ) -> int: """ Exchange ohlcv candle limit Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits @@ -345,16 +389,22 @@ class Exchange: :param since_ms: Starting timestamp :return: Candle limit as integer """ - return int(self._ft_has.get('ohlcv_candle_limit_per_timeframe', {}).get( - timeframe, self._ft_has.get('ohlcv_candle_limit'))) + return int( + self._ft_has.get("ohlcv_candle_limit_per_timeframe", {}).get( + timeframe, self._ft_has.get("ohlcv_candle_limit") + ) + ) def get_markets( - self, - base_currencies: Optional[List[str]] = None, - quote_currencies: Optional[List[str]] = None, - spot_only: bool = False, margin_only: bool = False, futures_only: bool = False, - tradable_only: bool = True, - active_only: bool = False) -> Dict[str, Any]: + self, + base_currencies: Optional[List[str]] = None, + quote_currencies: Optional[List[str]] = None, + spot_only: bool = False, + margin_only: bool = False, + futures_only: bool = False, + tradable_only: bool = True, + active_only: bool = False, + ) -> Dict[str, Any]: """ Return exchange ccxt markets, filtered out by base currency and quote currency if this was 
requested in parameters. @@ -364,9 +414,9 @@ class Exchange: raise OperationalException("Markets were not loaded.") if base_currencies: - markets = {k: v for k, v in markets.items() if v['base'] in base_currencies} + markets = {k: v for k, v in markets.items() if v["base"] in base_currencies} if quote_currencies: - markets = {k: v for k, v in markets.items() if v['quote'] in quote_currencies} + markets = {k: v for k, v in markets.items() if v["quote"] in quote_currencies} if tradable_only: markets = {k: v for k, v in markets.items() if self.market_is_tradable(v)} if spot_only: @@ -384,27 +434,27 @@ class Exchange: Return a list of supported quote currencies """ markets = self.markets - return sorted(set([x['quote'] for _, x in markets.items()])) + return sorted(set([x["quote"] for _, x in markets.items()])) def get_pair_quote_currency(self, pair: str) -> str: - """ Return a pair's quote currency (base/quote:settlement) """ - return self.markets.get(pair, {}).get('quote', '') + """Return a pair's quote currency (base/quote:settlement)""" + return self.markets.get(pair, {}).get("quote", "") def get_pair_base_currency(self, pair: str) -> str: - """ Return a pair's base currency (base/quote:settlement) """ - return self.markets.get(pair, {}).get('base', '') + """Return a pair's base currency (base/quote:settlement)""" + return self.markets.get(pair, {}).get("base", "") def market_is_future(self, market: Dict[str, Any]) -> bool: return ( - market.get(self._ft_has["ccxt_futures_name"], False) is True and - market.get('linear', False) is True + market.get(self._ft_has["ccxt_futures_name"], False) is True + and market.get("linear", False) is True ) def market_is_spot(self, market: Dict[str, Any]) -> bool: - return market.get('spot', False) is True + return market.get("spot", False) is True def market_is_margin(self, market: Dict[str, Any]) -> bool: - return market.get('margin', False) is True + return market.get("margin", False) is True def market_is_tradable(self, market: Dict[str, Any]) -> bool: """ @@ -412,14 +462,18 @@ class Exchange: Ensures that Configured mode aligns to """ return ( - market.get('quote', None) is not None - and market.get('base', None) is not None - and (self.precisionMode != TICK_SIZE - # Too low precision will falsify calculations - or market.get('precision', {}).get('price') > 1e-11) - and ((self.trading_mode == TradingMode.SPOT and self.market_is_spot(market)) - or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market)) - or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market))) + market.get("quote", None) is not None + and market.get("base", None) is not None + and ( + self.precisionMode != TICK_SIZE + # Too low precision will falsify calculations + or market.get("precision", {}).get("price") > 1e-11 + ) + and ( + (self.trading_mode == TradingMode.SPOT and self.market_is_spot(market)) + or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market)) + or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market)) + ) ) def klines(self, pair_interval: PairWithTimeframe, copy: bool = True) -> DataFrame: @@ -434,37 +488,35 @@ class Exchange: contract_size: float = 1.0 if not market: return None - if market.get('contractSize') is not None: + if market.get("contractSize") is not None: # ccxt has contractSize in markets as string - contract_size = float(market['contractSize']) + contract_size = float(market["contractSize"]) return contract_size else: return 1 def _trades_contracts_to_amount(self, 
trades: List) -> List: - if len(trades) > 0 and 'symbol' in trades[0]: - contract_size = self.get_contract_size(trades[0]['symbol']) + if len(trades) > 0 and "symbol" in trades[0]: + contract_size = self.get_contract_size(trades[0]["symbol"]) if contract_size != 1: for trade in trades: - trade['amount'] = trade['amount'] * contract_size + trade["amount"] = trade["amount"] * contract_size return trades def _order_contracts_to_amount(self, order: Dict) -> Dict: - if 'symbol' in order and order['symbol'] is not None: - contract_size = self.get_contract_size(order['symbol']) + if "symbol" in order and order["symbol"] is not None: + contract_size = self.get_contract_size(order["symbol"]) if contract_size != 1: - for prop in self._ft_has.get('order_props_in_contracts', []): + for prop in self._ft_has.get("order_props_in_contracts", []): if prop in order and order[prop] is not None: order[prop] = order[prop] * contract_size return order def _amount_to_contracts(self, pair: str, amount: float) -> float: - contract_size = self.get_contract_size(pair) return amount_to_contracts(amount, contract_size) def _contracts_to_amount(self, pair: str, num_contracts: float) -> float: - contract_size = self.get_contract_size(pair) return contracts_to_amount(num_contracts, contract_size) @@ -474,33 +526,33 @@ class Exchange: """ contract_size = self.get_contract_size(pair) - return amount_to_contract_precision(amount, self.get_precision_amount(pair), - self.precisionMode, contract_size) + return amount_to_contract_precision( + amount, self.get_precision_amount(pair), self.precisionMode, contract_size + ) def _load_async_markets(self, reload: bool = False) -> None: try: if self._api_async: - self.loop.run_until_complete( - self._api_async.load_markets(reload=reload, params={})) + self.loop.run_until_complete(self._api_async.load_markets(reload=reload, params={})) except (asyncio.TimeoutError, ccxt.BaseError) as e: - logger.warning('Could not load async markets. Reason: %s', e) + logger.warning("Could not load async markets. Reason: %s", e) return def _load_markets(self) -> None: - """ Initialize markets both sync and async """ + """Initialize markets both sync and async""" try: self._markets = self._api.load_markets(params={}) self._load_async_markets() self._last_markets_refresh = dt_ts() - if self._ft_has['needs_trading_fees']: + if self._ft_has["needs_trading_fees"]: self._trading_fees = self.fetch_trading_fees() except ccxt.BaseError: - logger.exception('Unable to initialize markets.') + logger.exception("Unable to initialize markets.") def reload_markets(self, force: bool = False) -> None: - """Reload markets both sync and async if refresh interval has passed """ + """Reload markets both sync and async if refresh interval has passed""" # Check whether markets have to be reloaded if ( not force @@ -528,14 +580,15 @@ class Exchange: """ if not self._markets: raise OperationalException( - 'Could not load markets, therefore cannot start. ' - 'Please investigate the above error for more details.' + "Could not load markets, therefore cannot start. " + "Please investigate the above error for more details." ) quote_currencies = self.get_quote_currencies() if stake_currency not in quote_currencies: raise ConfigurationError( f"{stake_currency} is not available as stake on {self.name}. 
" - f"Available currencies are: {', '.join(quote_currencies)}") + f"Available currencies are: {', '.join(quote_currencies)}" + ) def validate_pairs(self, pairs: List[str]) -> None: """ @@ -546,7 +599,7 @@ class Exchange: """ if not self.markets: - logger.warning('Unable to validate pairs (assuming they are correct).') + logger.warning("Unable to validate pairs (assuming they are correct).") return extended_pairs = expand_pairlist(pairs, list(self.markets), keep_invalid=True) invalid_pairs = [] @@ -554,8 +607,9 @@ class Exchange: # Note: ccxt has BaseCurrency/QuoteCurrency format for pairs if self.markets and pair not in self.markets: raise OperationalException( - f'Pair {pair} is not available on {self.name} {self.trading_mode.value}. ' - f'Please remove {pair} from your whitelist.') + f"Pair {pair} is not available on {self.name} {self.trading_mode.value}. " + f"Please remove {pair} from your whitelist." + ) # From ccxt Documentation: # markets.info: An associative array of non-common market properties, @@ -563,27 +617,33 @@ class Exchange: # The internal info array is different for each particular market, # its contents depend on the exchange. # It can also be a string or similar ... so we need to verify that first. - elif (isinstance(self.markets[pair].get('info'), dict) - and self.markets[pair].get('info', {}).get('prohibitedIn', False)): + elif isinstance(self.markets[pair].get("info"), dict) and self.markets[pair].get( + "info", {} + ).get("prohibitedIn", False): # Warn users about restricted pairs in whitelist. # We cannot determine reliably if Users are affected. - logger.warning(f"Pair {pair} is restricted for some users on this exchange." - f"Please check if you are impacted by this restriction " - f"on the exchange and eventually remove {pair} from your whitelist.") - if (self._config['stake_currency'] and - self.get_pair_quote_currency(pair) != self._config['stake_currency']): + logger.warning( + f"Pair {pair} is restricted for some users on this exchange." + f"Please check if you are impacted by this restriction " + f"on the exchange and eventually remove {pair} from your whitelist." + ) + if ( + self._config["stake_currency"] + and self.get_pair_quote_currency(pair) != self._config["stake_currency"] + ): invalid_pairs.append(pair) if invalid_pairs: raise OperationalException( f"Stake-currency '{self._config['stake_currency']}' not compatible with " - f"pair-whitelist. Please remove the following pairs: {invalid_pairs}") + f"pair-whitelist. Please remove the following pairs: {invalid_pairs}" + ) def get_valid_pair_combination(self, curr_1: str, curr_2: str) -> str: """ Get valid pair combination of curr_1 and curr_2 by trying both combinations. """ for pair in [f"{curr_1}/{curr_2}", f"{curr_2}/{curr_1}"]: - if pair in self.markets and self.markets[pair].get('active'): + if pair in self.markets and self.markets[pair].get("active"): return pair raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.") @@ -598,15 +658,17 @@ class Exchange: raise OperationalException( f"The ccxt library does not provide the list of timeframes " f"for the exchange {self.name} and this exchange " - f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}") + f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}" + ) if timeframe and (timeframe not in self.timeframes): raise ConfigurationError( - f"Invalid timeframe '{timeframe}'. This exchange supports: {self.timeframes}") + f"Invalid timeframe '{timeframe}'. 
This exchange supports: {self.timeframes}" + ) if ( timeframe - and self._config['runmode'] != RunMode.UTIL_EXCHANGE + and self._config["runmode"] != RunMode.UTIL_EXCHANGE and timeframe_to_minutes(timeframe) < 1 ): raise ConfigurationError("Timeframes < 1m are currently not supported by Freqtrade.") @@ -615,48 +677,49 @@ class Exchange: """ Checks if order-types configured in strategy/config are supported """ - if any(v == 'market' for k, v in order_types.items()): - if not self.exchange_has('createMarketOrder'): - raise ConfigurationError( - f'Exchange {self.name} does not support market orders.') + if any(v == "market" for k, v in order_types.items()): + if not self.exchange_has("createMarketOrder"): + raise ConfigurationError(f"Exchange {self.name} does not support market orders.") self.validate_stop_ordertypes(order_types) def validate_stop_ordertypes(self, order_types: Dict) -> None: """ Validate stoploss order types """ - if (order_types.get("stoploss_on_exchange") - and not self._ft_has.get("stoploss_on_exchange", False)): - raise ConfigurationError( - f'On exchange stoploss is not supported for {self.name}.' - ) + if order_types.get("stoploss_on_exchange") and not self._ft_has.get( + "stoploss_on_exchange", False + ): + raise ConfigurationError(f"On exchange stoploss is not supported for {self.name}.") if self.trading_mode == TradingMode.FUTURES: - price_mapping = self._ft_has.get('stop_price_type_value_mapping', {}).keys() + price_mapping = self._ft_has.get("stop_price_type_value_mapping", {}).keys() if ( order_types.get("stoploss_on_exchange", False) is True - and 'stoploss_price_type' in order_types - and order_types['stoploss_price_type'] not in price_mapping + and "stoploss_price_type" in order_types + and order_types["stoploss_price_type"] not in price_mapping ): raise ConfigurationError( - f'On exchange stoploss price type is not supported for {self.name}.' + f"On exchange stoploss price type is not supported for {self.name}." ) def validate_pricing(self, pricing: Dict) -> None: - if pricing.get('use_order_book', False) and not self.exchange_has('fetchL2OrderBook'): - raise ConfigurationError(f'Orderbook not available for {self.name}.') - if (not pricing.get('use_order_book', False) and ( - not self.exchange_has('fetchTicker') - or not self._ft_has['tickers_have_price'])): - raise ConfigurationError(f'Ticker pricing not available for {self.name}.') + if pricing.get("use_order_book", False) and not self.exchange_has("fetchL2OrderBook"): + raise ConfigurationError(f"Orderbook not available for {self.name}.") + if not pricing.get("use_order_book", False) and ( + not self.exchange_has("fetchTicker") or not self._ft_has["tickers_have_price"] + ): + raise ConfigurationError(f"Ticker pricing not available for {self.name}.") def validate_order_time_in_force(self, order_time_in_force: Dict) -> None: """ Checks if order time in force configured in strategy/config are supported """ - if any(v.upper() not in self._ft_has["order_time_in_force"] - for k, v in order_time_in_force.items()): + if any( + v.upper() not in self._ft_has["order_time_in_force"] + for k, v in order_time_in_force.items() + ): raise ConfigurationError( - f'Time in force policies are not supported for {self.name} yet.') + f"Time in force policies are not supported for {self.name} yet." 
+ ) def validate_required_startup_candles(self, startup_candles: int, timeframe: str) -> int: """ @@ -665,36 +728,41 @@ class Exchange: """ candle_limit = self.ohlcv_candle_limit( - timeframe, self._config['candle_type_def'], - dt_ts(date_minus_candles(timeframe, startup_candles)) - if timeframe else None) + timeframe, + self._config["candle_type_def"], + dt_ts(date_minus_candles(timeframe, startup_candles)) if timeframe else None, + ) # Require one more candle - to account for the still open candle. candle_count = startup_candles + 1 # Allow 5 calls to the exchange per pair required_candle_call_count = int( - (candle_count / candle_limit) + (0 if candle_count % candle_limit == 0 else 1)) - if self._ft_has['ohlcv_has_history']: - + (candle_count / candle_limit) + (0 if candle_count % candle_limit == 0 else 1) + ) + if self._ft_has["ohlcv_has_history"]: if required_candle_call_count > 5: # Only allow 5 calls per pair to somewhat limit the impact raise ConfigurationError( f"This strategy requires {startup_candles} candles to start, " "which is more than 5x " - f"the amount of candles {self.name} provides for {timeframe}.") + f"the amount of candles {self.name} provides for {timeframe}." + ) elif required_candle_call_count > 1: raise ConfigurationError( f"This strategy requires {startup_candles} candles to start, which is more than " - f"the amount of candles {self.name} provides for {timeframe}.") + f"the amount of candles {self.name} provides for {timeframe}." + ) if required_candle_call_count > 1: - logger.warning(f"Using {required_candle_call_count} calls to get OHLCV. " - f"This can result in slower operations for the bot. Please check " - f"if you really need {startup_candles} candles for your strategy") + logger.warning( + f"Using {required_candle_call_count} calls to get OHLCV. " + f"This can result in slower operations for the bot. Please check " + f"if you really need {startup_candles} candles for your strategy" + ) return required_candle_call_count def validate_trading_mode_and_margin_mode( self, trading_mode: TradingMode, - margin_mode: Optional[MarginMode] # Only None when trading_mode = TradingMode.SPOT + margin_mode: Optional[MarginMode], # Only None when trading_mode = TradingMode.SPOT ): """ Checks if freqtrade can perform trades using the configured @@ -723,8 +791,8 @@ class Exchange: :param endpoint: Name of endpoint (e.g. 'fetchOHLCV', 'fetchTickers') :return: bool """ - if endpoint in self._ft_has.get('exchange_has_overrides', {}): - return self._ft_has['exchange_has_overrides'][endpoint] + if endpoint in self._ft_has.get("exchange_has_overrides", {}): + return self._ft_has["exchange_has_overrides"][endpoint] return endpoint in self._api.has and self._api.has[endpoint] def get_precision_amount(self, pair: str) -> Optional[float]: @@ -733,7 +801,7 @@ class Exchange: :param pair: Pair to get precision for :return: precision for amount or None. Must be used in combination with precisionMode """ - return self.markets.get(pair, {}).get('precision', {}).get('amount', None) + return self.markets.get(pair, {}).get("precision", {}).get("amount", None) def get_precision_price(self, pair: str) -> Optional[float]: """ @@ -741,7 +809,7 @@ class Exchange: :param pair: Pair to get precision for :return: precision for price or None. 
Must be used in combination with precisionMode """ - return self.markets.get(pair, {}).get('precision', {}).get('price', None) + return self.markets.get(pair, {}).get("precision", {}).get("price", None) def amount_to_precision(self, pair: str, amount: float) -> float: """ @@ -756,35 +824,33 @@ class Exchange: The default price_rounding_mode in conf is ROUND. For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts. """ - return price_to_precision(price, self.get_precision_price(pair), - self.precisionMode, rounding_mode=rounding_mode) + return price_to_precision( + price, self.get_precision_price(pair), self.precisionMode, rounding_mode=rounding_mode + ) def price_get_one_pip(self, pair: str, price: float) -> float: """ Gets the "1 pip" value for this pair. Used in PriceFilter to calculate the 1pip movements. """ - precision = self.markets[pair]['precision']['price'] + precision = self.markets[pair]["precision"]["price"] if self.precisionMode == TICK_SIZE: return precision else: return 1 / pow(10, precision) def get_min_pair_stake_amount( - self, - pair: str, - price: float, - stoploss: float, - leverage: Optional[float] = 1.0 + self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0 ) -> Optional[float]: - return self._get_stake_amount_limit(pair, price, stoploss, 'min', leverage) + return self._get_stake_amount_limit(pair, price, stoploss, "min", leverage) def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float: - max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max', leverage) + max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, "max", leverage) if max_stake_amount is None: # * Should never be executed - raise OperationalException(f'{self.name}.get_max_pair_stake_amount should' - 'never set max_stake_amount to None') + raise OperationalException( + f"{self.name}.get_max_pair_stake_amount should never set max_stake_amount to None" + ) return max_stake_amount def _get_stake_amount_limit( @@ -792,11 +858,10 @@ class Exchange: pair: str, price: float, stoploss: float, - limit: Literal['min', 'max'], - leverage: Optional[float] = 1.0 + limit: Literal["min", "max"], + leverage: Optional[float] = 1.0, ) -> Optional[float]: - - isMin = limit == 'min' + isMin = limit == "min" try: market = self.markets[pair] @@ -805,11 +870,10 @@ class Exchange: if isMin: # reserve some percent defined in config (5% default) + stoploss - margin_reserve: float = 1.0 + self._config.get('amount_reserve_percent', - DEFAULT_AMOUNT_RESERVE_PERCENT) - stoploss_reserve = ( - margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5 + margin_reserve: float = 1.0 + self._config.get( + "amount_reserve_percent", DEFAULT_AMOUNT_RESERVE_PERCENT ) + stoploss_reserve = margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5 # it should not be more than 50% stoploss_reserve = max(min(stoploss_reserve, 1.5), 1) else: @@ -817,26 +881,25 @@ class Exchange: stoploss_reserve = 1.0 stake_limits = [] - limits = market['limits'] - if (limits['cost'][limit] is not None): + limits = market["limits"] + if limits["cost"][limit] is not None: stake_limits.append( - self._contracts_to_amount(pair, limits['cost'][limit]) * stoploss_reserve + self._contracts_to_amount(pair, limits["cost"][limit]) * stoploss_reserve ) - if (limits['amount'][limit] is not None): + if limits["amount"][limit] is not None: stake_limits.append( - self._contracts_to_amount(pair, limits['amount'][limit]) * price * margin_reserve + 
self._contracts_to_amount(pair, limits["amount"][limit]) * price * margin_reserve ) if not stake_limits: - return None if isMin else float('inf') + return None if isMin else float("inf") # The value returned should satisfy both limits: for amount (base currency) and # for cost (quote, stake currency), so max() is used here. # See also #2575 at github. return self._get_stake_amount_considering_leverage( - max(stake_limits) if isMin else min(stake_limits), - leverage or 1.0 + max(stake_limits) if isMin else min(stake_limits), leverage or 1.0 ) def _get_stake_amount_considering_leverage(self, stake_amount: float, leverage: float) -> float: @@ -850,39 +913,48 @@ class Exchange: # Dry-run methods - def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float, - rate: float, leverage: float, params: Optional[Dict] = None, - stop_loss: bool = False) -> Dict[str, Any]: + def create_dry_run_order( + self, + pair: str, + ordertype: str, + side: str, + amount: float, + rate: float, + leverage: float, + params: Optional[Dict] = None, + stop_loss: bool = False, + ) -> Dict[str, Any]: now = dt_now() - order_id = f'dry_run_{side}_{pair}_{now.timestamp()}' + order_id = f"dry_run_{side}_{pair}_{now.timestamp()}" # Rounding here must respect to contract sizes _amount = self._contracts_to_amount( - pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))) + pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) + ) dry_order: Dict[str, Any] = { - 'id': order_id, - 'symbol': pair, - 'price': rate, - 'average': rate, - 'amount': _amount, - 'cost': _amount * rate, - 'type': ordertype, - 'side': side, - 'filled': 0, - 'remaining': _amount, - 'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), - 'timestamp': dt_ts(now), - 'status': "open", - 'fee': None, - 'info': {}, - 'leverage': leverage + "id": order_id, + "symbol": pair, + "price": rate, + "average": rate, + "amount": _amount, + "cost": _amount * rate, + "type": ordertype, + "side": side, + "filled": 0, + "remaining": _amount, + "datetime": now.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), + "timestamp": dt_ts(now), + "status": "open", + "fee": None, + "info": {}, + "leverage": leverage, } if stop_loss: dry_order["info"] = {"stopPrice": dry_order["price"]} - dry_order[self._ft_has['stop_price_prop']] = dry_order["price"] + dry_order[self._ft_has["stop_price_prop"]] = dry_order["price"] # Workaround to avoid filling stoploss orders immediately dry_order["ft_order_type"] = "stoploss" orderbook: Optional[OrderBook] = None - if self.exchange_has('fetchL2OrderBook'): + if self.exchange_has("fetchL2OrderBook"): orderbook = self.fetch_l2_order_book(pair, 20) if ordertype == "limit" and orderbook: # Allow a 1% price difference @@ -890,24 +962,28 @@ class Exchange: if self._dry_is_price_crossed(pair, side, rate, orderbook, allowed_diff): logger.info( f"Converted order {pair} to market order due to price {rate} crossing spread " - f"by more than {allowed_diff:.2%}.") + f"by more than {allowed_diff:.2%}." 
+ ) dry_order["type"] = "market" if dry_order["type"] == "market" and not dry_order.get("ft_order_type"): # Update market order pricing average = self.get_dry_market_fill_price(pair, side, amount, rate, orderbook) - dry_order.update({ - 'average': average, - 'filled': _amount, - 'remaining': 0.0, - 'status': "closed", - 'cost': (dry_order['amount'] * average) - }) + dry_order.update( + { + "average": average, + "filled": _amount, + "remaining": 0.0, + "status": "closed", + "cost": (dry_order["amount"] * average), + } + ) # market orders will always incurr taker fees - dry_order = self.add_dry_order_fee(pair, dry_order, 'taker') + dry_order = self.add_dry_order_fee(pair, dry_order, "taker") dry_order = self.check_dry_limit_order_filled( - dry_order, immediate=True, orderbook=orderbook) + dry_order, immediate=True, orderbook=orderbook + ) self._dry_run_open_orders[dry_order["id"]] = dry_order # Copy order and close it - so the returned order is open unless it's a market order @@ -920,26 +996,29 @@ class Exchange: taker_or_maker: MakerTaker, ) -> Dict[str, Any]: fee = self.get_fee(pair, taker_or_maker=taker_or_maker) - dry_order.update({ - 'fee': { - 'currency': self.get_pair_quote_currency(pair), - 'cost': dry_order['cost'] * fee, - 'rate': fee + dry_order.update( + { + "fee": { + "currency": self.get_pair_quote_currency(pair), + "cost": dry_order["cost"] * fee, + "rate": fee, + } } - }) + ) return dry_order - def get_dry_market_fill_price(self, pair: str, side: str, amount: float, rate: float, - orderbook: Optional[OrderBook]) -> float: + def get_dry_market_fill_price( + self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook] + ) -> float: """ Get the market order fill price based on orderbook interpolation """ - if self.exchange_has('fetchL2OrderBook'): + if self.exchange_has("fetchL2OrderBook"): if not orderbook: orderbook = self.fetch_l2_order_book(pair, 20) - ob_type: OBLiteral = 'asks' if side == 'buy' else 'bids' + ob_type: OBLiteral = "asks" if side == "buy" else "bids" slippage = 0.05 - max_slippage_val = rate * ((1 + slippage) if side == 'buy' else (1 - slippage)) + max_slippage_val = rate * ((1 + slippage) if side == "buy" else (1 - slippage)) remaining_amount = amount filled_value = 0.0 @@ -962,7 +1041,7 @@ class Exchange: filled_value += remaining_amount * book_entry_price forecast_avg_filled_price = max(filled_value, 0) / amount # Limit max. 
slippage to specified value - if side == 'buy': + if side == "buy": forecast_avg_filled_price = min(forecast_avg_filled_price, max_slippage_val) else: @@ -972,19 +1051,25 @@ class Exchange: return rate - def _dry_is_price_crossed(self, pair: str, side: str, limit: float, - orderbook: Optional[OrderBook] = None, offset: float = 0.0) -> bool: - if not self.exchange_has('fetchL2OrderBook'): + def _dry_is_price_crossed( + self, + pair: str, + side: str, + limit: float, + orderbook: Optional[OrderBook] = None, + offset: float = 0.0, + ) -> bool: + if not self.exchange_has("fetchL2OrderBook"): return True if not orderbook: orderbook = self.fetch_l2_order_book(pair, 1) try: - if side == 'buy': - price = orderbook['asks'][0][0] + if side == "buy": + price = orderbook["asks"][0][0] if limit * (1 - offset) >= price: return True else: - price = orderbook['bids'][0][0] + price = orderbook["bids"][0][0] if limit * (1 + offset) <= price: return True except IndexError: @@ -993,26 +1078,30 @@ class Exchange: return False def check_dry_limit_order_filled( - self, order: Dict[str, Any], immediate: bool = False, - orderbook: Optional[OrderBook] = None) -> Dict[str, Any]: + self, order: Dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None + ) -> Dict[str, Any]: """ Check dry-run limit order fill and update fee (if it filled). """ - if (order['status'] != "closed" - and order['type'] in ["limit"] - and not order.get('ft_order_type')): - pair = order['symbol'] - if self._dry_is_price_crossed(pair, order['side'], order['price'], orderbook): - order.update({ - 'status': 'closed', - 'filled': order['amount'], - 'remaining': 0, - }) + if ( + order["status"] != "closed" + and order["type"] in ["limit"] + and not order.get("ft_order_type") + ): + pair = order["symbol"] + if self._dry_is_price_crossed(pair, order["side"], order["price"], orderbook): + order.update( + { + "status": "closed", + "filled": order["amount"], + "remaining": 0, + } + ) self.add_dry_order_fee( pair, order, - 'taker' if immediate else 'maker', + "taker" if immediate else "maker", ) return order @@ -1028,14 +1117,16 @@ class Exchange: return order except KeyError as e: from freqtrade.persistence import Order + order = Order.order_by_id(order_id) if order: - ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_prop']) + ccxt_order = order.to_ccxt_object(self._ft_has["stop_price_prop"]) self._dry_run_open_orders[order_id] = ccxt_order return ccxt_order # Gracefully handle errors with dry-run orders. raise InvalidOrderException( - f'Tried to get an invalid dry-run-order (id: {order_id}). Message: {e}') from e + f"Tried to get an invalid dry-run-order (id: {order_id}). 
Message: {e}" + ) from e # Order handling @@ -1050,20 +1141,20 @@ class Exchange: ordertype: str, leverage: float, reduceOnly: bool, - time_in_force: str = 'GTC', + time_in_force: str = "GTC", ) -> Dict: params = self._params.copy() - if time_in_force != 'GTC' and ordertype != 'market': - params.update({'timeInForce': time_in_force.upper()}) + if time_in_force != "GTC" and ordertype != "market": + params.update({"timeInForce": time_in_force.upper()}) if reduceOnly: - params.update({'reduceOnly': True}) + params.update({"reduceOnly": True}) return params def _order_needs_price(self, ordertype: str) -> bool: return ( - ordertype != 'market' + ordertype != "market" or self._api.options.get("createMarketBuyOrderRequiresPrice", False) - or self._ft_has.get('marketOrderRequiresPrice', False) + or self._ft_has.get("marketOrderRequiresPrice", False) ) def create_order( @@ -1076,11 +1167,12 @@ class Exchange: rate: float, leverage: float, reduceOnly: bool = False, - time_in_force: str = 'GTC', + time_in_force: str = "GTC", ) -> Dict: - if self._config['dry_run']: + if self._config["dry_run"]: dry_order = self.create_dry_run_order( - pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage) + pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage + ) return dry_order params = self._get_params(side, ordertype, leverage, reduceOnly, time_in_force) @@ -1102,32 +1194,35 @@ class Exchange: rate_for_order, params, ) - if order.get('status') is None: + if order.get("status") is None: # Map empty status to open. - order['status'] = 'open' + order["status"] = "open" - if order.get('type') is None: - order['type'] = ordertype + if order.get("type") is None: + order["type"] = ordertype - self._log_exchange_response('create_order', order) + self._log_exchange_response("create_order", order) order = self._order_contracts_to_amount(order) return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( - f'Insufficient funds to create {ordertype} {side} order on market {pair}. ' - f'Tried to {side} amount {amount} at rate {rate}.' - f'Message: {e}') from e + f"Insufficient funds to create {ordertype} {side} order on market {pair}. " + f"Tried to {side} amount {amount} at rate {rate}." + f"Message: {e}" + ) from e except ccxt.InvalidOrder as e: raise InvalidOrderException( - f'Could not create {ordertype} {side} order on market {pair}. ' - f'Tried to {side} amount {amount} at rate {rate}. ' - f'Message: {e}') from e + f"Could not create {ordertype} {side} order on market {pair}. " + f"Tried to {side} amount {amount} at rate {rate}. " + f"Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not place {side} order due to {e.__class__.__name__}. Message: {e}') from e + f"Could not place {side} order due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1136,17 +1231,15 @@ class Exchange: Verify stop_loss against stoploss-order value (limit or price) Returns True if adjustment is necessary. 
""" - if not self._ft_has.get('stoploss_on_exchange'): + if not self._ft_has.get("stoploss_on_exchange"): raise OperationalException(f"stoploss is not implemented for {self.name}.") - price_param = self._ft_has['stop_price_prop'] - return ( - order.get(price_param, None) is None - or ((side == "sell" and stop_loss > float(order[price_param])) or - (side == "buy" and stop_loss < float(order[price_param]))) + price_param = self._ft_has["stop_price_prop"] + return order.get(price_param, None) is None or ( + (side == "sell" and stop_loss > float(order[price_param])) + or (side == "buy" and stop_loss < float(order[price_param])) ) def _get_stop_order_type(self, user_order_type) -> Tuple[str, str]: - available_order_Types: Dict[str, str] = self._ft_has["stoploss_order_types"] if user_order_type in available_order_Types.keys(): @@ -1159,14 +1252,13 @@ class Exchange: def _get_stop_limit_rate(self, stop_price: float, order_types: Dict, side: str) -> float: # Limit price threshold: As limit price should always be below stop-price - limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99) + limit_price_pct = order_types.get("stoploss_on_exchange_limit_ratio", 0.99) if side == "sell": limit_rate = stop_price * limit_price_pct else: limit_rate = stop_price * (2 - limit_price_pct) - bad_stop_price = ((stop_price < limit_rate) if side == - "sell" else (stop_price > limit_rate)) + bad_stop_price = (stop_price < limit_rate) if side == "sell" else (stop_price > limit_rate) # Ensure rate is less than stop price if bad_stop_price: # This can for example happen if the stop / liquidation price is set to 0 @@ -1177,18 +1269,25 @@ class Exchange: "In stoploss limit order, stop price should be more than limit price. " f"Stop price: {stop_price}, Limit price: {limit_rate}, " f"Limit Price pct: {limit_price_pct}" - ) + ) return limit_rate def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: params = self._params.copy() # Verify if stopPrice works for your exchange, else configure stop_price_param - params.update({self._ft_has['stop_price_param']: stop_price}) + params.update({self._ft_has["stop_price_param"]: stop_price}) return params @retrier(retries=0) - def create_stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict, - side: BuySell, leverage: float) -> Dict: + def create_stoploss( + self, + pair: str, + amount: float, + stop_price: float, + order_types: Dict, + side: BuySell, + leverage: float, + ) -> Dict: """ creates a stoploss order. requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market @@ -1204,19 +1303,19 @@ class Exchange: WARNING: setting `stoploss_on_exchange` to True will NOT auto-enable stoploss on exchange. `stoploss_adjust` must still be implemented for this to work. 
""" - if not self._ft_has['stoploss_on_exchange']: + if not self._ft_has["stoploss_on_exchange"]: raise OperationalException(f"stoploss is not implemented for {self.name}.") - user_order_type = order_types.get('stoploss', 'market') + user_order_type = order_types.get("stoploss", "market") ordertype, user_order_type = self._get_stop_order_type(user_order_type) - round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP + round_mode = ROUND_DOWN if side == "buy" else ROUND_UP stop_price_norm = self.price_to_precision(pair, stop_price, rounding_mode=round_mode) limit_rate = None - if user_order_type == 'limit': + if user_order_type == "limit": limit_rate = self._get_stop_limit_rate(stop_price, order_types, side) limit_rate = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode) - if self._config['dry_run']: + if self._config["dry_run"]: dry_order = self.create_dry_run_order( pair, ordertype, @@ -1229,43 +1328,55 @@ class Exchange: return dry_order try: - params = self._get_stop_params(side=side, ordertype=ordertype, - stop_price=stop_price_norm) + params = self._get_stop_params( + side=side, ordertype=ordertype, stop_price=stop_price_norm + ) if self.trading_mode == TradingMode.FUTURES: - params['reduceOnly'] = True - if 'stoploss_price_type' in order_types and 'stop_price_type_field' in self._ft_has: - price_type = self._ft_has['stop_price_type_value_mapping'][ - order_types.get('stoploss_price_type', PriceType.LAST)] - params[self._ft_has['stop_price_type_field']] = price_type + params["reduceOnly"] = True + if "stoploss_price_type" in order_types and "stop_price_type_field" in self._ft_has: + price_type = self._ft_has["stop_price_type_value_mapping"][ + order_types.get("stoploss_price_type", PriceType.LAST) + ] + params[self._ft_has["stop_price_type_field"]] = price_type amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) self._lev_prep(pair, leverage, side, accept_fail=True) - order = self._api.create_order(symbol=pair, type=ordertype, side=side, - amount=amount, price=limit_rate, params=params) - self._log_exchange_response('create_stoploss_order', order) + order = self._api.create_order( + symbol=pair, + type=ordertype, + side=side, + amount=amount, + price=limit_rate, + params=params, + ) + self._log_exchange_response("create_stoploss_order", order) order = self._order_contracts_to_amount(order) - logger.info(f"stoploss {user_order_type} order added for {pair}. " - f"stop price: {stop_price}. limit: {limit_rate}") + logger.info( + f"stoploss {user_order_type} order added for {pair}. " + f"stop price: {stop_price}. limit: {limit_rate}" + ) return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( - f'Insufficient funds to create {ordertype} {side} order on market {pair}. ' - f'Tried to {side} amount {amount} at rate {limit_rate} with ' - f'stop-price {stop_price_norm}. Message: {e}') from e + f"Insufficient funds to create {ordertype} {side} order on market {pair}. " + f"Tried to {side} amount {amount} at rate {limit_rate} with " + f"stop-price {stop_price_norm}. Message: {e}" + ) from e except (ccxt.InvalidOrder, ccxt.BadRequest, ccxt.OperationRejected) as e: # Errors: # `Order would trigger immediately.` raise InvalidOrderException( - f'Could not create {ordertype} {side} order on market {pair}. ' - f'Tried to {side} amount {amount} at rate {limit_rate} with ' - f'stop-price {stop_price_norm}. Message: {e}') from e + f"Could not create {ordertype} {side} order on market {pair}. 
" + f"Tried to {side} amount {amount} at rate {limit_rate} with " + f"stop-price {stop_price_norm}. Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f"Could not place stoploss order due to {e.__class__.__name__}. " - f"Message: {e}") from e + f"Could not place stoploss order due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1276,61 +1387,68 @@ class Exchange: """ try: order = self._api.fetch_open_order(order_id, pair, params=params) - self._log_exchange_response('fetch_open_order', order) + self._log_exchange_response("fetch_open_order", order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound: try: order = self._api.fetch_closed_order(order_id, pair, params=params) - self._log_exchange_response('fetch_closed_order', order) + self._log_exchange_response("fetch_closed_order", order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound as e: raise RetryableOrderError( - f'Order not found (pair: {pair} id: {order_id}). Message: {e}') from e + f"Order not found (pair: {pair} id: {order_id}). Message: {e}" + ) from e except ccxt.InvalidOrder as e: raise InvalidOrderException( - f'Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}') from e + f"Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get order due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get order due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier(retries=API_FETCH_ORDER_RETRY_COUNT) def fetch_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - if self._config['dry_run']: + if self._config["dry_run"]: return self.fetch_dry_run_order(order_id) if params is None: params = {} try: - if not self.exchange_has('fetchOrder'): + if not self.exchange_has("fetchOrder"): return self.fetch_order_emulated(order_id, pair, params) order = self._api.fetch_order(order_id, pair, params=params) - self._log_exchange_response('fetch_order', order) + self._log_exchange_response("fetch_order", order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound as e: raise RetryableOrderError( - f'Order not found (pair: {pair} id: {order_id}). Message: {e}') from e + f"Order not found (pair: {pair} id: {order_id}). Message: {e}" + ) from e except ccxt.InvalidOrder as e: raise InvalidOrderException( - f'Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}') from e + f"Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get order due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get order due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: return self.fetch_order(order_id, pair, params) - def fetch_order_or_stoploss_order(self, order_id: str, pair: str, - stoploss_order: bool = False) -> Dict: + def fetch_order_or_stoploss_order( + self, order_id: str, pair: str, stoploss_order: bool = False + ) -> Dict: """ Simple wrapper calling either fetch_order or fetch_stoploss_order depending on the stoploss_order parameter @@ -1348,16 +1466,15 @@ class Exchange: :param order: Order dict as returned from fetch_order() :return: True if order has been cancelled without being filled, False otherwise. """ - return (order.get('status') in NON_OPEN_EXCHANGE_STATES - and order.get('filled') == 0.0) + return order.get("status") in NON_OPEN_EXCHANGE_STATES and order.get("filled") == 0.0 @retrier def cancel_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - if self._config['dry_run']: + if self._config["dry_run"]: try: order = self.fetch_dry_run_order(order_id) - order.update({'status': 'canceled', 'filled': 0.0, 'remaining': order['amount']}) + order.update({"status": "canceled", "filled": 0.0, "remaining": order["amount"]}) return order except InvalidOrderException: return {} @@ -1366,29 +1483,30 @@ class Exchange: params = {} try: order = self._api.cancel_order(order_id, pair, params=params) - self._log_exchange_response('cancel_order', order) + self._log_exchange_response("cancel_order", order) order = self._order_contracts_to_amount(order) return order except ccxt.InvalidOrder as e: - raise InvalidOrderException( - f'Could not cancel order. Message: {e}') from e + raise InvalidOrderException(f"Could not cancel order. Message: {e}") from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not cancel order due to {e.__class__.__name__}. Message: {e}') from e + f"Could not cancel order due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def cancel_stoploss_order( - self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: + self, order_id: str, pair: str, params: Optional[Dict] = None + ) -> Dict: return self.cancel_order(order_id, pair, params) def is_cancel_order_result_suitable(self, corder) -> bool: if not isinstance(corder, dict): return False - required = ('fee', 'status', 'amount') + required = ("fee", "status", "amount") return all(corder.get(k, None) is not None for k in required) def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: @@ -1412,12 +1530,12 @@ class Exchange: except InvalidOrderException: logger.warning(f"Could not fetch cancelled order {order_id}.") order = { - 'id': order_id, - 'status': 'canceled', - 'amount': amount, - 'filled': 0.0, - 'fee': {}, - 'info': {} + "id": order_id, + "status": "canceled", + "amount": amount, + "filled": 0.0, + "fee": {}, + "info": {}, } return order @@ -1439,13 +1557,12 @@ class Exchange: order = self.fetch_stoploss_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled stoploss order {order_id}.") - order = {'id': order_id, 'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}} + order = {"id": order_id, "fee": {}, "status": "canceled", "amount": amount, "info": {}} return order @retrier def get_balances(self) -> dict: - try: balances = self._api.fetch_balance() # Remove additional info from ccxt results @@ -1459,7 +1576,8 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get balance due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1470,28 +1588,29 @@ class Exchange: If no pair is given, all positions are returned. :param pair: Pair for the query """ - if self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES: + if self._config["dry_run"] or self.trading_mode != TradingMode.FUTURES: return [] try: symbols = [] if pair: symbols.append(pair) positions: List[Dict] = self._api.fetch_positions(symbols) - self._log_exchange_response('fetch_positions', positions) + self._log_exchange_response("fetch_positions", positions) return positions except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get positions due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get positions due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def _fetch_orders_emulate(self, pair: str, since_ms: int) -> List[Dict]: orders = [] - if self.exchange_has('fetchClosedOrders'): + if self.exchange_has("fetchClosedOrders"): orders = self._api.fetch_closed_orders(pair, since=since_ms) - if self.exchange_has('fetchOpenOrders'): + if self.exchange_has("fetchOpenOrders"): orders_open = self._api.fetch_open_orders(pair, since=since_ms) orders.extend(orders_open) return orders @@ -1503,13 +1622,13 @@ class Exchange: :param pair: Pair for the query :param since: Starting time for the query """ - if self._config['dry_run']: + if self._config["dry_run"]: return [] try: since_ms = int((since.timestamp() - 10) * 1000) - if self.exchange_has('fetchOrders'): + if self.exchange_has("fetchOrders"): if not params: params = {} try: @@ -1520,14 +1639,15 @@ class Exchange: orders = self._fetch_orders_emulate(pair, since_ms) else: orders = self._fetch_orders_emulate(pair, since_ms) - self._log_exchange_response('fetch_orders', orders) + self._log_exchange_response("fetch_orders", orders) orders = [self._order_contracts_to_amount(o) for o in orders] return orders except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not fetch positions due to {e.__class__.__name__}. Message: {e}') from e + f"Could not fetch positions due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1537,18 +1657,22 @@ class Exchange: Fetch user account trading fees Can be cached, should not update often. """ - if (self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES - or not self.exchange_has('fetchTradingFees')): + if ( + self._config["dry_run"] + or self.trading_mode != TradingMode.FUTURES + or not self.exchange_has("fetchTradingFees") + ): return {} try: trading_fees: Dict[str, Any] = self._api.fetch_trading_fees() - self._log_exchange_response('fetch_trading_fees', trading_fees) + self._log_exchange_response("fetch_trading_fees", trading_fees) return trading_fees except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not fetch trading fees due to {e.__class__.__name__}. Message: {e}') from e + f"Could not fetch trading fees due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1559,27 +1683,29 @@ class Exchange: :param cached: Allow cached result :return: fetch_bids_asks result """ - if not self.exchange_has('fetchBidsAsks'): + if not self.exchange_has("fetchBidsAsks"): return {} if cached: with self._cache_lock: - tickers = self._fetch_tickers_cache.get('fetch_bids_asks') + tickers = self._fetch_tickers_cache.get("fetch_bids_asks") if tickers: return tickers try: tickers = self._api.fetch_bids_asks(symbols) with self._cache_lock: - self._fetch_tickers_cache['fetch_bids_asks'] = tickers + self._fetch_tickers_cache["fetch_bids_asks"] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( - f'Exchange {self._api.name} does not support fetching bids/asks in batch. ' - f'Message: {e}') from e + f"Exchange {self._api.name} does not support fetching bids/asks in batch. 
" + f"Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not load bids/asks due to {e.__class__.__name__}. Message: {e}') from e + f"Could not load bids/asks due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1590,25 +1716,28 @@ class Exchange: :return: fetch_tickers result """ tickers: Tickers - if not self.exchange_has('fetchTickers'): + if not self.exchange_has("fetchTickers"): return {} if cached: with self._cache_lock: - tickers = self._fetch_tickers_cache.get('fetch_tickers') # type: ignore + tickers = self._fetch_tickers_cache.get("fetch_tickers") # type: ignore if tickers: return tickers try: tickers = self._api.fetch_tickers(symbols) with self._cache_lock: - self._fetch_tickers_cache['fetch_tickers'] = tickers + self._fetch_tickers_cache["fetch_tickers"] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( - f'Exchange {self._api.name} does not support fetching tickers in batch. ' - f'Message: {e}') from e + f"Exchange {self._api.name} does not support fetching tickers in batch. " + f"Message: {e}" + ) from e except ccxt.BadSymbol as e: - logger.warning(f"Could not load tickers due to {e.__class__.__name__}. Message: {e} ." - "Reloading markets.") + logger.warning( + f"Could not load tickers due to {e.__class__.__name__}. Message: {e} ." + "Reloading markets." + ) self.reload_markets(True) # Re-raise exception to repeat the call. raise TemporaryError from e @@ -1616,7 +1745,8 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not load tickers due to {e.__class__.__name__}. Message: {e}') from e + f"Could not load tickers due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1625,8 +1755,7 @@ class Exchange: @retrier def fetch_ticker(self, pair: str) -> Ticker: try: - if (pair not in self.markets or - self.markets[pair].get('active', False) is False): + if pair not in self.markets or self.markets[pair].get("active", False) is False: raise ExchangeError(f"Pair {pair} not available") data: Ticker = self._api.fetch_ticker(pair) return data @@ -1634,13 +1763,15 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e + f"Could not load ticker due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod - def get_next_limit_in_list(limit: int, limit_range: Optional[List[int]], - range_required: bool = True): + def get_next_limit_in_list( + limit: int, limit_range: Optional[List[int]], range_required: bool = True + ): """ Get next greater value in the list. 
Used by fetch_l2_order_book if the api only supports a limited range @@ -1662,43 +1793,50 @@ class Exchange: Returns a dict in the format {'asks': [price, volume], 'bids': [price, volume]} """ - limit1 = self.get_next_limit_in_list(limit, self._ft_has['l2_limit_range'], - self._ft_has['l2_limit_range_required']) + limit1 = self.get_next_limit_in_list( + limit, self._ft_has["l2_limit_range"], self._ft_has["l2_limit_range_required"] + ) try: - return self._api.fetch_l2_order_book(pair, limit1) except ccxt.NotSupported as e: raise OperationalException( - f'Exchange {self._api.name} does not support fetching order book.' - f'Message: {e}') from e + f"Exchange {self._api.name} does not support fetching order book. Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get order book due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get order book due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def _get_price_side(self, side: str, is_short: bool, conf_strategy: Dict) -> BidAsk: - price_side = conf_strategy['price_side'] + price_side = conf_strategy["price_side"] - if price_side in ('same', 'other'): + if price_side in ("same", "other"): price_map = { - ('entry', 'long', 'same'): 'bid', - ('entry', 'long', 'other'): 'ask', - ('entry', 'short', 'same'): 'ask', - ('entry', 'short', 'other'): 'bid', - ('exit', 'long', 'same'): 'ask', - ('exit', 'long', 'other'): 'bid', - ('exit', 'short', 'same'): 'bid', - ('exit', 'short', 'other'): 'ask', + ("entry", "long", "same"): "bid", + ("entry", "long", "other"): "ask", + ("entry", "short", "same"): "ask", + ("entry", "short", "other"): "bid", + ("exit", "long", "same"): "ask", + ("exit", "long", "other"): "bid", + ("exit", "short", "same"): "bid", + ("exit", "short", "other"): "ask", } - price_side = price_map[(side, 'short' if is_short else 'long', price_side)] + price_side = price_map[(side, "short" if is_short else "long", price_side)] return price_side - def get_rate(self, pair: str, refresh: bool, - side: EntryExit, is_short: bool, - order_book: Optional[OrderBook] = None, ticker: Optional[Ticker] = None) -> float: + def get_rate( + self, + pair: str, + refresh: bool, + side: EntryExit, + is_short: bool, + order_book: Optional[OrderBook] = None, + ticker: Optional[Ticker] = None, + ) -> float: """ Calculates bid/ask target bid rate - between current ask price and last price @@ -1711,7 +1849,7 @@ class Exchange: :raises PricingError if orderbook price could not be determined. 
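Note on `_get_price_side` above: the `price_map` translates a configured `price_side` of 'same' or 'other' into a concrete bid/ask, depending on whether the rate is for an entry or an exit and whether the trade is long or short. The same lookup, extracted as a standalone sketch:

    def resolve_price_side(side: str, is_short: bool, price_side: str) -> str:
        # 'bid'/'ask' pass through unchanged; 'same'/'other' depend on direction
        if price_side not in ("same", "other"):
            return price_side
        price_map = {
            ("entry", "long", "same"): "bid",
            ("entry", "long", "other"): "ask",
            ("entry", "short", "same"): "ask",
            ("entry", "short", "other"): "bid",
            ("exit", "long", "same"): "ask",
            ("exit", "long", "other"): "bid",
            ("exit", "short", "same"): "bid",
            ("exit", "short", "other"): "ask",
        }
        return price_map[(side, "short" if is_short else "long", price_side)]

    print(resolve_price_side("entry", False, "same"))  # bid
    print(resolve_price_side("exit", True, "other"))   # ask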
""" name = side.capitalize() - strat_name = 'entry_pricing' if side == "entry" else 'exit_pricing' + strat_name = "entry_pricing" if side == "entry" else "exit_pricing" cache_rate: TTLCache = self._entry_rate_cache if side == "entry" else self._exit_rate_cache if not refresh: @@ -1726,13 +1864,11 @@ class Exchange: price_side = self._get_price_side(side, is_short, conf_strategy) - if conf_strategy.get('use_order_book', False): - - order_book_top = conf_strategy.get('order_book_top', 1) + if conf_strategy.get("use_order_book", False): + order_book_top = conf_strategy.get("order_book_top", 1) if order_book is None: order_book = self.fetch_l2_order_book(pair, order_book_top) - rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, - order_book_top) + rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, order_book_top) else: logger.debug(f"Using Last {price_side.capitalize()} / Last Price") if ticker is None: @@ -1746,41 +1882,51 @@ class Exchange: return rate - def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], - price_side: BidAsk) -> Optional[float]: + def _get_rate_from_ticker( + self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], price_side: BidAsk + ) -> Optional[float]: """ Get rate from ticker. """ ticker_rate = ticker[price_side] - if ticker['last'] and ticker_rate: - if side == 'entry' and ticker_rate > ticker['last']: - balance = conf_strategy.get('price_last_balance', 0.0) - ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) - elif side == 'exit' and ticker_rate < ticker['last']: - balance = conf_strategy.get('price_last_balance', 0.0) - ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) + if ticker["last"] and ticker_rate: + if side == "entry" and ticker_rate > ticker["last"]: + balance = conf_strategy.get("price_last_balance", 0.0) + ticker_rate = ticker_rate + balance * (ticker["last"] - ticker_rate) + elif side == "exit" and ticker_rate < ticker["last"]: + balance = conf_strategy.get("price_last_balance", 0.0) + ticker_rate = ticker_rate - balance * (ticker_rate - ticker["last"]) rate = ticker_rate return rate - def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str, - price_side: BidAsk, order_book_top: int) -> float: + def _get_rate_from_ob( + self, + pair: str, + side: EntryExit, + order_book: OrderBook, + name: str, + price_side: BidAsk, + order_book_top: int, + ) -> float: """ Get rate from orderbook :raises: PricingError if rate could not be determined. """ - logger.debug('order_book %s', order_book) + logger.debug("order_book %s", order_book) # top 1 = index 0 try: - obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' + obside: OBLiteral = "bids" if price_side == "bid" else "asks" rate = order_book[obside][order_book_top - 1][0] except (IndexError, KeyError) as e: logger.warning( - f"{pair} - {name} Price at location {order_book_top} from orderbook " - f"could not be determined. Orderbook: {order_book}" - ) + f"{pair} - {name} Price at location {order_book_top} from orderbook " + f"could not be determined. 
Orderbook: {order_book}" + ) raise PricingError from e - logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}" - f"side - top {order_book_top} order book {side} rate {rate:.8f}") + logger.debug( + f"{pair} - {name} price from orderbook {price_side.capitalize()}" + f"side - top {order_book_top} order book {side} rate {rate:.8f}" + ) return rate def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]: @@ -1795,27 +1941,30 @@ class Exchange: if exit_rate: logger.debug(f"Using cached sell rate for {pair}.") - entry_pricing = self._config.get('entry_pricing', {}) - exit_pricing = self._config.get('exit_pricing', {}) + entry_pricing = self._config.get("entry_pricing", {}) + exit_pricing = self._config.get("exit_pricing", {}) order_book = ticker = None - if not entry_rate and entry_pricing.get('use_order_book', False): - order_book_top = max(entry_pricing.get('order_book_top', 1), - exit_pricing.get('order_book_top', 1)) + if not entry_rate and entry_pricing.get("use_order_book", False): + order_book_top = max( + entry_pricing.get("order_book_top", 1), exit_pricing.get("order_book_top", 1) + ) order_book = self.fetch_l2_order_book(pair, order_book_top) - entry_rate = self.get_rate(pair, refresh, 'entry', is_short, order_book=order_book) + entry_rate = self.get_rate(pair, refresh, "entry", is_short, order_book=order_book) elif not entry_rate: ticker = self.fetch_ticker(pair) - entry_rate = self.get_rate(pair, refresh, 'entry', is_short, ticker=ticker) + entry_rate = self.get_rate(pair, refresh, "entry", is_short, ticker=ticker) if not exit_rate: - exit_rate = self.get_rate(pair, refresh, 'exit', - is_short, order_book=order_book, ticker=ticker) + exit_rate = self.get_rate( + pair, refresh, "exit", is_short, order_book=order_book, ticker=ticker + ) return entry_rate, exit_rate # Fee handling @retrier - def get_trades_for_order(self, order_id: str, pair: str, since: datetime, - params: Optional[Dict] = None) -> List: + def get_trades_for_order( + self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None + ) -> List: """ Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id. The "since" argument passed in is coming from the database and is in UTC, @@ -1832,20 +1981,22 @@ class Exchange: :param pair: Pair the order is for :param since: datetime object of the order creation time. Assumes object is in UTC. 
""" - if self._config['dry_run']: + if self._config["dry_run"]: return [] - if not self.exchange_has('fetchMyTrades'): + if not self.exchange_has("fetchMyTrades"): return [] try: # Allow 5s offset to catch slight time offsets (discovered in #1185) # since needs to be int in milliseconds _params = params if params else {} my_trades = self._api.fetch_my_trades( - pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000), - params=_params) - matched_trades = [trade for trade in my_trades if trade['order'] == order_id] + pair, + int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000), + params=_params, + ) + matched_trades = [trade for trade in my_trades if trade["order"] == order_id] - self._log_exchange_response('get_trades_for_order', matched_trades) + self._log_exchange_response("get_trades_for_order", matched_trades) matched_trades = self._trades_contracts_to_amount(matched_trades) @@ -1854,16 +2005,24 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get trades due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get trades due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def get_order_id_conditional(self, order: Dict[str, Any]) -> str: - return order['id'] + return order["id"] @retrier - def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1, - price: float = 1, taker_or_maker: MakerTaker = 'maker') -> float: + def get_fee( + self, + symbol: str, + type: str = "", + side: str = "", + amount: float = 1, + price: float = 1, + taker_or_maker: MakerTaker = "maker", + ) -> float: """ Retrieve fee from exchange :param symbol: Pair @@ -1873,22 +2032,29 @@ class Exchange: :param price: Price of order :param taker_or_maker: 'maker' or 'taker' (ignored if "type" is provided) """ - if type and type == 'market': - taker_or_maker = 'taker' + if type and type == "market": + taker_or_maker = "taker" try: - if self._config['dry_run'] and self._config.get('fee', None) is not None: - return self._config['fee'] + if self._config["dry_run"] and self._config.get("fee", None) is not None: + return self._config["fee"] # validate that markets are loaded before trying to get fee if self._api.markets is None or len(self._api.markets) == 0: self._api.load_markets(params={}) - return self._api.calculate_fee(symbol=symbol, type=type, side=side, amount=amount, - price=price, takerOrMaker=taker_or_maker)['rate'] + return self._api.calculate_fee( + symbol=symbol, + type=type, + side=side, + amount=amount, + price=price, + takerOrMaker=taker_or_maker, + )["rate"] except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get fee info due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get fee info due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -1902,14 +2068,17 @@ class Exchange: """ if not isinstance(order, dict): return False - return ('fee' in order and order['fee'] is not None - and (order['fee'].keys() >= {'currency', 'cost'}) - and order['fee']['currency'] is not None - and order['fee']['cost'] is not None - ) + return ( + "fee" in order + and order["fee"] is not None + and (order["fee"].keys() >= {"currency", "cost"}) + and order["fee"]["currency"] is not None + and order["fee"]["cost"] is not None + ) def calculate_fee_rate( - self, fee: Dict, symbol: str, cost: float, amount: float) -> Optional[float]: + self, fee: Dict, symbol: str, cost: float, amount: float + ) -> Optional[float]: """ Calculate fee rate if it's not given by the exchange. :param fee: ccxt Fee dict - must contain cost / currency / rate @@ -1917,12 +2086,12 @@ class Exchange: :param cost: Total cost of the order :param amount: Amount of the order """ - if fee.get('rate') is not None: - return fee.get('rate') - fee_curr = fee.get('currency') + if fee.get("rate") is not None: + return fee.get("rate") + fee_curr = fee.get("currency") if fee_curr is None: return None - fee_cost = float(fee['cost']) + fee_cost = float(fee["cost"]) # Calculate fee based on order details if fee_curr == self.get_pair_base_currency(symbol): @@ -1937,18 +2106,19 @@ class Exchange: # If cost is None or 0.0 -> falsy, return None return None try: - comb = self.get_valid_pair_combination(fee_curr, self._config['stake_currency']) + comb = self.get_valid_pair_combination(fee_curr, self._config["stake_currency"]) tick = self.fetch_ticker(comb) - fee_to_quote_rate = safe_value_fallback2(tick, tick, 'last', 'ask') + fee_to_quote_rate = safe_value_fallback2(tick, tick, "last", "ask") except (ValueError, ExchangeError): - fee_to_quote_rate = self._config['exchange'].get('unknown_fee_rate', None) + fee_to_quote_rate = self._config["exchange"].get("unknown_fee_rate", None) if not fee_to_quote_rate: return None return round((fee_cost * fee_to_quote_rate) / cost, 8) - def extract_cost_curr_rate(self, fee: Dict, symbol: str, cost: float, - amount: float) -> Tuple[float, str, Optional[float]]: + def extract_cost_curr_rate( + self, fee: Dict, symbol: str, cost: float, amount: float + ) -> Tuple[float, str, Optional[float]]: """ Extract tuple of cost, currency, rate. Requires order_has_fee to run first! @@ -1958,22 +2128,23 @@ class Exchange: :param amount: Amount of the order :return: Tuple with cost, currency, rate of the given fee dict """ - return (float(fee['cost']), - fee['currency'], - self.calculate_fee_rate( - fee, - symbol, - cost, - amount - ) - ) + return ( + float(fee["cost"]), + fee["currency"], + self.calculate_fee_rate(fee, symbol, cost, amount), + ) # Historic data - def get_historic_ohlcv(self, pair: str, timeframe: str, - since_ms: int, candle_type: CandleType, - is_new_pair: bool = False, - until_ms: Optional[int] = None) -> List: + def get_historic_ohlcv( + self, + pair: str, + timeframe: str, + since_ms: int, + candle_type: CandleType, + is_new_pair: bool = False, + until_ms: Optional[int] = None, + ) -> List: """ Get candle history using asyncio and returns the list of candles. Handles all async work for this. 
@@ -1986,17 +2157,28 @@ class Exchange: :return: List with candle (OHLCV) data """ pair, _, _, data, _ = self.loop.run_until_complete( - self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe, - since_ms=since_ms, until_ms=until_ms, - is_new_pair=is_new_pair, candle_type=candle_type)) + self._async_get_historic_ohlcv( + pair=pair, + timeframe=timeframe, + since_ms=since_ms, + until_ms=until_ms, + is_new_pair=is_new_pair, + candle_type=candle_type, + ) + ) logger.info(f"Downloaded data for {pair} with length {len(data)}.") return data - async def _async_get_historic_ohlcv(self, pair: str, timeframe: str, - since_ms: int, candle_type: CandleType, - is_new_pair: bool = False, raise_: bool = False, - until_ms: Optional[int] = None - ) -> OHLCVResponse: + async def _async_get_historic_ohlcv( + self, + pair: str, + timeframe: str, + since_ms: int, + candle_type: CandleType, + is_new_pair: bool = False, + raise_: bool = False, + until_ms: Optional[int] = None, + ) -> OHLCVResponse: """ Download historic ohlcv :param is_new_pair: used by binance subclass to allow "fast" new pair downloading @@ -2004,20 +2186,21 @@ class Exchange: """ one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( - timeframe, candle_type, since_ms) + timeframe, candle_type, since_ms + ) logger.debug( "one_call: %s msecs (%s)", one_call, - dt_humanize_delta(dt_now() - timedelta(milliseconds=one_call)) + dt_humanize_delta(dt_now() - timedelta(milliseconds=one_call)), ) - input_coroutines = [self._async_get_candle_history( - pair, timeframe, candle_type, since) for since in - range(since_ms, until_ms or dt_ts(), one_call)] + input_coroutines = [ + self._async_get_candle_history(pair, timeframe, candle_type, since) + for since in range(since_ms, until_ms or dt_ts(), one_call) + ] data: List = [] # Chunk requests into batches of 100 to avoid overwhelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): - results = await asyncio.gather(*input_coro, return_exceptions=True) for res in results: if isinstance(res, BaseException): @@ -2035,8 +2218,13 @@ class Exchange: return pair, timeframe, candle_type, data, self._ohlcv_partial_candle def _build_coroutine( - self, pair: str, timeframe: str, candle_type: CandleType, - since_ms: Optional[int], cache: bool) -> Coroutine[Any, Any, OHLCVResponse]: + self, + pair: str, + timeframe: str, + candle_type: CandleType, + since_ms: Optional[int], + cache: bool, + ) -> Coroutine[Any, Any, OHLCVResponse]: not_all_data = cache and self.required_candle_call_count > 1 if cache and (pair, timeframe, candle_type) in self._klines: candle_limit = self.ohlcv_candle_limit(timeframe, candle_type) @@ -2048,47 +2236,57 @@ class Exchange: else: # Time jump detected, evict cache logger.info( - f"Time jump detected. Evicting cache for {pair}, {timeframe}, {candle_type}") + f"Time jump detected. 
Evicting cache for {pair}, {timeframe}, {candle_type}" + ) del self._klines[(pair, timeframe, candle_type)] - if (not since_ms and (self._ft_has["ohlcv_require_since"] or not_all_data)): + if not since_ms and (self._ft_has["ohlcv_require_since"] or not_all_data): # Multiple calls for one pair - to get more history one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( - timeframe, candle_type, since_ms) + timeframe, candle_type, since_ms + ) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = dt_ts(now - timedelta(seconds=move_to // 1000)) if since_ms: return self._async_get_historic_ohlcv( - pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) + pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type + ) else: # One call ... "regular" refresh return self._async_get_candle_history( - pair, timeframe, since_ms=since_ms, candle_type=candle_type) + pair, timeframe, since_ms=since_ms, candle_type=candle_type + ) def _build_ohlcv_dl_jobs( - self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], - cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: + self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool + ) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): - if (timeframe not in self.timeframes - and candle_type in (CandleType.SPOT, CandleType.FUTURES)): + if timeframe not in self.timeframes and candle_type in ( + CandleType.SPOT, + CandleType.FUTURES, + ): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " - f"{', '.join(self.timeframes)}.") + f"{', '.join(self.timeframes)}." 
+ ) continue - if ((pair, timeframe, candle_type) not in self._klines or not cache - or self._now_is_time_to_refresh(pair, timeframe, candle_type)): - + if ( + (pair, timeframe, candle_type) not in self._klines + or not cache + or self._now_is_time_to_refresh(pair, timeframe, candle_type) + ): input_coroutines.append( - self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) + self._build_coroutine(pair, timeframe, candle_type, since_ms, cache) + ) else: logger.debug( @@ -2098,22 +2296,35 @@ class Exchange: return input_coroutines, cached_pairs - def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List], - cache: bool, drop_incomplete: bool) -> DataFrame: + def _process_ohlcv_df( + self, + pair: str, + timeframe: str, + c_type: CandleType, + ticks: List[List], + cache: bool, + drop_incomplete: bool, + ) -> DataFrame: # keeping last candle time as last refreshed time of the pair if ticks and cache: idx = -2 if drop_incomplete and len(ticks) > 1 else -1 self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000 # keeping parsed dataframe in cache - ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair, fill_missing=True, - drop_incomplete=drop_incomplete) + ohlcv_df = ohlcv_to_dataframe( + ticks, timeframe, pair=pair, fill_missing=True, drop_incomplete=drop_incomplete + ) if cache: if (pair, timeframe, c_type) in self._klines: old = self._klines[(pair, timeframe, c_type)] # Reassign so we return the updated, combined df - ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair, - fill_missing=True, drop_incomplete=False) - candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def']) + ohlcv_df = clean_ohlcv_dataframe( + concat([old, ohlcv_df], axis=0), + timeframe, + pair, + fill_missing=True, + drop_incomplete=False, + ) + candle_limit = self.ohlcv_candle_limit(timeframe, self._config["candle_type_def"]) # Age out old candles ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count) ohlcv_df = ohlcv_df.reset_index(drop=True) @@ -2122,10 +2333,14 @@ class Exchange: self._klines[(pair, timeframe, c_type)] = ohlcv_df return ohlcv_df - def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *, - since_ms: Optional[int] = None, cache: bool = True, - drop_incomplete: Optional[bool] = None - ) -> Dict[PairWithTimeframe, DataFrame]: + def refresh_latest_ohlcv( + self, + pair_list: ListPairsWithTimeframes, + *, + since_ms: Optional[int] = None, + cache: bool = True, + drop_incomplete: Optional[bool] = None, + ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh in-memory OHLCV asynchronously and set `_klines` with the result Loops asynchronously over pair_list and downloads all pairs async (semi-parallel). 
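Note on the historic-OHLCV hunks above: the download path issues one `fetch_ohlcv` call per `candle_limit * timeframe` slice of the requested window and gathers them in batches of 100 coroutines. A standalone sketch of how those call start points fall out, assuming a hypothetical 1000-candle limit on a 5m timeframe:

    from datetime import datetime, timezone

    def call_start_points(since_ms: int, until_ms: int,
                          timeframe_secs: int, candle_limit: int) -> list:
        # Start timestamp (ms) of each fetch_ohlcv call needed to cover the window
        one_call = timeframe_secs * 1000 * candle_limit
        return list(range(since_ms, until_ms, one_call))

    since = int(datetime(2024, 1, 1, tzinfo=timezone.utc).timestamp() * 1000)
    until = int(datetime(2024, 1, 15, tzinfo=timezone.utc).timestamp() * 1000)
    starts = call_start_points(since, until, timeframe_secs=300, candle_limit=1000)
    print(len(starts))  # 5 calls - each covers 5m * 1000 candles, roughly 3.5 days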
@@ -2145,11 +2360,12 @@ class Exchange: results_df = {} # Chunk requests into batches of 100 to avoid overwhelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): - async def gather_stuff(): - return await asyncio.gather(*input_coro, return_exceptions=True) + + async def gather_stuff(coro): + return await asyncio.gather(*coro, return_exceptions=True) with self._loop_lock: - results = self.loop.run_until_complete(gather_stuff()) + results = self.loop.run_until_complete(gather_stuff(input_coro)) for res in results: if isinstance(res, Exception): @@ -2159,23 +2375,21 @@ class Exchange: pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( - pair, timeframe, c_type, ticks, cache, drop_incomplete_) + pair, timeframe, c_type, ticks, cache, drop_incomplete_ + ) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( - (pair, timeframe, c_type), - copy=False + (pair, timeframe, c_type), copy=False ) return results_df def refresh_ohlcv_with_cache( - self, - pairs: List[PairWithTimeframe], - since_ms: int + self, pairs: List[PairWithTimeframe], since_ms: int ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh ohlcv data for all pairs in needed_pairs if necessary. @@ -2189,18 +2403,18 @@ class Exchange: timeframe_in_sec = timeframe_to_seconds(timeframe) # Initialise cache self._expiring_candle_cache[(timeframe, since_ms)] = PeriodicCache( - ttl=timeframe_in_sec, maxsize=1000) + ttl=timeframe_in_sec, maxsize=1000 + ) # Get candles from cache candles = { - c: self._expiring_candle_cache[(c[1], since_ms)].get(c, None) for c in pairs + c: self._expiring_candle_cache[(c[1], since_ms)].get(c, None) + for c in pairs if c in self._expiring_candle_cache[(c[1], since_ms)] } pairs_to_download = [p for p in pairs if p not in candles] if pairs_to_download: - candles = self.refresh_latest_ohlcv( - pairs_to_download, since_ms=since_ms, cache=False - ) + candles = self.refresh_latest_ohlcv(pairs_to_download, since_ms=since_ms, cache=False) for c, val in candles.items(): self._expiring_candle_cache[(c[1], since_ms)][c] = val return candles @@ -2228,21 +2442,26 @@ class Exchange: """ try: # Fetch OHLCV asynchronously - s = '(' + dt_from_ts(since_ms).isoformat() + ') ' if since_ms is not None else '' + s = "(" + dt_from_ts(since_ms).isoformat() + ") " if since_ms is not None else "" logger.debug( "Fetching pair %s, %s, interval %s, since %s %s...", - pair, candle_type, timeframe, since_ms, s + pair, + candle_type, + timeframe, + since_ms, + s, ) - params = deepcopy(self._ft_has.get('ohlcv_params', {})) + params = deepcopy(self._ft_has.get("ohlcv_params", {})) candle_limit = self.ohlcv_candle_limit( - timeframe, candle_type=candle_type, since_ms=since_ms) + timeframe, candle_type=candle_type, since_ms=since_ms + ) if candle_type and candle_type != CandleType.SPOT: - params.update({'price': candle_type.value}) + params.update({"price": candle_type.value}) if candle_type != CandleType.FUNDING_RATE: data = await self._api_async.fetch_ohlcv( - pair, timeframe=timeframe, since=since_ms, - limit=candle_limit, params=params) + pair, timeframe=timeframe, since=since_ms, limit=candle_limit, params=params + ) else: # Funding rate data = await self._fetch_funding_rate_history( @@ -2266,17 +2485,21 @@ class Exchange: except ccxt.NotSupported as e: raise OperationalException( - f'Exchange 
{self._api.name} does not support fetching historical ' - f'candle (OHLCV) data. Message: {e}') from e + f"Exchange {self._api.name} does not support fetching historical " + f"candle (OHLCV) data. Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: - raise TemporaryError(f'Could not fetch historical candle (OHLCV) data ' - f'for pair {pair} due to {e.__class__.__name__}. ' - f'Message: {e}') from e + raise TemporaryError( + f"Could not fetch historical candle (OHLCV) data " + f"for pair {pair} due to {e.__class__.__name__}. " + f"Message: {e}" + ) from e except ccxt.BaseError as e: - raise OperationalException(f'Could not fetch historical candle (OHLCV) data ' - f'for pair {pair}. Message: {e}') from e + raise OperationalException( + f"Could not fetch historical candle (OHLCV) data for pair {pair}. Message: {e}" + ) from e async def _fetch_funding_rate_history( self, @@ -2289,19 +2512,17 @@ class Exchange: Fetch funding rate history - used to selectively override this by subclasses. """ # Funding rate - data = await self._api_async.fetch_funding_rate_history( - pair, since=since_ms, - limit=limit) + data = await self._api_async.fetch_funding_rate_history(pair, since=since_ms, limit=limit) # Convert funding rate to candle pattern - data = [[x['timestamp'], x['fundingRate'], 0, 0, 0, 0] for x in data] + data = [[x["timestamp"], x["fundingRate"], 0, 0, 0, 0] for x in data] return data # Fetch historic trades @retrier_async - async def _async_fetch_trades(self, pair: str, - since: Optional[int] = None, - params: Optional[dict] = None) -> Tuple[List[List], Any]: + async def _async_fetch_trades( + self, pair: str, since: Optional[int] = None, params: Optional[dict] = None + ) -> Tuple[List[List], Any]: """ Asynchronously gets trade history using fetch_trades. Handles exchange errors, does one call to the exchange. @@ -2317,8 +2538,9 @@ class Exchange: else: logger.debug( "Fetching trades for pair %s, since %s %s...", - pair, since, - '(' + dt_from_ts(since).isoformat() + ') ' if since is not None else '' + pair, + since, + "(" + dt_from_ts(since).isoformat() + ") " if since is not None else "", ) trades = await self._api_async.fetch_trades(pair, since=since, limit=1000) trades = self._trades_contracts_to_amount(trades) @@ -2326,15 +2548,17 @@ class Exchange: return trades_dict_to_list(trades), pagination_value except ccxt.NotSupported as e: raise OperationalException( - f'Exchange {self._api.name} does not support fetching historical trade data.' - f'Message: {e}') from e + f"Exchange {self._api.name} does not support fetching historical trade data." + f"Message: {e}" + ) from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: - raise TemporaryError(f'Could not load trade history due to {e.__class__.__name__}. ' - f'Message: {e}') from e + raise TemporaryError( + f"Could not load trade history due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: - raise OperationalException(f'Could not fetch trade data. Msg: {e}') from e + raise OperationalException(f"Could not fetch trade data. 
Msg: {e}") from e def _valid_trade_pagination_id(self, pair: str, from_id: str) -> bool: """ @@ -2350,15 +2574,14 @@ class Exchange: """ if not trades: return None - if self._trades_pagination == 'id': - return trades[-1].get('id') + if self._trades_pagination == "id": + return trades[-1].get("id") else: - return trades[-1].get('timestamp') + return trades[-1].get("timestamp") - async def _async_get_trade_history_id(self, pair: str, - until: int, - since: Optional[int] = None, - from_id: Optional[str] = None) -> Tuple[str, List[List]]: + async def _async_get_trade_history_id( + self, pair: str, until: int, since: Optional[int] = None, from_id: Optional[str] = None + ) -> Tuple[str, List[List]]: """ Asynchronously gets trade history using fetch_trades use this when exchange uses id-based iteration (check `self._trades_pagination`) @@ -2372,7 +2595,7 @@ class Exchange: trades: List[List] = [] # DEFAULT_TRADES_COLUMNS: 0 -> timestamp # DEFAULT_TRADES_COLUMNS: 1 -> id - has_overlap = self._ft_has.get('trades_pagination_overlap', True) + has_overlap = self._ft_has.get("trades_pagination_overlap", True) # Skip last trade by default since its the key for the next call x = slice(None, -1) if has_overlap else slice(None) @@ -2387,12 +2610,15 @@ class Exchange: while True: try: t, from_id_next = await self._async_fetch_trades( - pair, params={self._trades_pagination_arg: from_id}) + pair, params={self._trades_pagination_arg: from_id} + ) if t: trades.extend(t[x]) if from_id == from_id_next or t[-1][0] > until: - logger.debug(f"Stopping because from_id did not change. " - f"Reached {t[-1][0]} > {until}") + logger.debug( + f"Stopping because from_id did not change. " + f"Reached {t[-1][0]} > {until}" + ) # Reached the end of the defined-download period - add last trade as well. if has_overlap: trades.extend(t[-1:]) @@ -2408,8 +2634,9 @@ class Exchange: return (pair, trades) - async def _async_get_trade_history_time(self, pair: str, until: int, - since: Optional[int] = None) -> Tuple[str, List[List]]: + async def _async_get_trade_history_time( + self, pair: str, until: int, since: Optional[int] = None + ) -> Tuple[str, List[List]]: """ Asynchronously gets trade history using fetch_trades, when the exchange uses time-based iteration (check `self._trades_pagination`) @@ -2435,8 +2662,7 @@ class Exchange: trades.extend(t) # Reached the end of the defined-download period if until and since_next > until: - logger.debug( - f"Stopping because until was reached. {since_next} > {until}") + logger.debug(f"Stopping because until was reached. {since_next} > {until}") break else: logger.debug("Stopping as no more trades were returned.") @@ -2447,36 +2673,44 @@ class Exchange: return (pair, trades) - async def _async_get_trade_history(self, pair: str, - since: Optional[int] = None, - until: Optional[int] = None, - from_id: Optional[str] = None) -> Tuple[str, List[List]]: + async def _async_get_trade_history( + self, + pair: str, + since: Optional[int] = None, + until: Optional[int] = None, + from_id: Optional[str] = None, + ) -> Tuple[str, List[List]]: """ Async wrapper handling downloading trades using either time or id based methods. 
""" - logger.debug(f"_async_get_trade_history(), pair: {pair}, " - f"since: {since}, until: {until}, from_id: {from_id}") + logger.debug( + f"_async_get_trade_history(), pair: {pair}, " + f"since: {since}, until: {until}, from_id: {from_id}" + ) if until is None: until = ccxt.Exchange.milliseconds() logger.debug(f"Exchange milliseconds: {until}") - if self._trades_pagination == 'time': - return await self._async_get_trade_history_time( - pair=pair, since=since, until=until) - elif self._trades_pagination == 'id': + if self._trades_pagination == "time": + return await self._async_get_trade_history_time(pair=pair, since=since, until=until) + elif self._trades_pagination == "id": return await self._async_get_trade_history_id( pair=pair, since=since, until=until, from_id=from_id ) else: - raise OperationalException(f"Exchange {self.name} does use neither time, " - f"nor id based pagination") + raise OperationalException( + f"Exchange {self.name} does use neither time, nor id based pagination" + ) - def get_historic_trades(self, pair: str, - since: Optional[int] = None, - until: Optional[int] = None, - from_id: Optional[str] = None) -> Tuple[str, List]: + def get_historic_trades( + self, + pair: str, + since: Optional[int] = None, + until: Optional[int] = None, + from_id: Optional[str] = None, + ) -> Tuple[str, List]: """ Get trade history data using asyncio. Handles all async work and returns the list of candles. @@ -2491,8 +2725,9 @@ class Exchange: raise OperationalException("This exchange does not support downloading Trades.") with self._loop_lock: - task = asyncio.ensure_future(self._async_get_trade_history( - pair=pair, since=since, until=until, from_id=from_id)) + task = asyncio.ensure_future( + self._async_get_trade_history(pair=pair, since=since, until=until, from_id=from_id) + ) for sig in [signal.SIGINT, signal.SIGTERM]: try: @@ -2520,18 +2755,17 @@ class Exchange: since = dt_ts(since) try: - funding_history = self._api.fetch_funding_history( - symbol=pair, - since=since + funding_history = self._api.fetch_funding_history(symbol=pair, since=since) + self._log_exchange_response( + "funding_history", funding_history, add_info=f"pair: {pair}, since: {since}" ) - self._log_exchange_response('funding_history', funding_history, - add_info=f"pair: {pair}, since: {since}") - return sum(fee['amount'] for fee in funding_history) + return sum(fee["amount"] for fee in funding_history) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get funding fees due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get funding fees due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -2543,14 +2777,14 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not load leverage tiers due to {e.__class__.__name__}. Message: {e}' + f"Could not load leverage tiers due to {e.__class__.__name__}. 
Message: {e}" ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier_async async def get_market_leverage_tiers(self, symbol: str) -> Tuple[str, List[Dict]]: - """ Leverage tiers per symbol """ + """Leverage tiers per symbol""" try: tier = await self._api_async.fetch_market_leverage_tiers(symbol) return symbol, tier @@ -2558,43 +2792,49 @@ class Exchange: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not load leverage tiers for {symbol}' - f' due to {e.__class__.__name__}. Message: {e}' + f"Could not load leverage tiers for {symbol}" + f" due to {e.__class__.__name__}. Message: {e}" ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def load_leverage_tiers(self) -> Dict[str, List[Dict]]: if self.trading_mode == TradingMode.FUTURES: - if self.exchange_has('fetchLeverageTiers'): + if self.exchange_has("fetchLeverageTiers"): # Fetch all leverage tiers at once return self.get_leverage_tiers() - elif self.exchange_has('fetchMarketLeverageTiers'): + elif self.exchange_has("fetchMarketLeverageTiers"): # Must fetch the leverage tiers for each market separately # * This is slow(~45s) on Okx, makes ~90 api calls to load all linear swap markets markets = self.markets symbols = [ - symbol for symbol, market in markets.items() - if (self.market_is_future(market) - and market['quote'] == self._config['stake_currency']) + symbol + for symbol, market in markets.items() + if ( + self.market_is_future(market) + and market["quote"] == self._config["stake_currency"] + ) ] tiers: Dict[str, List[Dict]] = {} - tiers_cached = self.load_cached_leverage_tiers(self._config['stake_currency']) + tiers_cached = self.load_cached_leverage_tiers(self._config["stake_currency"]) if tiers_cached: tiers = tiers_cached coros = [ self.get_market_leverage_tiers(symbol) - for symbol in sorted(symbols) if symbol not in tiers] + for symbol in sorted(symbols) + if symbol not in tiers + ] # Be verbose here, as this delays startup by ~1 minute. if coros: logger.info( f"Initializing leverage_tiers for {len(symbols)} markets. " - "This will take about a minute.") + "This will take about a minute." 
+ ) else: logger.info("Using cached leverage_tiers.") @@ -2602,7 +2842,6 @@ class Exchange: return await asyncio.gather(*input_coro, return_exceptions=True) for input_coro in chunks(coros, 100): - with self._loop_lock: results = self.loop.run_until_complete(gather_results(input_coro)) @@ -2613,15 +2852,14 @@ class Exchange: symbol, tier = res tiers[symbol] = tier if len(coros) > 0: - self.cache_leverage_tiers(tiers, self._config['stake_currency']) + self.cache_leverage_tiers(tiers, self._config["stake_currency"]) logger.info(f"Done initializing {len(symbols)} markets.") return tiers return {} def cache_leverage_tiers(self, tiers: Dict[str, List[Dict]], stake_currency: str) -> None: - - filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json" + filename = self._config["datadir"] / "futures" / f"leverage_tiers_{stake_currency}.json" if not filename.parent.is_dir(): filename.parent.mkdir(parents=True) data = { @@ -2630,18 +2868,27 @@ class Exchange: } file_dump_json(filename, data) - def load_cached_leverage_tiers(self, stake_currency: str) -> Optional[Dict[str, List[Dict]]]: - filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json" + def load_cached_leverage_tiers( + self, stake_currency: str, cache_time: Optional[timedelta] = None + ) -> Optional[Dict[str, List[Dict]]]: + """ + Load cached leverage tiers from disk + :param cache_time: The maximum age of the cache before it is considered outdated + """ + if not cache_time: + # Default to 4 weeks + cache_time = timedelta(weeks=4) + filename = self._config["datadir"] / "futures" / f"leverage_tiers_{stake_currency}.json" if filename.is_file(): try: tiers = file_load_json(filename) - updated = tiers.get('updated') + updated = tiers.get("updated") if updated: updated_dt = parser.parse(updated) - if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4): + if updated_dt < datetime.now(timezone.utc) - cache_time: logger.info("Cached leverage tiers are outdated. Will update.") return None - return tiers['data'] + return tiers["data"] except Exception: logger.exception("Error loading cached leverage tiers. 
Refreshing.") return None @@ -2659,13 +2906,13 @@ class Exchange: self._leverage_tiers[pair] = pair_tiers def parse_leverage_tier(self, tier) -> Dict: - info = tier.get('info', {}) + info = tier.get("info", {}) return { - 'minNotional': tier['minNotional'], - 'maxNotional': tier['maxNotional'], - 'maintenanceMarginRate': tier['maintenanceMarginRate'], - 'maxLeverage': tier['maxLeverage'], - 'maintAmt': float(info['cum']) if 'cum' in info else None, + "minNotional": tier["minNotional"], + "maxNotional": tier["maxNotional"], + "maintenanceMarginRate": tier["maintenanceMarginRate"], + "maxLeverage": tier["maxLeverage"], + "maintAmt": float(info["cum"]) if "cum" in info else None, } def get_max_leverage(self, pair: str, stake_amount: Optional[float]) -> float: @@ -2679,11 +2926,10 @@ class Exchange: return 1.0 if self.trading_mode == TradingMode.FUTURES: - # Checks and edge cases if stake_amount is None: raise OperationalException( - f'{self.name}.get_max_leverage requires argument stake_amount' + f"{self.name}.get_max_leverage requires argument stake_amount" ) if pair not in self._leverage_tiers: @@ -2693,18 +2939,17 @@ class Exchange: pair_tiers = self._leverage_tiers[pair] if stake_amount == 0: - return self._leverage_tiers[pair][0]['maxLeverage'] # Max lev for lowest amount + return self._leverage_tiers[pair][0]["maxLeverage"] # Max lev for lowest amount for tier_index in range(len(pair_tiers)): - tier = pair_tiers[tier_index] - lev = tier['maxLeverage'] + lev = tier["maxLeverage"] if tier_index < len(pair_tiers) - 1: next_tier = pair_tiers[tier_index + 1] - next_floor = next_tier['minNotional'] / next_tier['maxLeverage'] + next_floor = next_tier["minNotional"] / next_tier["maxLeverage"] if next_floor > stake_amount: # Next tier min too high for stake amount - return min((tier['maxNotional'] / stake_amount), lev) + return min((tier["maxNotional"] / stake_amount), lev) # # With the two leverage tiers below, # - a stake amount of 150 would mean a max leverage of (10000 / 150) = 66.66 @@ -2725,20 +2970,20 @@ class Exchange: # else: # if on the last tier - if stake_amount > tier['maxNotional']: + if stake_amount > tier["maxNotional"]: # If stake is > than max tradeable amount - raise InvalidOrderException(f'Amount {stake_amount} too high for {pair}') + raise InvalidOrderException(f"Amount {stake_amount} too high for {pair}") else: - return tier['maxLeverage'] + return tier["maxLeverage"] raise OperationalException( - 'Looped through all tiers without finding a max leverage. Should never be reached' + "Looped through all tiers without finding a max leverage. Should never be reached" ) elif self.trading_mode == TradingMode.MARGIN: # Search markets.limits for max lev market = self.markets[pair] - if market['limits']['leverage']['max'] is not None: - return market['limits']['leverage']['max'] + if market["limits"]["leverage"]["max"] is not None: + return market["limits"]["leverage"]["max"] else: return 1.0 # Default if max leverage cannot be found else: @@ -2755,24 +3000,26 @@ class Exchange: Set's the leverage before making a trade, in order to not have the same leverage on every trade """ - if self._config['dry_run'] or not self.exchange_has("setLeverage"): + if self._config["dry_run"] or not self.exchange_has("setLeverage"): # Some exchanges only support one margin_mode type return - if self._ft_has.get('floor_leverage', False) is True: + if self._ft_has.get("floor_leverage", False) is True: # Rounding for binance ... 
leverage = floor(leverage) try: res = self._api.set_leverage(symbol=pair, leverage=leverage) - self._log_exchange_response('set_leverage', res) + self._log_exchange_response("set_leverage", res) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.BadRequest, ccxt.OperationRejected, ccxt.InsufficientFunds) as e: if not accept_fail: raise TemporaryError( - f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e + f"Could not set leverage due to {e.__class__.__name__}. Message: {e}" + ) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not set leverage due to {e.__class__.__name__}. Message: {e}') from e + f"Could not set leverage due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -2793,13 +3040,18 @@ class Exchange: return open_date.minute == 0 and open_date.second == 0 @retrier - def set_margin_mode(self, pair: str, margin_mode: MarginMode, accept_fail: bool = False, - params: Optional[Dict] = None): + def set_margin_mode( + self, + pair: str, + margin_mode: MarginMode, + accept_fail: bool = False, + params: Optional[Dict] = None, + ): """ Set's the margin mode on the exchange to cross or isolated for a specific pair :param pair: base/quote currency pair (e.g. "ADA/USDT") """ - if self._config['dry_run'] or not self.exchange_has("setMarginMode"): + if self._config["dry_run"] or not self.exchange_has("setMarginMode"): # Some exchanges only support one margin_mode type return @@ -2807,16 +3059,18 @@ class Exchange: params = {} try: res = self._api.set_margin_mode(margin_mode.value, pair, params) - self._log_exchange_response('set_margin_mode', res) + self._log_exchange_response("set_margin_mode", res) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.BadRequest, ccxt.OperationRejected) as e: if not accept_fail: raise TemporaryError( - f'Could not set margin mode due to {e.__class__.__name__}. Message: {e}') from e + f"Could not set margin mode due to {e.__class__.__name__}. Message: {e}" + ) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not set margin mode due to {e.__class__.__name__}. Message: {e}') from e + f"Could not set margin mode due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -2826,7 +3080,7 @@ class Exchange: amount: float, is_short: bool, open_date: datetime, - close_date: Optional[datetime] = None + close_date: Optional[datetime] = None, ) -> float: """ Fetches and calculates the sum of all funding fees that occurred for a pair @@ -2842,9 +3096,9 @@ class Exchange: if self.funding_fee_cutoff(open_date): # Shift back to 1h candle to avoid missing funding fees # Only really relevant for trades very close to the full hour - open_date = timeframe_to_prev_date('1h', open_date) - timeframe = self._ft_has['mark_ohlcv_timeframe'] - timeframe_ff = self._ft_has['funding_fee_timeframe'] + open_date = timeframe_to_prev_date("1h", open_date) + timeframe = self._ft_has["mark_ohlcv_timeframe"] + timeframe_ff = self._ft_has["funding_fee_timeframe"] mark_price_type = CandleType.from_string(self._ft_has["mark_ohlcv_price"]) if not close_date: @@ -2874,12 +3128,13 @@ class Exchange: amount=amount, is_short=is_short, open_date=open_date, - close_date=close_date + close_date=close_date, ) @staticmethod - def combine_funding_and_mark(funding_rates: DataFrame, mark_rates: DataFrame, - futures_funding_rate: Optional[int] = None) -> DataFrame: + def combine_funding_and_mark( + funding_rates: DataFrame, mark_rates: DataFrame, futures_funding_rate: Optional[int] = None + ) -> DataFrame: """ Combine funding-rates and mark-rates dataframes :param funding_rates: Dataframe containing Funding rates (Type FUNDING_RATE) @@ -2888,24 +3143,28 @@ class Exchange: """ if futures_funding_rate is None: return mark_rates.merge( - funding_rates, on='date', how="inner", suffixes=["_mark", "_fund"]) + funding_rates, on="date", how="inner", suffixes=["_mark", "_fund"] + ) else: if len(funding_rates) == 0: # No funding rate candles - full fillup with fallback variable - mark_rates['open_fund'] = futures_funding_rate + mark_rates["open_fund"] = futures_funding_rate return mark_rates.rename( - columns={'open': 'open_mark', - 'close': 'close_mark', - 'high': 'high_mark', - 'low': 'low_mark', - 'volume': 'volume_mark'}) + columns={ + "open": "open_mark", + "close": "close_mark", + "high": "high_mark", + "low": "low_mark", + "volume": "volume_mark", + } + ) else: # Fill up missing funding_rate candles with fallback value combined = mark_rates.merge( - funding_rates, on='date', how="left", suffixes=["_mark", "_fund"] - ) - combined['open_fund'] = combined['open_fund'].fillna(futures_funding_rate) + funding_rates, on="date", how="left", suffixes=["_mark", "_fund"] + ) + combined["open_fund"] = combined["open_fund"].fillna(futures_funding_rate) return combined def calculate_funding_fees( @@ -2915,7 +3174,7 @@ class Exchange: is_short: bool, open_date: datetime, close_date: datetime, - time_in_ratio: Optional[float] = None + time_in_ratio: Optional[float] = None, ) -> float: """ calculates the sum of all funding fees that occurred for a pair during a futures trade @@ -2930,15 +3189,16 @@ class Exchange: fees: float = 0 if not df.empty: - df1 = df[(df['date'] >= open_date) & (df['date'] <= close_date)] - fees = sum(df1['open_fund'] * df1['open_mark'] * amount) + df1 = df[(df["date"] >= open_date) & (df["date"] <= close_date)] + fees = sum(df1["open_fund"] * df1["open_mark"] * amount) if isnan(fees): fees = 0.0 # Negate fees for longs as funding_fees expects it this way based on live endpoints. 
return fees if is_short else -fees def get_funding_fees( - self, pair: str, amount: float, is_short: bool, open_date: datetime) -> float: + self, pair: str, amount: float, is_short: bool, open_date: datetime + ) -> float: """ Fetch funding fees, either from the exchange (live) or calculates them based on funding rate/mark price history @@ -2950,9 +3210,10 @@ class Exchange: """ if self.trading_mode == TradingMode.FUTURES: try: - if self._config['dry_run']: + if self._config["dry_run"]: funding_fees = self._fetch_and_calculate_funding_fees( - pair, amount, is_short, open_date) + pair, amount, is_short, open_date + ) else: funding_fees = self._get_funding_fees_from_exchange(pair, open_date) return funding_fees @@ -2965,7 +3226,7 @@ class Exchange: self, pair: str, # Dry-run - open_rate: float, # Entry price of position + open_rate: float, # Entry price of position is_short: bool, amount: float, # Absolute value of position size stake_amount: float, @@ -2979,13 +3240,13 @@ class Exchange: """ if self.trading_mode == TradingMode.SPOT: return None - elif (self.trading_mode != TradingMode.FUTURES): + elif self.trading_mode != TradingMode.FUTURES: raise OperationalException( - f"{self.name} does not support {self.margin_mode} {self.trading_mode}") + f"{self.name} does not support {self.margin_mode} {self.trading_mode}" + ) liquidation_price = None - if self._config['dry_run'] or not self.exchange_has("fetchPositions"): - + if self._config["dry_run"] or not self.exchange_has("fetchPositions"): liquidation_price = self.dry_run_liquidation_price( pair=pair, open_rate=open_rate, @@ -2995,20 +3256,18 @@ class Exchange: stake_amount=stake_amount, wallet_balance=wallet_balance, mm_ex_1=mm_ex_1, - upnl_ex_1=upnl_ex_1 + upnl_ex_1=upnl_ex_1, ) else: positions = self.fetch_positions(pair) if len(positions) > 0: pos = positions[0] - liquidation_price = pos['liquidationPrice'] + liquidation_price = pos["liquidationPrice"] if liquidation_price is not None: buffer_amount = abs(open_rate - liquidation_price) * self.liquidation_buffer liquidation_price_buffer = ( - liquidation_price - buffer_amount - if is_short else - liquidation_price + buffer_amount + liquidation_price - buffer_amount if is_short else liquidation_price + buffer_amount ) return max(liquidation_price_buffer, 0.0) else: @@ -3017,7 +3276,7 @@ class Exchange: def dry_run_liquidation_price( self, pair: str, - open_rate: float, # Entry price of position + open_rate: float, # Entry price of position is_short: bool, amount: float, stake_amount: float, @@ -3056,25 +3315,24 @@ class Exchange: """ market = self.markets[pair] - taker_fee_rate = market['taker'] + taker_fee_rate = market["taker"] mm_ratio, _ = self.get_maintenance_ratio_and_amt(pair, stake_amount) if self.trading_mode == TradingMode.FUTURES and self.margin_mode == MarginMode.ISOLATED: - - if market['inverse']: - raise OperationalException( - "Freqtrade does not yet support inverse contracts") + if market["inverse"]: + raise OperationalException("Freqtrade does not yet support inverse contracts") value = wallet_balance / amount - mm_ratio_taker = (mm_ratio + taker_fee_rate) + mm_ratio_taker = mm_ratio + taker_fee_rate if is_short: return (open_rate + value) / (1 + mm_ratio_taker) else: return (open_rate - value) / (1 - mm_ratio_taker) else: raise OperationalException( - "Freqtrade only supports isolated futures for leverage trading") + "Freqtrade only supports isolated futures for leverage trading" + ) def get_maintenance_ratio_and_amt( self, @@ -3089,10 +3347,11 @@ class Exchange: :return: 
(maintenance margin ratio, maintenance amount) """ - if (self._config.get('runmode') in OPTIMIZE_MODES - or self.exchange_has('fetchLeverageTiers') - or self.exchange_has('fetchMarketLeverageTiers')): - + if ( + self._config.get("runmode") in OPTIMIZE_MODES + or self.exchange_has("fetchLeverageTiers") + or self.exchange_has("fetchMarketLeverageTiers") + ): if pair not in self._leverage_tiers: raise InvalidOrderException( f"Maintenance margin rate for {pair} is unavailable for {self.name}" @@ -3101,8 +3360,8 @@ class Exchange: pair_tiers = self._leverage_tiers[pair] for tier in reversed(pair_tiers): - if nominal_value >= tier['minNotional']: - return (tier['maintenanceMarginRate'], tier['maintAmt']) + if nominal_value >= tier["minNotional"]: + return (tier["maintenanceMarginRate"], tier["maintAmt"]) raise ExchangeError("nominal value can not be lower than 0") # The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it diff --git a/freqtrade/exchange/exchange_utils.py b/freqtrade/exchange/exchange_utils.py index 73f61f256..dcae1ab3b 100644 --- a/freqtrade/exchange/exchange_utils.py +++ b/freqtrade/exchange/exchange_utils.py @@ -1,16 +1,29 @@ """ Exchange support utils """ + from datetime import datetime, timedelta, timezone from math import ceil, floor from typing import Any, Dict, List, Optional, Tuple import ccxt -from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE, - TRUNCATE, decimal_to_precision) +from ccxt import ( + DECIMAL_PLACES, + ROUND, + ROUND_DOWN, + ROUND_UP, + SIGNIFICANT_DIGITS, + TICK_SIZE, + TRUNCATE, + decimal_to_precision, +) -from freqtrade.exchange.common import (BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED, - SUPPORTED_EXCHANGES) +from freqtrade.exchange.common import ( + BAD_EXCHANGES, + EXCHANGE_HAS_OPTIONAL, + EXCHANGE_HAS_REQUIRED, + SUPPORTED_EXCHANGES, +) from freqtrade.exchange.exchange_utils_timeframe import timeframe_to_minutes, timeframe_to_prev_date from freqtrade.types import ValidExchangesType from freqtrade.util import FtPrecise @@ -20,7 +33,8 @@ CcxtModuleType = Any def is_exchange_known_ccxt( - exchange_name: str, ccxt_module: Optional[CcxtModuleType] = None) -> bool: + exchange_name: str, ccxt_module: Optional[CcxtModuleType] = None +) -> bool: return exchange_name in ccxt_exchanges(ccxt_module) @@ -46,13 +60,13 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]: """ ex_mod = getattr(ccxt, exchange.lower())() result = True - reason = '' + reason = "" if not ex_mod or not ex_mod.has: - return False, '' + return False, "" missing = [ - k for k, v in EXCHANGE_HAS_REQUIRED.items() - if ex_mod.has.get(k) is not True - and not (all(ex_mod.has.get(x) for x in v)) + k + for k, v in EXCHANGE_HAS_REQUIRED.items() + if ex_mod.has.get(k) is not True and not (all(ex_mod.has.get(x) for x in v)) ] if missing: result = False @@ -62,7 +76,7 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]: if exchange.lower() in BAD_EXCHANGES: result = False - reason = BAD_EXCHANGES.get(exchange.lower(), '') + reason = BAD_EXCHANGES.get(exchange.lower(), "") if missing_opt: reason += f"{'. ' if reason else ''}missing opt: {', '.join(missing_opt)}. 
" @@ -71,23 +85,26 @@ def validate_exchange(exchange: str) -> Tuple[bool, str]: def _build_exchange_list_entry( - exchange_name: str, exchangeClasses: Dict[str, Any]) -> ValidExchangesType: + exchange_name: str, exchangeClasses: Dict[str, Any] +) -> ValidExchangesType: valid, comment = validate_exchange(exchange_name) result: ValidExchangesType = { - 'name': exchange_name, - 'valid': valid, - 'supported': exchange_name.lower() in SUPPORTED_EXCHANGES, - 'comment': comment, - 'trade_modes': [{'trading_mode': 'spot', 'margin_mode': ''}], + "name": exchange_name, + "valid": valid, + "supported": exchange_name.lower() in SUPPORTED_EXCHANGES, + "comment": comment, + "trade_modes": [{"trading_mode": "spot", "margin_mode": ""}], } if resolved := exchangeClasses.get(exchange_name.lower()): - supported_modes = [{'trading_mode': 'spot', 'margin_mode': ''}] + [ - {'trading_mode': tm.value, 'margin_mode': mm.value} - for tm, mm in resolved['class']._supported_trading_mode_margin_pairs + supported_modes = [{"trading_mode": "spot", "margin_mode": ""}] + [ + {"trading_mode": tm.value, "margin_mode": mm.value} + for tm, mm in resolved["class"]._supported_trading_mode_margin_pairs ] - result.update({ - 'trade_modes': supported_modes, - }) + result.update( + { + "trade_modes": supported_modes, + } + ) return result @@ -99,7 +116,7 @@ def list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]: exchanges = ccxt_exchanges() if all_exchanges else available_exchanges() from freqtrade.resolvers.exchange_resolver import ExchangeResolver - subclassed = {e['name'].lower(): e for e in ExchangeResolver.search_all_objects({}, False)} + subclassed = {e["name"].lower(): e for e in ExchangeResolver.search_all_objects({}, False)} exchanges_valid: List[ValidExchangesType] = [ _build_exchange_list_entry(e, subclassed) for e in exchanges @@ -109,7 +126,8 @@ def list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]: def date_minus_candles( - timeframe: str, candle_count: int, date: Optional[datetime] = None) -> datetime: + timeframe: str, candle_count: int, date: Optional[datetime] = None +) -> datetime: """ subtract X candles from a date. :param timeframe: timeframe in string format (e.g. "5m") @@ -133,7 +151,7 @@ def market_is_active(market: Dict) -> bool: # true then it's true. If it's undefined, then it's most likely true, but not 100% )" # See https://github.com/ccxt/ccxt/issues/4874, # https://github.com/ccxt/ccxt/issues/4075#issuecomment-434760520 - return market.get('active', True) is not False + return market.get("active", True) is not False def amount_to_contracts(amount: float, contract_size: Optional[float]) -> float: @@ -163,8 +181,9 @@ def contracts_to_amount(num_contracts: float, contract_size: Optional[float]) -> return num_contracts -def amount_to_precision(amount: float, amount_precision: Optional[float], - precisionMode: Optional[int]) -> float: +def amount_to_precision( + amount: float, amount_precision: Optional[float], precisionMode: Optional[int] +) -> float: """ Returns the amount to buy or sell to a precision the Exchange accepts Re-implementation of ccxt internal methods - ensuring we can test the result is correct @@ -179,17 +198,24 @@ def amount_to_precision(amount: float, amount_precision: Optional[float], if amount_precision is not None and precisionMode is not None: precision = int(amount_precision) if precisionMode != TICK_SIZE else amount_precision # precision must be an int for non-ticksize inputs. 
- amount = float(decimal_to_precision(amount, rounding_mode=TRUNCATE, - precision=precision, - counting_mode=precisionMode, - )) + amount = float( + decimal_to_precision( + amount, + rounding_mode=TRUNCATE, + precision=precision, + counting_mode=precisionMode, + ) + ) return amount def amount_to_contract_precision( - amount, amount_precision: Optional[float], precisionMode: Optional[int], - contract_size: Optional[float]) -> float: + amount, + amount_precision: Optional[float], + precisionMode: Optional[int], + contract_size: Optional[float], +) -> float: """ Returns the amount to buy or sell to a precision the Exchange accepts including calculation to and from contracts. @@ -222,23 +248,25 @@ def __price_to_precision_significant_digits( from decimal import ROUND_DOWN as dec_ROUND_DOWN from decimal import ROUND_UP as dec_ROUND_UP from decimal import Decimal + dec = Decimal(str(price)) - string = f'{dec:f}' + string = f"{dec:f}" precision = round(price_precision) q = precision - dec.adjusted() - 1 - sigfig = Decimal('10') ** -q + sigfig = Decimal("10") ** -q if q < 0: string_to_precision = string[:precision] # string_to_precision is '' when we have zero precision - below = sigfig * Decimal(string_to_precision if string_to_precision else '0') + below = sigfig * Decimal(string_to_precision if string_to_precision else "0") above = below + sigfig res = above if rounding_mode == ROUND_UP else below - precise = f'{res:f}' + precise = f"{res:f}" else: - precise = '{:f}'.format(dec.quantize( - sigfig, - rounding=dec_ROUND_DOWN if rounding_mode == ROUND_DOWN else dec_ROUND_UP) + precise = "{:f}".format( + dec.quantize( + sigfig, rounding=dec_ROUND_DOWN if rounding_mode == ROUND_DOWN else dec_ROUND_UP + ) ) return float(precise) @@ -268,10 +296,14 @@ def price_to_precision( if price_precision is not None and precisionMode is not None: if rounding_mode not in (ROUND_UP, ROUND_DOWN): # Use CCXT code where possible. - return float(decimal_to_precision(price, rounding_mode=rounding_mode, - precision=price_precision, - counting_mode=precisionMode - )) + return float( + decimal_to_precision( + price, + rounding_mode=rounding_mode, + precision=price_precision, + counting_mode=precisionMode, + ) + ) if precisionMode == TICK_SIZE: precision = FtPrecise(price_precision) @@ -285,7 +317,6 @@ def price_to_precision( return round(float(str(res)), 14) return price elif precisionMode == DECIMAL_PLACES: - ndigits = round(price_precision) ticks = price * (10**ndigits) if rounding_mode == ROUND_UP: diff --git a/freqtrade/exchange/exchange_utils_timeframe.py b/freqtrade/exchange/exchange_utils_timeframe.py index 9366bc7a1..67cf1b5d6 100644 --- a/freqtrade/exchange/exchange_utils_timeframe.py +++ b/freqtrade/exchange/exchange_utils_timeframe.py @@ -36,16 +36,16 @@ def timeframe_to_resample_freq(timeframe: str) -> str: form ('1m', '5m', '1h', '1d', '1w', etc.) to the resample frequency used by pandas ('1T', '5T', '1H', '1D', '1W', etc.) 
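The branch logic of `timeframe_to_resample_freq` reformatted in this hunk maps roughly as follows; expected outputs are derived from the branches shown here, not from separate documentation:

```python
from freqtrade.exchange.exchange_utils_timeframe import timeframe_to_resample_freq

print(timeframe_to_resample_freq("5m"))  # "300s"    - plain second-based frequency
print(timeframe_to_resample_freq("1d"))  # "86400s"
print(timeframe_to_resample_freq("1w"))  # "1W-MON"  - weekly candles anchored on Monday
print(timeframe_to_resample_freq("1M"))  # "1MS"     - month-start, sticks to the 1st
print(timeframe_to_resample_freq("1y"))  # "1YS"     - special-cased early return
```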
""" - if timeframe == '1y': - return '1YS' + if timeframe == "1y": + return "1YS" timeframe_seconds = timeframe_to_seconds(timeframe) timeframe_minutes = timeframe_seconds // 60 - resample_interval = f'{timeframe_seconds}s' + resample_interval = f"{timeframe_seconds}s" if 10000 < timeframe_minutes < 43200: - resample_interval = '1W-MON' + resample_interval = "1W-MON" elif timeframe_minutes >= 43200 and timeframe_minutes < 525600: # Monthly candles need special treatment to stick to the 1st of the month - resample_interval = f'{timeframe}S' + resample_interval = f"{timeframe}S" elif timeframe_minutes > 43200: resample_interval = timeframe return resample_interval @@ -62,8 +62,7 @@ def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> d if not date: date = datetime.now(timezone.utc) - new_timestamp = ccxt.Exchange.round_timeframe( - timeframe, dt_ts(date), ROUND_DOWN) // 1000 + new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_DOWN) // 1000 return dt_from_ts(new_timestamp) @@ -76,6 +75,5 @@ def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> d """ if not date: date = datetime.now(timezone.utc) - new_timestamp = ccxt.Exchange.round_timeframe( - timeframe, dt_ts(date), ROUND_UP) // 1000 + new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_UP) // 1000 return dt_from_ts(new_timestamp) diff --git a/freqtrade/exchange/gate.py b/freqtrade/exchange/gate.py index 1d25e2df3..2408e306e 100644 --- a/freqtrade/exchange/gate.py +++ b/freqtrade/exchange/gate.py @@ -1,4 +1,5 @@ -""" Gate.io exchange subclass """ +"""Gate.io exchange subclass""" + import logging from datetime import datetime from typing import Any, Dict, List, Optional, Tuple @@ -24,7 +25,7 @@ class Gate(Exchange): _ft_has: Dict = { "ohlcv_candle_limit": 1000, - "order_time_in_force": ['GTC', 'IOC'], + "order_time_in_force": ["GTC", "IOC"], "stoploss_on_exchange": True, "stoploss_order_types": {"limit": "limit"}, "stop_price_param": "stopPrice", @@ -51,13 +52,13 @@ class Gate(Exchange): ] def _get_params( - self, - side: BuySell, - ordertype: str, - leverage: float, - reduceOnly: bool, - time_in_force: str = 'GTC', - ) -> Dict: + self, + side: BuySell, + ordertype: str, + leverage: float, + reduceOnly: bool, + time_in_force: str = "GTC", + ) -> Dict: params = super()._get_params( side=side, ordertype=ordertype, @@ -65,13 +66,14 @@ class Gate(Exchange): reduceOnly=reduceOnly, time_in_force=time_in_force, ) - if ordertype == 'market' and self.trading_mode == TradingMode.FUTURES: - params['type'] = 'market' - params.update({'timeInForce': 'IOC'}) + if ordertype == "market" and self.trading_mode == TradingMode.FUTURES: + params["type"] = "market" + params.update({"timeInForce": "IOC"}) return params - def get_trades_for_order(self, order_id: str, pair: str, since: datetime, - params: Optional[Dict] = None) -> List: + def get_trades_for_order( + self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None + ) -> List: trades = super().get_trades_for_order(order_id, pair, since, params) if self.trading_mode == TradingMode.FUTURES: @@ -84,45 +86,38 @@ class Gate(Exchange): pair_fees = self._trading_fees.get(pair, {}) if pair_fees: for idx, trade in enumerate(trades): - fee = trade.get('fee', {}) - if fee and fee.get('cost') is None: - takerOrMaker = trade.get('takerOrMaker', 'taker') + fee = trade.get("fee", {}) + if fee and fee.get("cost") is None: + takerOrMaker = trade.get("takerOrMaker", "taker") if 
pair_fees.get(takerOrMaker) is not None: - trades[idx]['fee'] = { - 'currency': self.get_pair_quote_currency(pair), - 'cost': trade['cost'] * pair_fees[takerOrMaker], - 'rate': pair_fees[takerOrMaker], + trades[idx]["fee"] = { + "currency": self.get_pair_quote_currency(pair), + "cost": trade["cost"] * pair_fees[takerOrMaker], + "rate": pair_fees[takerOrMaker], } return trades def get_order_id_conditional(self, order: Dict[str, Any]) -> str: - return safe_value_fallback2(order, order, 'id_stop', 'id') + return safe_value_fallback2(order, order, "id_stop", "id") def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - order = self.fetch_order( - order_id=order_id, - pair=pair, - params={'stop': True} - ) - if order.get('status', 'open') == 'closed': + order = self.fetch_order(order_id=order_id, pair=pair, params={"stop": True}) + if order.get("status", "open") == "closed": # Places a real order - which we need to fetch explicitly. - val = 'trade_id' if self.trading_mode == TradingMode.FUTURES else 'fired_order_id' + val = "trade_id" if self.trading_mode == TradingMode.FUTURES else "fired_order_id" - if new_orderid := order.get('info', {}).get(val): + if new_orderid := order.get("info", {}).get(val): order1 = self.fetch_order(order_id=new_orderid, pair=pair, params=params) - order1['id_stop'] = order1['id'] - order1['id'] = order_id - order1['type'] = 'stoploss' - order1['stopPrice'] = order.get('stopPrice') - order1['status_stop'] = 'triggered' + order1["id_stop"] = order1["id"] + order1["id"] = order_id + order1["type"] = "stoploss" + order1["stopPrice"] = order.get("stopPrice") + order1["status_stop"] = "triggered" return order1 return order def cancel_stoploss_order( - self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - return self.cancel_order( - order_id=order_id, - pair=pair, - params={'stop': True} - ) + self, order_id: str, pair: str, params: Optional[Dict] = None + ) -> Dict: + return self.cancel_order(order_id=order_id, pair=pair, params={"stop": True}) diff --git a/freqtrade/exchange/htx.py b/freqtrade/exchange/htx.py index 2e9aff77b..f939534e9 100644 --- a/freqtrade/exchange/htx.py +++ b/freqtrade/exchange/htx.py @@ -1,4 +1,5 @@ -""" HTX exchange subclass """ +"""HTX exchange subclass""" + import logging from typing import Dict @@ -23,13 +24,18 @@ class Htx(Exchange): "ohlcv_candle_limit": 1000, "l2_limit_range": [5, 10, 20], "l2_limit_range_required": False, + "ohlcv_candle_limit_per_timeframe": { + "1w": 500, + "1M": 500, + }, } def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: - params = self._params.copy() - params.update({ - "stopPrice": stop_price, - "operator": "lte", - }) + params.update( + { + "stopPrice": stop_price, + "operator": "lte", + } + ) return params diff --git a/freqtrade/exchange/idex.py b/freqtrade/exchange/idex.py index eae5ad155..b3bf12110 100644 --- a/freqtrade/exchange/idex.py +++ b/freqtrade/exchange/idex.py @@ -1,4 +1,5 @@ -""" Idex exchange subclass """ +"""Idex exchange subclass""" + import logging from typing import Dict diff --git a/freqtrade/exchange/kraken.py b/freqtrade/exchange/kraken.py index f30d79cba..4fbbe113c 100644 --- a/freqtrade/exchange/kraken.py +++ b/freqtrade/exchange/kraken.py @@ -1,4 +1,5 @@ -""" Kraken exchange subclass """ +"""Kraken exchange subclass""" + import logging from datetime import datetime from typing import Any, Dict, List, Optional, Tuple @@ -18,7 +19,6 @@ logger = logging.getLogger(__name__) class Kraken(Exchange): - 
_params: Dict = {"trading_agreement": "agree"} _ft_has: Dict = { "stoploss_on_exchange": True, @@ -47,18 +47,17 @@ class Kraken(Exchange): """ parent_check = super().market_is_tradable(market) - return (parent_check and - market.get('darkpool', False) is False) + return parent_check and market.get("darkpool", False) is False def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Tickers: # Only fetch tickers for current stake currency # Otherwise the request for kraken becomes too large. - symbols = list(self.get_markets(quote_currencies=[self._config['stake_currency']])) + symbols = list(self.get_markets(quote_currencies=[self._config["stake_currency"]])) return super().get_tickers(symbols=symbols, cached=cached) @retrier def get_balances(self) -> dict: - if self._config['dry_run']: + if self._config["dry_run"]: return {} try: @@ -70,23 +69,28 @@ class Kraken(Exchange): balances.pop("used", None) orders = self._api.fetch_open_orders() - order_list = [(x["symbol"].split("/")[0 if x["side"] == "sell" else 1], - x["remaining"] if x["side"] == "sell" else x["remaining"] * x["price"], - # Don't remove the below comment, this can be important for debugging - # x["side"], x["amount"], - ) for x in orders] + order_list = [ + ( + x["symbol"].split("/")[0 if x["side"] == "sell" else 1], + x["remaining"] if x["side"] == "sell" else x["remaining"] * x["price"], + # Don't remove the below comment, this can be important for debugging + # x["side"], x["amount"], + ) + for x in orders + ] for bal in balances: if not isinstance(balances[bal], dict): continue - balances[bal]['used'] = sum(order[1] for order in order_list if order[0] == bal) - balances[bal]['free'] = balances[bal]['total'] - balances[bal]['used'] + balances[bal]["used"] = sum(order[1] for order in order_list if order[0] == bal) + balances[bal]["free"] = balances[bal]["total"] - balances[bal]["used"] return balances except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e + f"Could not get balance due to {e.__class__.__name__}. Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e @@ -108,7 +112,7 @@ class Kraken(Exchange): ordertype: str, leverage: float, reduceOnly: bool, - time_in_force: str = 'GTC' + time_in_force: str = "GTC", ) -> Dict: params = super()._get_params( side=side, @@ -118,10 +122,10 @@ class Kraken(Exchange): time_in_force=time_in_force, ) if leverage > 1.0: - params['leverage'] = round(leverage) - if time_in_force == 'PO': - params.pop('timeInForce', None) - params['postOnly'] = True + params["leverage"] = round(leverage) + if time_in_force == "PO": + params.pop("timeInForce", None) + params["postOnly"] = True return params def calculate_funding_fees( @@ -131,7 +135,7 @@ class Kraken(Exchange): is_short: bool, open_date: datetime, close_date: datetime, - time_in_ratio: Optional[float] = None + time_in_ratio: Optional[float] = None, ) -> float: """ # ! 
This method will always error when run by Freqtrade because time_in_ratio is never @@ -149,12 +153,13 @@ class Kraken(Exchange): """ if not time_in_ratio: raise OperationalException( - f"time_in_ratio is required for {self.name}._get_funding_fee") + f"time_in_ratio is required for {self.name}._get_funding_fee" + ) fees: float = 0 if not df.empty: - df = df[(df['date'] >= open_date) & (df['date'] <= close_date)] - fees = sum(df['open_fund'] * df['open_mark'] * amount * time_in_ratio) + df = df[(df["date"] >= open_date) & (df["date"] <= close_date)] + fees = sum(df["open_fund"] * df["open_mark"] * amount * time_in_ratio) return fees if is_short else -fees @@ -164,14 +169,11 @@ class Kraken(Exchange): Applies only to fetch_trade_history by id. """ if len(trades) > 0: - if ( - isinstance(trades[-1].get('info'), list) - and len(trades[-1].get('info', [])) > 7 - ): + if isinstance(trades[-1].get("info"), list) and len(trades[-1].get("info", [])) > 7: # Trade response's "last" value. - return trades[-1].get('info', [])[-1] + return trades[-1].get("info", [])[-1] # Fall back to timestamp if info is somehow empty. - return trades[-1].get('timestamp') + return trades[-1].get("timestamp") return None def _valid_trade_pagination_id(self, pair: str, from_id: str) -> bool: diff --git a/freqtrade/exchange/kucoin.py b/freqtrade/exchange/kucoin.py index 7033f89ad..343904276 100644 --- a/freqtrade/exchange/kucoin.py +++ b/freqtrade/exchange/kucoin.py @@ -1,4 +1,5 @@ """Kucoin exchange subclass.""" + import logging from typing import Dict @@ -26,32 +27,27 @@ class Kucoin(Exchange): "stoploss_order_types": {"limit": "limit", "market": "market"}, "l2_limit_range": [20, 100], "l2_limit_range_required": False, - "order_time_in_force": ['GTC', 'FOK', 'IOC'], + "order_time_in_force": ["GTC", "FOK", "IOC"], "ohlcv_candle_limit": 1500, } def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: - params = self._params.copy() - params.update({ - 'stopPrice': stop_price, - 'stop': 'loss' - }) + params.update({"stopPrice": stop_price, "stop": "loss"}) return params def create_order( - self, - *, - pair: str, - ordertype: str, - side: BuySell, - amount: float, - rate: float, - leverage: float, - reduceOnly: bool = False, - time_in_force: str = 'GTC', - ) -> Dict: - + self, + *, + pair: str, + ordertype: str, + side: BuySell, + amount: float, + rate: float, + leverage: float, + reduceOnly: bool = False, + time_in_force: str = "GTC", + ) -> Dict: res = super().create_order( pair=pair, ordertype=ordertype, @@ -66,7 +62,7 @@ class Kucoin(Exchange): # ccxt returns status = 'closed' at the moment - which is information ccxt invented. # Since we rely on status heavily, we must set it to 'open' here. 
# ref: https://github.com/ccxt/ccxt/pull/16674, (https://github.com/ccxt/ccxt/pull/16553) - if not self._config['dry_run']: - res['type'] = ordertype - res['status'] = 'open' + if not self._config["dry_run"]: + res["type"] = ordertype + res["status"] = "open" return res diff --git a/freqtrade/exchange/okx.py b/freqtrade/exchange/okx.py index d919a73cc..1704117e6 100644 --- a/freqtrade/exchange/okx.py +++ b/freqtrade/exchange/okx.py @@ -6,8 +6,12 @@ import ccxt from freqtrade.constants import BuySell from freqtrade.enums import CandleType, MarginMode, PriceType, TradingMode -from freqtrade.exceptions import (DDosProtection, OperationalException, RetryableOrderError, - TemporaryError) +from freqtrade.exceptions import ( + DDosProtection, + OperationalException, + RetryableOrderError, + TemporaryError, +) from freqtrade.exchange import Exchange, date_minus_candles from freqtrade.exchange.common import retrier from freqtrade.misc import safe_value_fallback2 @@ -37,7 +41,7 @@ class Okx(Exchange): PriceType.LAST: "last", PriceType.MARK: "index", PriceType.INDEX: "mark", - }, + }, } _supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [ @@ -49,10 +53,11 @@ class Okx(Exchange): net_only = True - _ccxt_params: Dict = {'options': {'brokerId': 'ffb5405ad327SUDE'}} + _ccxt_params: Dict = {"options": {"brokerId": "ffb5405ad327SUDE"}} def ohlcv_candle_limit( - self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: + self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None + ) -> int: """ Exchange ohlcv candle limit OKX has the following behaviour: @@ -64,9 +69,8 @@ class Okx(Exchange): :param since_ms: Starting timestamp :return: Candle limit as integer """ - if ( - candle_type in (CandleType.FUTURES, CandleType.SPOT) and - (not since_ms or since_ms > (date_minus_candles(timeframe, 300).timestamp() * 1000)) + if candle_type in (CandleType.FUTURES, CandleType.SPOT) and ( + not since_ms or since_ms > (date_minus_candles(timeframe, 300).timestamp() * 1000) ): return 300 @@ -80,29 +84,29 @@ class Okx(Exchange): Must be overridden in child methods if required. """ try: - if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']: + if self.trading_mode == TradingMode.FUTURES and not self._config["dry_run"]: accounts = self._api.fetch_accounts() - self._log_exchange_response('fetch_accounts', accounts) + self._log_exchange_response("fetch_accounts", accounts) if len(accounts) > 0: - self.net_only = accounts[0].get('info', {}).get('posMode') == 'net_mode' + self.net_only = accounts[0].get("info", {}).get("posMode") == "net_mode" except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.OperationFailed, ccxt.ExchangeError) as e: raise TemporaryError( - f'Error in additional_exchange_init due to {e.__class__.__name__}. Message: {e}' - ) from e + f"Error in additional_exchange_init due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e def _get_posSide(self, side: BuySell, reduceOnly: bool): if self.net_only: - return 'net' + return "net" if not reduceOnly: # Enter - return 'long' if side == 'buy' else 'short' + return "long" if side == "buy" else "short" else: # Exit - return 'long' if side == 'sell' else 'short' + return "long" if side == "sell" else "short" def _get_params( self, @@ -110,7 +114,7 @@ class Okx(Exchange): ordertype: str, leverage: float, reduceOnly: bool, - time_in_force: str = 'GTC', + time_in_force: str = "GTC", ) -> Dict: params = super()._get_params( side=side, @@ -120,18 +124,21 @@ class Okx(Exchange): time_in_force=time_in_force, ) if self.trading_mode == TradingMode.FUTURES and self.margin_mode: - params['tdMode'] = self.margin_mode.value - params['posSide'] = self._get_posSide(side, reduceOnly) + params["tdMode"] = self.margin_mode.value + params["posSide"] = self._get_posSide(side, reduceOnly) return params def __fetch_leverage_already_set(self, pair: str, leverage: float, side: BuySell) -> bool: try: - res_lev = self._api.fetch_leverage(symbol=pair, params={ + res_lev = self._api.fetch_leverage( + symbol=pair, + params={ "mgnMode": self.margin_mode.value, "posSide": self._get_posSide(side, False), - }) - self._log_exchange_response('get_leverage', res_lev) - already_set = all(float(x['lever']) == leverage for x in res_lev['data']) + }, + ) + self._log_exchange_response("get_leverage", res_lev) + already_set = all(float(x["lever"]) == leverage for x in res_lev["data"]) return already_set except ccxt.BaseError: @@ -148,8 +155,9 @@ class Okx(Exchange): params={ "mgnMode": self.margin_mode.value, "posSide": self._get_posSide(side, False), - }) - self._log_exchange_response('set_leverage', res) + }, + ) + self._log_exchange_response("set_leverage", res) except ccxt.DDoSProtection as e: raise DDosProtection(e) from e @@ -157,84 +165,81 @@ class Okx(Exchange): already_set = self.__fetch_leverage_already_set(pair, leverage, side) if not already_set: raise TemporaryError( - f'Could not set leverage due to {e.__class__.__name__}. Message: {e}' - ) from e + f"Could not set leverage due to {e.__class__.__name__}. 
Message: {e}" + ) from e except ccxt.BaseError as e: raise OperationalException(e) from e - def get_max_pair_stake_amount( - self, - pair: str, - price: float, - leverage: float = 1.0 - ) -> float: - + def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float: if self.trading_mode == TradingMode.SPOT: - return float('inf') # Not actually inf, but this probably won't matter for SPOT + return float("inf") # Not actually inf, but this probably won't matter for SPOT if pair not in self._leverage_tiers: - return float('inf') + return float("inf") pair_tiers = self._leverage_tiers[pair] - return pair_tiers[-1]['maxNotional'] / leverage + return pair_tiers[-1]["maxNotional"] / leverage def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: params = super()._get_stop_params(side, ordertype, stop_price) if self.trading_mode == TradingMode.FUTURES and self.margin_mode: - params['tdMode'] = self.margin_mode.value - params['posSide'] = self._get_posSide(side, True) + params["tdMode"] = self.margin_mode.value + params["posSide"] = self._get_posSide(side, True) return params def _convert_stop_order(self, pair: str, order_id: str, order: Dict) -> Dict: if ( - order.get('status', 'open') == 'closed' - and (real_order_id := order.get('info', {}).get('ordId')) is not None + order.get("status", "open") == "closed" + and (real_order_id := order.get("info", {}).get("ordId")) is not None ): # Once a order triggered, we fetch the regular followup order. order_reg = self.fetch_order(real_order_id, pair) - self._log_exchange_response('fetch_stoploss_order1', order_reg) - order_reg['id_stop'] = order_reg['id'] - order_reg['id'] = order_id - order_reg['type'] = 'stoploss' - order_reg['status_stop'] = 'triggered' + self._log_exchange_response("fetch_stoploss_order1", order_reg) + order_reg["id_stop"] = order_reg["id"] + order_reg["id"] = order_id + order_reg["type"] = "stoploss" + order_reg["status_stop"] = "triggered" return order_reg order = self._order_contracts_to_amount(order) - order['type'] = 'stoploss' + order["type"] = "stoploss" return order def fetch_stoploss_order(self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - if self._config['dry_run']: + if self._config["dry_run"]: return self.fetch_dry_run_order(order_id) try: - params1 = {'stop': True} + params1 = {"stop": True} order_reg = self._api.fetch_order(order_id, pair, params=params1) - self._log_exchange_response('fetch_stoploss_order', order_reg) + self._log_exchange_response("fetch_stoploss_order", order_reg) return self._convert_stop_order(pair, order_id, order_reg) except ccxt.OrderNotFound: pass - params2 = {'stop': True, 'ordType': 'conditional'} - for method in (self._api.fetch_open_orders, self._api.fetch_closed_orders, - self._api.fetch_canceled_orders): + params2 = {"stop": True, "ordType": "conditional"} + for method in ( + self._api.fetch_open_orders, + self._api.fetch_closed_orders, + self._api.fetch_canceled_orders, + ): try: orders = method(pair, params=params2) - orders_f = [order for order in orders if order['id'] == order_id] + orders_f = [order for order in orders if order["id"] == order_id] if orders_f: order = orders_f[0] return self._convert_stop_order(pair, order_id, order) except ccxt.BaseError: pass - raise RetryableOrderError( - f'StoplossOrder not found (pair: {pair} id: {order_id}).') + raise RetryableOrderError(f"StoplossOrder not found (pair: {pair} id: {order_id}).") def get_order_id_conditional(self, order: Dict[str, Any]) -> str: - if 
order.get('type', '') == 'stop': - return safe_value_fallback2(order, order, 'id_stop', 'id') - return order['id'] + if order.get("type", "") == "stop": + return safe_value_fallback2(order, order, "id_stop", "id") + return order["id"] def cancel_stoploss_order( - self, order_id: str, pair: str, params: Optional[Dict] = None) -> Dict: - params1 = {'stop': True} + self, order_id: str, pair: str, params: Optional[Dict] = None + ) -> Dict: + params1 = {"stop": True} # 'ordType': 'conditional' # return self.cancel_order( @@ -247,10 +252,10 @@ class Okx(Exchange): orders = [] orders = self._api.fetch_closed_orders(pair, since=since_ms) - if (since_ms < dt_ts(dt_now() - timedelta(days=6, hours=23))): + if since_ms < dt_ts(dt_now() - timedelta(days=6, hours=23)): # Regular fetch_closed_orders only returns 7 days of data. # Force usage of "archive" endpoint, which returns 3 months of data. - params = {'method': 'privateGetTradeOrdersHistoryArchive'} + params = {"method": "privateGetTradeOrdersHistoryArchive"} orders_hist = self._api.fetch_closed_orders(pair, since=since_ms, params=params) orders.extend(orders_hist) diff --git a/freqtrade/freqai/RL/Base3ActionRLEnv.py b/freqtrade/freqai/RL/Base3ActionRLEnv.py index 538ca3a6a..c0e7e750d 100644 --- a/freqtrade/freqai/RL/Base3ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base3ActionRLEnv.py @@ -19,6 +19,7 @@ class Base3ActionRLEnv(BaseEnvironment): """ Base class for a 3 action environment """ + def __init__(self, **kwargs): super().__init__(**kwargs) self.actions = Actions @@ -73,11 +74,18 @@ class Base3ActionRLEnv(BaseEnvironment): if trade_type is not None: self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type, 'profit': self.get_unrealized_profit()}) + { + "price": self.current_price(), + "index": self._current_tick, + "type": trade_type, + "profit": self.get_unrealized_profit(), + } + ) - if (self._total_profit < self.max_drawdown or - self._total_unrealized_profit < self.max_drawdown): + if ( + self._total_profit < self.max_drawdown + or self._total_unrealized_profit < self.max_drawdown + ): self._done = True self._position_history.append(self._position) @@ -89,7 +97,7 @@ class Base3ActionRLEnv(BaseEnvironment): total_profit=self._total_profit, position=self._position.value, trade_duration=self.get_trade_duration(), - current_profit_pct=self.get_unrealized_profit() + current_profit_pct=self.get_unrealized_profit(), ) observation = self._get_observation() @@ -109,10 +117,14 @@ class Base3ActionRLEnv(BaseEnvironment): return ( (action == Actions.Buy.value and self._position == Positions.Neutral) or (action == Actions.Sell.value and self._position == Positions.Long) - or (action == Actions.Sell.value and self._position == Positions.Neutral - and self.can_short) - or (action == Actions.Buy.value and self._position == Positions.Short - and self.can_short) + or ( + action == Actions.Sell.value + and self._position == Positions.Neutral + and self.can_short + ) + or ( + action == Actions.Buy.value and self._position == Positions.Short and self.can_short + ) ) def _is_valid(self, action: int) -> bool: diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 12f10d4fc..0d2d74cb7 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -20,6 +20,7 @@ class Base4ActionRLEnv(BaseEnvironment): """ Base class for a 4 action environment """ + def __init__(self, **kwargs): super().__init__(**kwargs) self.actions = Actions @@ 
-52,7 +53,6 @@ class Base4ActionRLEnv(BaseEnvironment): trade_type = None if self.is_tradesignal(action): - if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" @@ -75,11 +75,18 @@ class Base4ActionRLEnv(BaseEnvironment): if trade_type is not None: self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type, 'profit': self.get_unrealized_profit()}) + { + "price": self.current_price(), + "index": self._current_tick, + "type": trade_type, + "profit": self.get_unrealized_profit(), + } + ) - if (self._total_profit < self.max_drawdown or - self._total_unrealized_profit < self.max_drawdown): + if ( + self._total_profit < self.max_drawdown + or self._total_unrealized_profit < self.max_drawdown + ): self._done = True self._position_history.append(self._position) @@ -91,7 +98,7 @@ class Base4ActionRLEnv(BaseEnvironment): total_profit=self._total_profit, position=self._position.value, trade_duration=self.get_trade_duration(), - current_profit_pct=self.get_unrealized_profit() + current_profit_pct=self.get_unrealized_profit(), ) observation = self._get_observation() @@ -108,14 +115,16 @@ class Base4ActionRLEnv(BaseEnvironment): Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Short_enter.value and self._position == Positions.Long) or - (action == Actions.Exit.value and self._position == Positions.Neutral) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Long_enter.value and self._position == Positions.Short)) + return not ( + (action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Neutral.value and self._position == Positions.Short) + or (action == Actions.Neutral.value and self._position == Positions.Long) + or (action == Actions.Short_enter.value and self._position == Positions.Short) + or (action == Actions.Short_enter.value and self._position == Positions.Long) + or (action == Actions.Exit.value and self._position == Positions.Neutral) + or (action == Actions.Long_enter.value and self._position == Positions.Long) + or (action == Actions.Long_enter.value and self._position == Positions.Short) + ) def _is_valid(self, action: int) -> bool: """ diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 35d04f942..2ae7f6b5a 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -21,6 +21,7 @@ class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment """ + def __init__(self, **kwargs): super().__init__(**kwargs) self.actions = Actions @@ -53,7 +54,6 @@ class Base5ActionRLEnv(BaseEnvironment): trade_type = None if self.is_tradesignal(action): - if action == Actions.Neutral.value: self._position = Positions.Neutral trade_type = "neutral" @@ -81,11 +81,18 @@ class Base5ActionRLEnv(BaseEnvironment): if trade_type is not None: self.trade_history.append( - {'price': self.current_price(), 'index': self._current_tick, - 'type': trade_type, 'profit': self.get_unrealized_profit()}) + { + "price": 
self.current_price(), + "index": self._current_tick, + "type": trade_type, + "profit": self.get_unrealized_profit(), + } + ) - if (self._total_profit < self.max_drawdown or - self._total_unrealized_profit < self.max_drawdown): + if ( + self._total_profit < self.max_drawdown + or self._total_unrealized_profit < self.max_drawdown + ): self._done = True self._position_history.append(self._position) @@ -97,7 +104,7 @@ class Base5ActionRLEnv(BaseEnvironment): total_profit=self._total_profit, position=self._position.value, trade_duration=self.get_trade_duration(), - current_profit_pct=self.get_unrealized_profit() + current_profit_pct=self.get_unrealized_profit(), ) observation = self._get_observation() @@ -113,17 +120,19 @@ class Base5ActionRLEnv(BaseEnvironment): Determine if the signal is a trade signal e.g.: agent wants a Actions.Long_exit while it is in a Positions.short """ - return not ((action == Actions.Neutral.value and self._position == Positions.Neutral) or - (action == Actions.Neutral.value and self._position == Positions.Short) or - (action == Actions.Neutral.value and self._position == Positions.Long) or - (action == Actions.Short_enter.value and self._position == Positions.Short) or - (action == Actions.Short_enter.value and self._position == Positions.Long) or - (action == Actions.Short_exit.value and self._position == Positions.Long) or - (action == Actions.Short_exit.value and self._position == Positions.Neutral) or - (action == Actions.Long_enter.value and self._position == Positions.Long) or - (action == Actions.Long_enter.value and self._position == Positions.Short) or - (action == Actions.Long_exit.value and self._position == Positions.Short) or - (action == Actions.Long_exit.value and self._position == Positions.Neutral)) + return not ( + (action == Actions.Neutral.value and self._position == Positions.Neutral) + or (action == Actions.Neutral.value and self._position == Positions.Short) + or (action == Actions.Neutral.value and self._position == Positions.Long) + or (action == Actions.Short_enter.value and self._position == Positions.Short) + or (action == Actions.Short_enter.value and self._position == Positions.Long) + or (action == Actions.Short_exit.value and self._position == Positions.Long) + or (action == Actions.Short_exit.value and self._position == Positions.Neutral) + or (action == Actions.Long_enter.value and self._position == Positions.Long) + or (action == Actions.Long_enter.value and self._position == Positions.Short) + or (action == Actions.Long_exit.value and self._position == Positions.Short) + or (action == Actions.Long_exit.value and self._position == Positions.Neutral) + ) def _is_valid(self, action: int) -> bool: # trade signal diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index f53ab9d27..ba72c90ed 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -21,6 +21,7 @@ class BaseActions(Enum): """ Default action space, mostly used for type handling. 
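To make the reformatted 5-action `is_tradesignal` condition above easier to scan, here is an equivalent, self-contained sketch expressed as a set of no-op (action, position) pairs; the enum definitions are simplified stand-ins for the ones in this module:

```python
from enum import Enum


class Actions(Enum):
    Neutral = 0
    Long_enter = 1
    Long_exit = 2
    Short_enter = 3
    Short_exit = 4


class Positions(Enum):
    Short = 0
    Long = 1
    Neutral = 0.5


# (action, position) pairs the environment treats as no-ops - no position change is performed
_NOOPS = {
    (Actions.Neutral, Positions.Neutral), (Actions.Neutral, Positions.Short),
    (Actions.Neutral, Positions.Long),
    (Actions.Short_enter, Positions.Short), (Actions.Short_enter, Positions.Long),
    (Actions.Short_exit, Positions.Long), (Actions.Short_exit, Positions.Neutral),
    (Actions.Long_enter, Positions.Long), (Actions.Long_enter, Positions.Short),
    (Actions.Long_exit, Positions.Short), (Actions.Long_exit, Positions.Neutral),
}


def is_tradesignal(action: Actions, position: Positions) -> bool:
    """True only when the action would actually change the current position."""
    return (action, position) not in _NOOPS


print(is_tradesignal(Actions.Long_enter, Positions.Neutral))  # True  - opens a long
print(is_tradesignal(Actions.Long_enter, Positions.Long))     # False - already long
```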
""" + Neutral = 0 Long_enter = 1 Long_exit = 2 @@ -44,11 +45,22 @@ class BaseEnvironment(gym.Env): See RL/Base5ActionRLEnv.py and RL/Base4ActionRLEnv.py """ - def __init__(self, df: DataFrame = DataFrame(), prices: DataFrame = DataFrame(), - reward_kwargs: dict = {}, window_size=10, starting_point=True, - id: str = 'baseenv-1', seed: int = 1, config: dict = {}, live: bool = False, - fee: float = 0.0015, can_short: bool = False, pair: str = "", - df_raw: DataFrame = DataFrame()): + def __init__( + self, + df: DataFrame = DataFrame(), + prices: DataFrame = DataFrame(), + reward_kwargs: dict = {}, + window_size=10, + starting_point=True, + id: str = "baseenv-1", + seed: int = 1, + config: dict = {}, + live: bool = False, + fee: float = 0.0015, + can_short: bool = False, + pair: str = "", + df_raw: DataFrame = DataFrame(), + ): """ Initializes the training/eval environment. :param df: dataframe of features @@ -64,15 +76,15 @@ class BaseEnvironment(gym.Env): :param can_short: Whether or not the environment can short """ self.config: dict = config - self.rl_config: dict = config['freqai']['rl_config'] - self.add_state_info: bool = self.rl_config.get('add_state_info', False) + self.rl_config: dict = config["freqai"]["rl_config"] + self.add_state_info: bool = self.rl_config.get("add_state_info", False) self.id: str = id - self.max_drawdown: float = 1 - self.rl_config.get('max_training_drawdown_pct', 0.8) - self.compound_trades: bool = config['stake_amount'] == 'unlimited' + self.max_drawdown: float = 1 - self.rl_config.get("max_training_drawdown_pct", 0.8) + self.compound_trades: bool = config["stake_amount"] == "unlimited" self.pair: str = pair self.raw_features: DataFrame = df_raw - if self.config.get('fee', None) is not None: - self.fee = self.config['fee'] + if self.config.get("fee", None) is not None: + self.fee = self.config["fee"] else: self.fee = fee @@ -82,14 +94,22 @@ class BaseEnvironment(gym.Env): self.can_short: bool = can_short self.live: bool = live if not self.live and self.add_state_info: - raise OperationalException("`add_state_info` is not available in backtesting. Change " - "parameter to false in your rl_config. See `add_state_info` " - "docs for more info.") + raise OperationalException( + "`add_state_info` is not available in backtesting. Change " + "parameter to false in your rl_config. See `add_state_info` " + "docs for more info." 
+ ) self.seed(seed) self.reset_env(df, prices, window_size, reward_kwargs, starting_point) - def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, - reward_kwargs: dict, starting_point=True): + def reset_env( + self, + df: DataFrame, + prices: DataFrame, + window_size: int, + reward_kwargs: dict, + starting_point=True, + ): """ Resets the environment when the agent fails (in our case, if the drawdown exceeds the user set max_training_drawdown_pct) @@ -113,8 +133,7 @@ class BaseEnvironment(gym.Env): self.total_features = self.signal_features.shape[1] self.shape = (window_size, self.total_features) self.set_action_space() - self.observation_space = spaces.Box( - low=-1, high=1, shape=self.shape, dtype=np.float32) + self.observation_space = spaces.Box(low=-1, high=1, shape=self.shape, dtype=np.float32) # episode self._start_tick: int = self.window_size @@ -151,8 +170,13 @@ class BaseEnvironment(gym.Env): self.np_random, seed = seeding.np_random(seed) return [seed] - def tensorboard_log(self, metric: str, value: Optional[Union[int, float]] = None, - inc: Optional[bool] = None, category: str = "custom"): + def tensorboard_log( + self, + metric: str, + value: Optional[Union[int, float]] = None, + inc: Optional[bool] = None, + category: str = "custom", + ): """ Function builds the tensorboard_metrics dictionary to be parsed by the TensorboardCallback. This @@ -195,7 +219,7 @@ class BaseEnvironment(gym.Env): self._done = False if self.starting_point is True: - if self.rl_config.get('randomize_starting_position', False): + if self.rl_config.get("randomize_starting_position", False): length_of_data = int(self._end_tick / 4) start_tick = random.randint(self.window_size + 1, length_of_data) self._start_tick = start_tick @@ -207,8 +231,8 @@ class BaseEnvironment(gym.Env): self._last_trade_tick = None self._position = Positions.Neutral - self.total_reward = 0. - self._total_profit = 1. # unit + self.total_reward = 0.0 + self._total_profit = 1.0 # unit self.history = {} self.trade_history = [] self.portfolio_log_returns = np.zeros(len(self.prices)) @@ -231,18 +255,19 @@ class BaseEnvironment(gym.Env): This may or may not be independent of action types, user can inherit this in their custom "MyRLEnv" """ - features_window = self.signal_features[( - self._current_tick - self.window_size):self._current_tick] + features_window = self.signal_features[ + (self._current_tick - self.window_size) : self._current_tick + ] if self.add_state_info: - features_and_state = DataFrame(np.zeros((len(features_window), 3)), - columns=['current_profit_pct', - 'position', - 'trade_duration'], - index=features_window.index) + features_and_state = DataFrame( + np.zeros((len(features_window), 3)), + columns=["current_profit_pct", "position", "trade_duration"], + index=features_window.index, + ) - features_and_state['current_profit_pct'] = self.get_unrealized_profit() - features_and_state['position'] = self._position.value - features_and_state['trade_duration'] = self.get_trade_duration() + features_and_state["current_profit_pct"] = self.get_unrealized_profit() + features_and_state["position"] = self._position.value + features_and_state["trade_duration"] = self.get_trade_duration() features_and_state = pd.concat([features_window, features_and_state], axis=1) return features_and_state else: @@ -262,10 +287,10 @@ class BaseEnvironment(gym.Env): Get the unrealized profit if the agent is in a trade """ if self._last_trade_tick is None: - return 0. + return 0.0 if self._position == Positions.Neutral: - return 0. 
+ return 0.0 elif self._position == Positions.Short: current_price = self.add_entry_fee(self.prices.iloc[self._current_tick].open) last_trade_price = self.add_exit_fee(self.prices.iloc[self._last_trade_tick].open) @@ -275,7 +300,7 @@ class BaseEnvironment(gym.Env): last_trade_price = self.add_entry_fee(self.prices.iloc[self._last_trade_tick].open) return (current_price - last_trade_price) / last_trade_price else: - return 0. + return 0.0 @abstractmethod def is_tradesignal(self, action: int) -> bool: diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 71fd9c28c..225ed3d50 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -30,10 +30,10 @@ from freqtrade.persistence import Trade logger = logging.getLogger(__name__) -torch.multiprocessing.set_sharing_strategy('file_system') +torch.multiprocessing.set_sharing_strategy("file_system") -SB3_MODELS = ['PPO', 'A2C', 'DQN'] -SB3_CONTRIB_MODELS = ['TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'QRDQN'] +SB3_MODELS = ["PPO", "A2C", "DQN"] +SB3_CONTRIB_MODELS = ["TRPO", "ARS", "RecurrentPPO", "MaskablePPO", "QRDQN"] class BaseReinforcementLearningModel(IFreqaiModel): @@ -42,57 +42,60 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ def __init__(self, **kwargs) -> None: - super().__init__(config=kwargs['config']) - self.max_threads = min(self.freqai_info['rl_config'].get( - 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) + super().__init__(config=kwargs["config"]) + self.max_threads = min( + self.freqai_info["rl_config"].get("cpu_count", 1), + max(int(self.max_system_threads / 2), 1), + ) th.set_num_threads(self.max_threads) - self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] + self.reward_params = self.freqai_info["rl_config"]["model_reward_parameters"] self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env() self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env() self.eval_callback: Optional[MaskableEvalCallback] = None - self.model_type = self.freqai_info['rl_config']['model_type'] - self.rl_config = self.freqai_info['rl_config'] + self.model_type = self.freqai_info["rl_config"]["model_type"] + self.rl_config = self.freqai_info["rl_config"] self.df_raw: DataFrame = DataFrame() - self.continual_learning = self.freqai_info.get('continual_learning', False) + self.continual_learning = self.freqai_info.get("continual_learning", False) if self.model_type in SB3_MODELS: - import_str = 'stable_baselines3' + import_str = "stable_baselines3" elif self.model_type in SB3_CONTRIB_MODELS: - import_str = 'sb3_contrib' + import_str = "sb3_contrib" else: - raise OperationalException(f'{self.model_type} not available in stable_baselines3 or ' - f'sb3_contrib. please choose one of {SB3_MODELS} or ' - f'{SB3_CONTRIB_MODELS}') + raise OperationalException( + f"{self.model_type} not available in stable_baselines3 or " + f"sb3_contrib. 
please choose one of {SB3_MODELS} or " + f"{SB3_CONTRIB_MODELS}" + ) mod = importlib.import_module(import_str, self.model_type) self.MODELCLASS = getattr(mod, self.model_type) - self.policy_type = self.freqai_info['rl_config']['policy_type'] + self.policy_type = self.freqai_info["rl_config"]["policy_type"] self.unset_outlier_removal() - self.net_arch = self.rl_config.get('net_arch', [128, 128]) + self.net_arch = self.rl_config.get("net_arch", [128, 128]) self.dd.model_type = import_str - self.tensorboard_callback: TensorboardCallback = \ - TensorboardCallback(verbose=1, actions=BaseActions) + self.tensorboard_callback: TensorboardCallback = TensorboardCallback( + verbose=1, actions=BaseActions + ) def unset_outlier_removal(self): """ If user has activated any function that may remove training points, this function will set them to false and warn them """ - if self.ft_params.get('use_SVM_to_remove_outliers', False): - self.ft_params.update({'use_SVM_to_remove_outliers': False}) - logger.warning('User tried to use SVM with RL. Deactivating SVM.') - if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): - self.ft_params.update({'use_DBSCAN_to_remove_outliers': False}) - logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') - if self.ft_params.get('DI_threshold', False): - self.ft_params.update({'DI_threshold': False}) - logger.warning('User tried to use DI_threshold with RL. Deactivating DI_threshold.') - if self.freqai_info['data_split_parameters'].get('shuffle', False): - self.freqai_info['data_split_parameters'].update({'shuffle': False}) - logger.warning('User tried to shuffle training data. Setting shuffle to False') + if self.ft_params.get("use_SVM_to_remove_outliers", False): + self.ft_params.update({"use_SVM_to_remove_outliers": False}) + logger.warning("User tried to use SVM with RL. Deactivating SVM.") + if self.ft_params.get("use_DBSCAN_to_remove_outliers", False): + self.ft_params.update({"use_DBSCAN_to_remove_outliers": False}) + logger.warning("User tried to use DBSCAN with RL. Deactivating DBSCAN.") + if self.ft_params.get("DI_threshold", False): + self.ft_params.update({"DI_threshold": False}) + logger.warning("User tried to use DI_threshold with RL. Deactivating DI_threshold.") + if self.freqai_info["data_split_parameters"].get("shuffle", False): + self.freqai_info["data_split_parameters"].update({"shuffle": False}) + logger.warning("User tried to shuffle training data. Setting shuffle to False") - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: + def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any: """ Filter the training data and train a model to it. Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. 
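For readers following the reformatted constructor above: the stable-baselines3 model class is resolved dynamically from its configured name. A minimal standalone sketch of that lookup (illustrative only, not part of the patch; the error handling is simplified here):

# Illustrative sketch of the dynamic model-class lookup performed in
# BaseReinforcementLearningModel.__init__ (simplified; not part of the diff).
import importlib

SB3_MODELS = ["PPO", "A2C", "DQN"]
SB3_CONTRIB_MODELS = ["TRPO", "ARS", "RecurrentPPO", "MaskablePPO", "QRDQN"]


def resolve_model_class(model_type: str):
    # Choose the providing package based on the requested algorithm name.
    if model_type in SB3_MODELS:
        import_str = "stable_baselines3"
    elif model_type in SB3_CONTRIB_MODELS:
        import_str = "sb3_contrib"
    else:
        raise ValueError(f"{model_type} not available in stable_baselines3 or sb3_contrib")
    mod = importlib.import_module(import_str)
    return getattr(mod, model_type)


# resolve_model_class("PPO") returns stable_baselines3.PPO (package must be installed).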
@@ -102,7 +105,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): :model: Trained model which can be used to inference (self.predict) """ - logger.info("--------------------Starting training " f"{pair} --------------------") + logger.info(f"--------------------Starting training {pair} --------------------") features_filtered, labels_filtered = dk.filter_features( unfiltered_df, @@ -111,8 +114,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): training_filter=True, ) - dd: Dict[str, Any] = dk.make_train_test_datasets( - features_filtered, labels_filtered) + dd: Dict[str, Any] = dk.make_train_test_datasets(features_filtered, labels_filtered) self.df_raw = copy.deepcopy(dd["train_features"]) dk.fit_labels() # FIXME useless for now, but just satiating append methods @@ -121,18 +123,18 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) - (dd["train_features"], - dd["train_labels"], - dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], - dd["train_labels"], - dd["train_weights"]) + (dd["train_features"], dd["train_labels"], dd["train_weights"]) = ( + dk.feature_pipeline.fit_transform( + dd["train_features"], dd["train_labels"], dd["train_weights"] + ) + ) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - (dd["test_features"], - dd["test_labels"], - dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], - dd["test_labels"], - dd["test_weights"]) + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + (dd["test_features"], dd["test_labels"], dd["test_weights"]) = ( + dk.feature_pipeline.transform( + dd["test_features"], dd["test_labels"], dd["test_weights"] + ) + ) logger.info( f'Training model on {len(dk.data_dictionary["train_features"].columns)}' @@ -147,9 +149,13 @@ class BaseReinforcementLearningModel(IFreqaiModel): return model - def set_train_and_eval_environments(self, data_dictionary: Dict[str, DataFrame], - prices_train: DataFrame, prices_test: DataFrame, - dk: FreqaiDataKitchen): + def set_train_and_eval_environments( + self, + data_dictionary: Dict[str, DataFrame], + prices_train: DataFrame, + prices_test: DataFrame, + dk: FreqaiDataKitchen, + ): """ User can override this if they are using a custom MyRLEnv :param data_dictionary: dict = common data dictionary containing train and test @@ -165,11 +171,14 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.train_env = self.MyRLEnv(df=train_df, prices=prices_train, **env_info) self.eval_env = Monitor(self.MyRLEnv(df=test_df, prices=prices_test, **env_info)) - self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=len(train_df), - best_model_save_path=str(dk.data_path), - use_masking=(self.model_type == 'MaskablePPO' and - is_masking_supported(self.eval_env))) + self.eval_callback = MaskableEvalCallback( + self.eval_env, + deterministic=True, + render=False, + eval_freq=len(train_df), + best_model_save_path=str(dk.data_path), + use_masking=(self.model_type == "MaskablePPO" and is_masking_supported(self.eval_env)), + ) actions = self.train_env.get_actions() self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) @@ -178,16 +187,19 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ Create dictionary of environment arguments """ - env_info = {"window_size": self.CONV_WIDTH, - "reward_kwargs": self.reward_params, - "config": self.config, - "live": self.live, - 
"can_short": self.can_short, - "pair": pair, - "df_raw": self.df_raw} + env_info = { + "window_size": self.CONV_WIDTH, + "reward_kwargs": self.reward_params, + "config": self.config, + "live": self.live, + "can_short": self.can_short, + "pair": pair, + "df_raw": self.df_raw, + } if self.data_provider: - env_info["fee"] = self.data_provider._exchange \ - .get_fee(symbol=self.data_provider.current_whitelist()[0]) # type: ignore + env_info["fee"] = self.data_provider._exchange.get_fee( # type: ignore + symbol=self.data_provider.current_whitelist()[0] + ) return env_info @@ -219,11 +231,12 @@ class BaseReinforcementLearningModel(IFreqaiModel): for trade in open_trades: if trade.pair == pair: if self.data_provider._exchange is None: # type: ignore - logger.error('No exchange available.') + logger.error("No exchange available.") return 0, 0, 0 else: current_rate = self.data_provider._exchange.get_rate( # type: ignore - pair, refresh=False, side="exit", is_short=trade.is_short) + pair, refresh=False, side="exit", is_short=trade.is_short + ) now = datetime.now(timezone.utc).timestamp() trade_duration = int((now - trade.open_date_utc.timestamp()) / self.base_tf_seconds) @@ -255,16 +268,17 @@ class BaseReinforcementLearningModel(IFreqaiModel): dk.data_dictionary["prediction_features"] = self.drop_ohlc_from_df(filtered_dataframe, dk) dk.data_dictionary["prediction_features"], _, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) - pred_df = self.rl_model_predict( - dk.data_dictionary["prediction_features"], dk, self.model) + pred_df = self.rl_model_predict(dk.data_dictionary["prediction_features"], dk, self.model) pred_df.fillna(0, inplace=True) return (pred_df, dk.do_predict) - def rl_model_predict(self, dataframe: DataFrame, - dk: FreqaiDataKitchen, model: Any) -> DataFrame: + def rl_model_predict( + self, dataframe: DataFrame, dk: FreqaiDataKitchen, model: Any + ) -> DataFrame: """ A helper function to make predictions in the Reinforcement learning module. :param dataframe: DataFrame = the dataframe of features to make the predictions on @@ -275,11 +289,11 @@ class BaseReinforcementLearningModel(IFreqaiModel): def _predict(window): observations = dataframe.iloc[window.index] - if self.live and self.rl_config.get('add_state_info', False): + if self.live and self.rl_config.get("add_state_info", False): market_side, current_profit, trade_duration = self.get_state_info(dk.pair) - observations['current_profit_pct'] = current_profit - observations['position'] = market_side - observations['trade_duration'] = trade_duration + observations["current_profit_pct"] = current_profit + observations["position"] = market_side + observations["trade_duration"] = trade_duration res, _ = model.predict(observations, deterministic=True) return res @@ -287,23 +301,31 @@ class BaseReinforcementLearningModel(IFreqaiModel): return output - def build_ohlc_price_dataframes(self, data_dictionary: dict, - pair: str, dk: FreqaiDataKitchen) -> Tuple[DataFrame, - DataFrame]: + def build_ohlc_price_dataframes( + self, data_dictionary: dict, pair: str, dk: FreqaiDataKitchen + ) -> Tuple[DataFrame, DataFrame]: """ Builds the train prices and test prices for the environment. 
""" - pair = pair.replace(':', '') + pair = pair.replace(":", "") train_df = data_dictionary["train_features"] test_df = data_dictionary["test_features"] # price data for model training and evaluation - tf = self.config['timeframe'] - rename_dict = {'%-raw_open': 'open', '%-raw_low': 'low', - '%-raw_high': ' high', '%-raw_close': 'close'} - rename_dict_old = {f'%-{pair}raw_open_{tf}': 'open', f'%-{pair}raw_low_{tf}': 'low', - f'%-{pair}raw_high_{tf}': ' high', f'%-{pair}raw_close_{tf}': 'close'} + tf = self.config["timeframe"] + rename_dict = { + "%-raw_open": "open", + "%-raw_low": "low", + "%-raw_high": " high", + "%-raw_close": "close", + } + rename_dict_old = { + f"%-{pair}raw_open_{tf}": "open", + f"%-{pair}raw_low_{tf}": "low", + f"%-{pair}raw_high_{tf}": " high", + f"%-{pair}raw_close_{tf}": "close", + } prices_train = train_df.filter(rename_dict.keys(), axis=1) prices_train_old = train_df.filter(rename_dict_old.keys(), axis=1) @@ -311,17 +333,21 @@ class BaseReinforcementLearningModel(IFreqaiModel): if not prices_train_old.empty: prices_train = prices_train_old rename_dict = rename_dict_old - logger.warning('Reinforcement learning module didn\'t find the correct raw prices ' - 'assigned in feature_engineering_standard(). ' - 'Please assign them with:\n' - 'dataframe["%-raw_close"] = dataframe["close"]\n' - 'dataframe["%-raw_open"] = dataframe["open"]\n' - 'dataframe["%-raw_high"] = dataframe["high"]\n' - 'dataframe["%-raw_low"] = dataframe["low"]\n' - 'inside `feature_engineering_standard()') + logger.warning( + "Reinforcement learning module didn't find the correct raw prices " + "assigned in feature_engineering_standard(). " + "Please assign them with:\n" + 'dataframe["%-raw_close"] = dataframe["close"]\n' + 'dataframe["%-raw_open"] = dataframe["open"]\n' + 'dataframe["%-raw_high"] = dataframe["high"]\n' + 'dataframe["%-raw_low"] = dataframe["low"]\n' + "inside `feature_engineering_standard()" + ) elif prices_train.empty: - raise OperationalException("No prices found, please follow log warning " - "instructions to correct the strategy.") + raise OperationalException( + "No prices found, please follow log warning " + "instructions to correct the strategy." + ) prices_train.rename(columns=rename_dict, inplace=True) prices_train.reset_index(drop=True) @@ -339,7 +365,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): """ Given a dataframe, drop the ohlc data """ - drop_list = ['%-raw_open', '%-raw_low', '%-raw_high', '%-raw_close'] + drop_list = ["%-raw_open", "%-raw_low", "%-raw_high", "%-raw_close"] if self.rl_config["drop_ohlc_from_features"]: df.drop(drop_list, axis=1, inplace=True) @@ -358,7 +384,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): if exists: model = self.MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") else: - logger.info('No model file on disk to continue learning from.') + logger.info("No model file on disk to continue learning from.") return model @@ -400,15 +426,18 @@ class BaseReinforcementLearningModel(IFreqaiModel): return -2 pnl = self.get_unrealized_profit() - factor = 100. 
+ factor = 100.0 # you can use feature values from dataframe - rsi_now = self.raw_features[f"%-rsi-period-10_shift-1_{self.pair}_" - f"{self.config['timeframe']}"].iloc[self._current_tick] + rsi_now = self.raw_features[ + f"%-rsi-period-10_shift-1_{self.pair}_{self.config['timeframe']}" + ].iloc[self._current_tick] # reward agent for entering trades - if (action in (Actions.Long_enter.value, Actions.Short_enter.value) - and self._position == Positions.Neutral): + if ( + action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral + ): if rsi_now < 40: factor = 40 / rsi_now else: @@ -419,7 +448,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + max_trade_duration = self.rl_config.get("max_trade_duration_candles", 300) if self._last_trade_tick: trade_duration = self._current_tick - self._last_trade_tick else: @@ -431,28 +460,36 @@ class BaseReinforcementLearningModel(IFreqaiModel): factor *= 0.5 # discourage sitting in position - if (self._position in (Positions.Short, Positions.Long) and - action == Actions.Neutral.value): + if ( + self._position in (Positions.Short, Positions.Long) + and action == Actions.Neutral.value + ): return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(pnl * factor) - return 0. + return 0.0 -def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int, - seed: int, train_df: DataFrame, price: DataFrame, - env_info: Dict[str, Any] = {}) -> Callable: +def make_env( + MyRLEnv: Type[BaseEnvironment], + env_id: str, + rank: int, + seed: int, + train_df: DataFrame, + price: DataFrame, + env_info: Dict[str, Any] = {}, +) -> Callable: """ Utility function for multiprocessed env. @@ -465,10 +502,9 @@ def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int, """ def _init() -> gym.Env: - - env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank, - **env_info) + env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank, **env_info) return env + set_random_seed(seed) return _init diff --git a/freqtrade/freqai/base_models/BaseClassifierModel.py b/freqtrade/freqai/base_models/BaseClassifierModel.py index 42b5c1a0e..dfe5ae3b0 100644 --- a/freqtrade/freqai/base_models/BaseClassifierModel.py +++ b/freqtrade/freqai/base_models/BaseClassifierModel.py @@ -21,9 +21,7 @@ class BaseClassifierModel(IFreqaiModel): such as prediction_models/CatboostClassifier.py for guidance. """ - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: + def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any: """ Filter the training data and train a model to it. Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. 
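A hedged usage sketch for the make_env() factory reformatted in BaseReinforcementLearningModel.py above, roughly how a multiprocess learner might wire it into a vectorized environment; the worker count, seed and env ids are example values, and MyRLEnv stands for any BaseEnvironment subclass (this wiring is not taken from the patch):

# Sketch only: feeding make_env() factories into a SubprocVecEnv. Assumes make_env
# from BaseReinforcementLearningModel is in scope, plus a MyRLEnv subclass of
# BaseEnvironment and the env_info dict produced by pack_env_dict().
from stable_baselines3.common.vec_env import SubprocVecEnv


def build_vec_env(MyRLEnv, train_df, prices, env_info, num_cpu=4, seed=42):
    # One factory per worker; each worker gets a distinct seed via its rank.
    env_fns = [
        make_env(MyRLEnv, f"train_env{i}", i, seed, train_df, prices, env_info)
        for i in range(num_cpu)
    ]
    return SubprocVecEnv(env_fns)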
@@ -47,26 +45,28 @@ class BaseClassifierModel(IFreqaiModel): start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d") end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d") - logger.info(f"-------------------- Training on data from {start_date} to " - f"{end_date} --------------------") + logger.info( + f"-------------------- Training on data from {start_date} to " + f"{end_date} --------------------" + ) # split data into train/test data. dd = dk.make_train_test_datasets(features_filtered, labels_filtered) if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: dk.fit_labels() dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) - (dd["train_features"], - dd["train_labels"], - dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], - dd["train_labels"], - dd["train_weights"]) + (dd["train_features"], dd["train_labels"], dd["train_weights"]) = ( + dk.feature_pipeline.fit_transform( + dd["train_features"], dd["train_labels"], dd["train_weights"] + ) + ) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - (dd["test_features"], - dd["test_labels"], - dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], - dd["test_labels"], - dd["test_weights"]) + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + (dd["test_features"], dd["test_labels"], dd["test_weights"]) = ( + dk.feature_pipeline.transform( + dd["test_features"], dd["test_labels"], dd["test_weights"] + ) + ) logger.info( f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" @@ -77,8 +77,10 @@ class BaseClassifierModel(IFreqaiModel): end_time = time() - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") + logger.info( + f"-------------------- Done training {pair} " + f"({end_time - start_time:.2f} secs) --------------------" + ) return model @@ -102,7 +104,8 @@ class BaseClassifierModel(IFreqaiModel): dk.data_dictionary["prediction_features"] = filtered_df dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) if self.CONV_WIDTH == 1: diff --git a/freqtrade/freqai/base_models/BasePyTorchClassifier.py b/freqtrade/freqai/base_models/BasePyTorchClassifier.py index 4780af818..86eadb7bd 100644 --- a/freqtrade/freqai/base_models/BasePyTorchClassifier.py +++ b/freqtrade/freqai/base_models/BasePyTorchClassifier.py @@ -59,8 +59,7 @@ class BasePyTorchClassifier(BasePyTorchModel): class_names = self.model.model_meta_data.get("class_names", None) if not class_names: raise ValueError( - "Missing class names. " - "self.model.model_meta_data['class_names'] is None." + "Missing class names. self.model.model_meta_data['class_names'] is None." 
) if not self.class_name_to_index: @@ -74,11 +73,11 @@ class BasePyTorchClassifier(BasePyTorchModel): dk.data_dictionary["prediction_features"] = filtered_df dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) x = self.data_convertor.convert_x( - dk.data_dictionary["prediction_features"], - device=self.device + dk.data_dictionary["prediction_features"], device=self.device ) self.model.model.eval() logits = self.model.model(x) @@ -100,10 +99,10 @@ class BasePyTorchClassifier(BasePyTorchModel): return (pred_df, dk.do_predict) def encode_class_names( - self, - data_dictionary: Dict[str, pd.DataFrame], - dk: FreqaiDataKitchen, - class_names: List[str], + self, + data_dictionary: Dict[str, pd.DataFrame], + dk: FreqaiDataKitchen, + class_names: List[str], ): """ encode class name, str -> int @@ -120,15 +119,12 @@ class BasePyTorchClassifier(BasePyTorchModel): ) @staticmethod - def assert_valid_class_names( - target_column: pd.Series, - class_names: List[str] - ): + def assert_valid_class_names(target_column: pd.Series, class_names: List[str]): non_defined_labels = set(target_column) - set(class_names) if len(non_defined_labels) != 0: raise OperationalException( f"Found non defined labels: {non_defined_labels}, ", - f"expecting labels: {class_names}" + f"expecting labels: {class_names}", ) def decode_class_names(self, class_ints: torch.Tensor) -> List[str]: @@ -144,10 +140,10 @@ class BasePyTorchClassifier(BasePyTorchModel): logger.info(f"encoded class name to index: {self.class_name_to_index}") def convert_label_column_to_int( - self, - data_dictionary: Dict[str, pd.DataFrame], - dk: FreqaiDataKitchen, - class_names: List[str] + self, + data_dictionary: Dict[str, pd.DataFrame], + dk: FreqaiDataKitchen, + class_names: List[str], ): self.init_class_names_to_index_mapping(class_names) self.encode_class_names(data_dictionary, dk, class_names) @@ -162,9 +158,7 @@ class BasePyTorchClassifier(BasePyTorchModel): return self.class_names - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: + def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any: """ Filter the training data and train a model to it. Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. 
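The encode/decode helpers reformatted above maintain a str-to-int mapping for class labels; a standalone round trip with invented labels (the label values are not taken from the patch):

# Toy round trip of the class-name mapping kept by BasePyTorchClassifier.
# The labels ("down", "neutral", "up") are invented for this example.
import pandas as pd
import torch

class_names = ["down", "neutral", "up"]
class_name_to_index = {name: i for i, name in enumerate(class_names)}
index_to_class_name = {i: name for i, name in enumerate(class_names)}

labels = pd.Series(["up", "down", "up", "neutral"])
encoded = labels.map(class_name_to_index)                    # str -> int
class_ints = torch.tensor(encoded.values)                    # what argmax over logits yields
decoded = [index_to_class_name[int(i)] for i in class_ints]  # int -> str
assert decoded == list(labels)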
@@ -191,18 +185,18 @@ class BasePyTorchClassifier(BasePyTorchModel): dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) - (dd["train_features"], - dd["train_labels"], - dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], - dd["train_labels"], - dd["train_weights"]) + (dd["train_features"], dd["train_labels"], dd["train_weights"]) = ( + dk.feature_pipeline.fit_transform( + dd["train_features"], dd["train_labels"], dd["train_weights"] + ) + ) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - (dd["test_features"], - dd["test_labels"], - dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], - dd["test_labels"], - dd["test_weights"]) + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + (dd["test_features"], dd["test_labels"], dd["test_weights"]) = ( + dk.feature_pipeline.transform( + dd["test_features"], dd["test_labels"], dd["test_weights"] + ) + ) logger.info( f"Training model on {len(dk.data_dictionary['train_features'].columns)} features" @@ -212,7 +206,9 @@ class BasePyTorchClassifier(BasePyTorchModel): model = self.fit(dd, dk) end_time = time() - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") + logger.info( + f"-------------------- Done training {pair} " + f"({end_time - start_time:.2f} secs) --------------------" + ) return model diff --git a/freqtrade/freqai/base_models/BasePyTorchModel.py b/freqtrade/freqai/base_models/BasePyTorchModel.py index 71369a146..50b023021 100644 --- a/freqtrade/freqai/base_models/BasePyTorchModel.py +++ b/freqtrade/freqai/base_models/BasePyTorchModel.py @@ -21,7 +21,7 @@ class BasePyTorchModel(IFreqaiModel, ABC): super().__init__(config=kwargs["config"]) self.dd.model_type = "pytorch" self.device = "cuda" if torch.cuda.is_available() else "cpu" - test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size') + test_size = self.freqai_info.get("data_split_parameters", {}).get("test_size") self.splits = ["train", "test"] if test_size != 0 else ["train"] self.window_size = self.freqai_info.get("conv_width", 1) diff --git a/freqtrade/freqai/base_models/BasePyTorchRegressor.py b/freqtrade/freqai/base_models/BasePyTorchRegressor.py index 83fea4ef9..9b429db23 100644 --- a/freqtrade/freqai/base_models/BasePyTorchRegressor.py +++ b/freqtrade/freqai/base_models/BasePyTorchRegressor.py @@ -41,11 +41,11 @@ class BasePyTorchRegressor(BasePyTorchModel): dk.data_dictionary["prediction_features"] = filtered_df dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) x = self.data_convertor.convert_x( - dk.data_dictionary["prediction_features"], - device=self.device + dk.data_dictionary["prediction_features"], device=self.device ) self.model.model.eval() y = self.model.model(x) @@ -59,9 +59,7 @@ class BasePyTorchRegressor(BasePyTorchModel): dk.do_predict = outliers return (pred_df, dk.do_predict) - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: + def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any: """ Filter the training data and train a model to it. Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. 
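The train() methods reformatted throughout this diff share one pipeline discipline: fit_transform on the training split, transform-only on the test split. FreqAI uses its own pipeline classes; the general pattern can be sketched with scikit-learn (illustrative only, not the project's pipeline):

# General pattern only (not FreqAI's own pipeline classes): fit the scaler on the
# training data, then reuse the fitted parameters on the test data without re-fitting.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

rng = np.random.default_rng(0)
train_features = rng.random((100, 5))
test_features = rng.random((20, 5))

scaler = MinMaxScaler()
train_scaled = scaler.fit_transform(train_features)  # learn min/max from train only
test_scaled = scaler.transform(test_features)        # apply them to test, no leakage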
@@ -91,19 +89,19 @@ class BasePyTorchRegressor(BasePyTorchModel): dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"]) dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"]) - (dd["train_features"], - dd["train_labels"], - dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], - dd["train_labels"], - dd["train_weights"]) + (dd["train_features"], dd["train_labels"], dd["train_weights"]) = ( + dk.feature_pipeline.fit_transform( + dd["train_features"], dd["train_labels"], dd["train_weights"] + ) + ) dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"]) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - (dd["test_features"], - dd["test_labels"], - dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], - dd["test_labels"], - dd["test_weights"]) + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + (dd["test_features"], dd["test_labels"], dd["test_weights"]) = ( + dk.feature_pipeline.transform( + dd["test_features"], dd["test_labels"], dd["test_weights"] + ) + ) dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"]) logger.info( @@ -114,7 +112,9 @@ class BasePyTorchRegressor(BasePyTorchModel): model = self.fit(dd, dk) end_time = time() - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") + logger.info( + f"-------------------- Done training {pair} " + f"({end_time - start_time:.2f} secs) --------------------" + ) return model diff --git a/freqtrade/freqai/base_models/BaseRegressionModel.py b/freqtrade/freqai/base_models/BaseRegressionModel.py index 179e4be87..bbadac0f0 100644 --- a/freqtrade/freqai/base_models/BaseRegressionModel.py +++ b/freqtrade/freqai/base_models/BaseRegressionModel.py @@ -20,9 +20,7 @@ class BaseRegressionModel(IFreqaiModel): such as prediction_models/CatboostRegressor.py for guidance. """ - def train( - self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs - ) -> Any: + def train(self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs) -> Any: """ Filter the training data and train a model to it. Train makes heavy use of the datakitchen for storing, saving, loading, and analyzing the data. @@ -46,8 +44,10 @@ class BaseRegressionModel(IFreqaiModel): start_date = unfiltered_df["date"].iloc[0].strftime("%Y-%m-%d") end_date = unfiltered_df["date"].iloc[-1].strftime("%Y-%m-%d") - logger.info(f"-------------------- Training on data from {start_date} to " - f"{end_date} --------------------") + logger.info( + f"-------------------- Training on data from {start_date} to " + f"{end_date} --------------------" + ) # split data into train/test data. 
dd = dk.make_train_test_datasets(features_filtered, labels_filtered) if not self.freqai_info.get("fit_live_predictions_candles", 0) or not self.live: @@ -55,19 +55,19 @@ class BaseRegressionModel(IFreqaiModel): dk.feature_pipeline = self.define_data_pipeline(threads=dk.thread_count) dk.label_pipeline = self.define_label_pipeline(threads=dk.thread_count) - (dd["train_features"], - dd["train_labels"], - dd["train_weights"]) = dk.feature_pipeline.fit_transform(dd["train_features"], - dd["train_labels"], - dd["train_weights"]) + (dd["train_features"], dd["train_labels"], dd["train_weights"]) = ( + dk.feature_pipeline.fit_transform( + dd["train_features"], dd["train_labels"], dd["train_weights"] + ) + ) dd["train_labels"], _, _ = dk.label_pipeline.fit_transform(dd["train_labels"]) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - (dd["test_features"], - dd["test_labels"], - dd["test_weights"]) = dk.feature_pipeline.transform(dd["test_features"], - dd["test_labels"], - dd["test_weights"]) + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + (dd["test_features"], dd["test_labels"], dd["test_weights"]) = ( + dk.feature_pipeline.transform( + dd["test_features"], dd["test_labels"], dd["test_weights"] + ) + ) dd["test_labels"], _, _ = dk.label_pipeline.transform(dd["test_labels"]) logger.info( @@ -79,8 +79,10 @@ class BaseRegressionModel(IFreqaiModel): end_time = time() - logger.info(f"-------------------- Done training {pair} " - f"({end_time - start_time:.2f} secs) --------------------") + logger.info( + f"-------------------- Done training {pair} " + f"({end_time - start_time:.2f} secs) --------------------" + ) return model @@ -102,7 +104,8 @@ class BaseRegressionModel(IFreqaiModel): ) dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) predictions = self.model.predict(dk.data_dictionary["prediction_features"]) if self.CONV_WIDTH == 1: diff --git a/freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py b/freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py index 4646bb9a8..c83a19bb5 100644 --- a/freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py +++ b/freqtrade/freqai/base_models/FreqaiMultiOutputClassifier.py @@ -9,7 +9,6 @@ from freqtrade.exceptions import OperationalException class FreqaiMultiOutputClassifier(MultiOutputClassifier): - def fit(self, X, y, sample_weight=None, fit_params=None): """Fit the model to data, separately for each output variable. Parameters @@ -48,18 +47,14 @@ class FreqaiMultiOutputClassifier(MultiOutputClassifier): "multi-output regression but has only one." 
) - if sample_weight is not None and not has_fit_parameter( - self.estimator, "sample_weight" - ): + if sample_weight is not None and not has_fit_parameter(self.estimator, "sample_weight"): raise ValueError("Underlying estimator does not support sample weights.") if not fit_params: fit_params = [None] * y.shape[1] self.estimators_ = Parallel(n_jobs=self.n_jobs)( - delayed(_fit_estimator)( - self.estimator, X, y[:, i], sample_weight, **fit_params[i] - ) + delayed(_fit_estimator)(self.estimator, X, y[:, i], sample_weight, **fit_params[i]) for i in range(y.shape[1]) ) @@ -67,8 +62,9 @@ class FreqaiMultiOutputClassifier(MultiOutputClassifier): for estimator in self.estimators_: self.classes_.extend(estimator.classes_) if len(set(self.classes_)) != len(self.classes_): - raise OperationalException(f"Class labels must be unique across targets: " - f"{self.classes_}") + raise OperationalException( + f"Class labels must be unique across targets: {self.classes_}" + ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ diff --git a/freqtrade/freqai/base_models/FreqaiMultiOutputRegressor.py b/freqtrade/freqai/base_models/FreqaiMultiOutputRegressor.py index a6cc4f39b..32ab0e16d 100644 --- a/freqtrade/freqai/base_models/FreqaiMultiOutputRegressor.py +++ b/freqtrade/freqai/base_models/FreqaiMultiOutputRegressor.py @@ -4,7 +4,6 @@ from sklearn.utils.validation import has_fit_parameter class FreqaiMultiOutputRegressor(MultiOutputRegressor): - def fit(self, X, y, sample_weight=None, fit_params=None): """Fit the model to data, separately for each output variable. Parameters @@ -40,18 +39,14 @@ class FreqaiMultiOutputRegressor(MultiOutputRegressor): "multi-output regression but has only one." ) - if sample_weight is not None and not has_fit_parameter( - self.estimator, "sample_weight" - ): + if sample_weight is not None and not has_fit_parameter(self.estimator, "sample_weight"): raise ValueError("Underlying estimator does not support sample weights.") if not fit_params: fit_params = [None] * y.shape[1] self.estimators_ = Parallel(n_jobs=self.n_jobs)( - delayed(_fit_estimator)( - self.estimator, X, y[:, i], sample_weight, **fit_params[i] - ) + delayed(_fit_estimator)(self.estimator, X, y[:, i], sample_weight, **fit_params[i]) for i in range(y.shape[1]) ) diff --git a/freqtrade/freqai/data_drawer.py b/freqtrade/freqai/data_drawer.py index 77462f311..37780a945 100644 --- a/freqtrade/freqai/data_drawer.py +++ b/freqtrade/freqai/data_drawer.py @@ -66,7 +66,6 @@ class FreqaiDataDrawer: """ def __init__(self, full_path: Path, config: Config): - self.config = config self.freqai_info = config.get("freqai", {}) # dictionary holding all pair metadata necessary to load in from disk @@ -81,7 +80,8 @@ class FreqaiDataDrawer: self.full_path = full_path self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl") self.historic_predictions_bkp_path = Path( - self.full_path / "historic_predictions.backup.pkl") + self.full_path / "historic_predictions.backup.pkl" + ) self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json") self.global_metadata_path = Path(self.full_path / "global_metadata.json") self.metric_tracker_path = Path(self.full_path / "metric_tracker.json") @@ -96,9 +96,12 @@ class FreqaiDataDrawer: self.metric_tracker_lock = threading.Lock() self.old_DBSCAN_eps: Dict[str, float] = {} self.empty_pair_dict: pair_info = { - "model_filename": "", "trained_timestamp": 0, - "data_path": "", "extras": {}} - self.model_type = 
self.freqai_info.get('model_save_type', 'joblib') + "model_filename": "", + "trained_timestamp": 0, + "data_path": "", + "extras": {}, + } + self.model_type = self.freqai_info.get("model_save_type", "joblib") def update_metric_tracker(self, metric: str, value: float, pair: str) -> None: """ @@ -109,11 +112,11 @@ class FreqaiDataDrawer: if pair not in self.metric_tracker: self.metric_tracker[pair] = {} if metric not in self.metric_tracker[pair]: - self.metric_tracker[pair][metric] = {'timestamp': [], 'value': []} + self.metric_tracker[pair][metric] = {"timestamp": [], "value": []} timestamp = int(datetime.now(timezone.utc).timestamp()) - self.metric_tracker[pair][metric]['value'].append(value) - self.metric_tracker[pair][metric]['timestamp'].append(timestamp) + self.metric_tracker[pair][metric]["value"].append(value) + self.metric_tracker[pair][metric]["timestamp"].append(timestamp) def collect_metrics(self, time_spent: float, pair: str): """ @@ -121,10 +124,10 @@ class FreqaiDataDrawer: """ load1, load5, load15 = psutil.getloadavg() cpus = psutil.cpu_count() - self.update_metric_tracker('train_time', time_spent, pair) - self.update_metric_tracker('cpu_load1min', load1 / cpus, pair) - self.update_metric_tracker('cpu_load5min', load5 / cpus, pair) - self.update_metric_tracker('cpu_load15min', load15 / cpus, pair) + self.update_metric_tracker("train_time", time_spent, pair) + self.update_metric_tracker("cpu_load1min", load1 / cpus, pair) + self.update_metric_tracker("cpu_load5min", load5 / cpus, pair) + self.update_metric_tracker("cpu_load15min", load15 / cpus, pair) def load_global_metadata_from_disk(self): """ @@ -155,7 +158,7 @@ class FreqaiDataDrawer: Tries to load an existing metrics dictionary if the user wants to collect metrics. """ - if self.freqai_info.get('write_metrics_to_disk', False): + if self.freqai_info.get("write_metrics_to_disk", False): exists = self.metric_tracker_path.is_file() if exists: with self.metric_tracker_path.open("r") as fp: @@ -181,10 +184,11 @@ class FreqaiDataDrawer: ) except EOFError: logger.warning( - 'Historical prediction file was corrupted. Trying to load backup file.') + "Historical prediction file was corrupted. Trying to load backup file." + ) with self.historic_predictions_bkp_path.open("rb") as fp: self.historic_predictions = cloudpickle.load(fp) - logger.warning('FreqAI successfully loaded the backup historical predictions file.') + logger.warning("FreqAI successfully loaded the backup historical predictions file.") else: logger.info("Could not find existing historic_predictions, starting from scratch") @@ -206,27 +210,33 @@ class FreqaiDataDrawer: Save metric tracker of all pair metrics collected. """ with self.save_lock: - with self.metric_tracker_path.open('w') as fp: - rapidjson.dump(self.metric_tracker, fp, default=self.np_encoder, - number_mode=rapidjson.NM_NATIVE) + with self.metric_tracker_path.open("w") as fp: + rapidjson.dump( + self.metric_tracker, + fp, + default=self.np_encoder, + number_mode=rapidjson.NM_NATIVE, + ) def save_drawer_to_disk(self) -> None: """ Save data drawer full of all pair model metadata in present model folder. 
""" with self.save_lock: - with self.pair_dictionary_path.open('w') as fp: - rapidjson.dump(self.pair_dict, fp, default=self.np_encoder, - number_mode=rapidjson.NM_NATIVE) + with self.pair_dictionary_path.open("w") as fp: + rapidjson.dump( + self.pair_dict, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE + ) def save_global_metadata_to_disk(self, metadata: Dict[str, Any]): """ Save global metadata json to disk """ with self.save_lock: - with self.global_metadata_path.open('w') as fp: - rapidjson.dump(metadata, fp, default=self.np_encoder, - number_mode=rapidjson.NM_NATIVE) + with self.global_metadata_path.open("w") as fp: + rapidjson.dump( + metadata, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE + ) def np_encoder(self, object): if isinstance(object, np.generic): @@ -264,9 +274,7 @@ class FreqaiDataDrawer: return def set_initial_return_values( - self, pair: str, - pred_df: DataFrame, - dataframe: DataFrame + self, pair: str, pred_df: DataFrame, dataframe: DataFrame ) -> None: """ Set the initial return values to the historical predictions dataframe. This avoids needing @@ -285,9 +293,8 @@ class FreqaiDataDrawer: new_pred["date_pred"] = dataframe["date"] # set everything to nan except date_pred - columns_to_nan = new_pred.columns.difference(['date_pred', 'date']) - new_pred[columns_to_nan] = new_pred[columns_to_nan].astype( - float).values * np.nan + columns_to_nan = new_pred.columns.difference(["date_pred", "date"]) + new_pred[columns_to_nan] = None hist_preds = self.historic_predictions[pair].copy() @@ -297,14 +304,15 @@ class FreqaiDataDrawer: # find the closest common date between new_pred and historic predictions # and cut off the new_pred dataframe at that date - common_dates = pd.merge(new_pred, hist_preds, - on="date_pred", how="inner") + common_dates = pd.merge(new_pred, hist_preds, on="date_pred", how="inner") if len(common_dates.index) > 0: - new_pred = new_pred.iloc[len(common_dates):] + new_pred = new_pred.iloc[len(common_dates) :] else: - logger.warning("No common dates found between new predictions and historic " - "predictions. You likely left your FreqAI instance offline " - f"for more than {len(dataframe.index)} candles.") + logger.warning( + "No common dates found between new predictions and historic " + "predictions. You likely left your FreqAI instance offline " + f"for more than {len(dataframe.index)} candles." + ) # Pandas warns that its keeping dtypes of non NaN columns... # yea we know and we already want that behavior. Ignoring. 
@@ -312,21 +320,22 @@ class FreqaiDataDrawer: warnings.filterwarnings("ignore", category=FutureWarning) # reindex new_pred columns to match the historic predictions dataframe new_pred_reindexed = new_pred.reindex(columns=hist_preds.columns) - df_concat = pd.concat( - [hist_preds, new_pred_reindexed], - ignore_index=True - ) + df_concat = pd.concat([hist_preds, new_pred_reindexed], ignore_index=True) # any missing values will get zeroed out so users can see the exact # downtime in FreqUI df_concat = df_concat.fillna(0) self.historic_predictions[pair] = df_concat - self.model_return_values[pair] = df_concat.tail( - len(dataframe.index)).reset_index(drop=True) + self.model_return_values[pair] = df_concat.tail(len(dataframe.index)).reset_index(drop=True) - def append_model_predictions(self, pair: str, predictions: DataFrame, - do_preds: NDArray[np.int_], - dk: FreqaiDataKitchen, strat_df: DataFrame) -> None: + def append_model_predictions( + self, + pair: str, + predictions: DataFrame, + do_preds: NDArray[np.int_], + dk: FreqaiDataKitchen, + strat_df: DataFrame, + ) -> None: """ Append model predictions to historic predictions dataframe, then set the strategy return dataframe to the tail of the historic predictions. The length of @@ -339,15 +348,9 @@ class FreqaiDataDrawer: index = self.historic_predictions[pair].index[-1:] columns = self.historic_predictions[pair].columns - zeros_df = pd.DataFrame( - np.zeros((1, len(columns))), - index=index, - columns=columns - ) + zeros_df = pd.DataFrame(np.zeros((1, len(columns))), index=index, columns=columns) self.historic_predictions[pair] = pd.concat( - [self.historic_predictions[pair], zeros_df], - ignore_index=True, - axis=0 + [self.historic_predictions[pair], zeros_df], ignore_index=True, axis=0 ) df = self.historic_predictions[pair] @@ -371,8 +374,8 @@ class FreqaiDataDrawer: df.iloc[-1, DI_values_loc] = dk.DI_values[-1] # extra values the user added within custom prediction model - if dk.data['extra_returns_per_train']: - rets = dk.data['extra_returns_per_train'] + if dk.data["extra_returns_per_train"]: + rets = dk.data["extra_returns_per_train"] for return_str in rets: return_loc = df.columns.get_loc(return_str) df.iloc[-1, return_loc] = rets[return_str] @@ -393,7 +396,8 @@ class FreqaiDataDrawer: self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True) def attach_return_values_to_return_dataframe( - self, pair: str, dataframe: DataFrame) -> DataFrame: + self, pair: str, dataframe: DataFrame + ) -> DataFrame: """ Attach the return values to the strat dataframe :param dataframe: DataFrame = strategy dataframe @@ -424,15 +428,14 @@ class FreqaiDataDrawer: if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0: dataframe["DI_values"] = 0 - if dk.data['extra_returns_per_train']: - rets = dk.data['extra_returns_per_train'] + if dk.data["extra_returns_per_train"]: + rets = dk.data["extra_returns_per_train"] for return_str in rets: dataframe[return_str] = 0 dk.return_dataframe = dataframe def purge_old_models(self) -> None: - num_keep = self.freqai_info["purge_old_models"] if not num_keep: return @@ -509,10 +512,10 @@ class FreqaiDataDrawer: save_path = Path(dk.data_path) # Save the trained model - if self.model_type == 'joblib': + if self.model_type == "joblib": with (save_path / f"{dk.model_filename}_model.joblib").open("wb") as fp: cloudpickle.dump(model, fp) - elif self.model_type == 'keras': + elif self.model_type == "keras": model.save(save_path / f"{dk.model_filename}_model.h5") elif self.model_type in 
["stable_baselines3", "sb3_contrib", "pytorch"]: model.save(save_path / f"{dk.model_filename}_model.zip") @@ -597,23 +600,25 @@ class FreqaiDataDrawer: # try to access model in memory instead of loading object from disk to save time if dk.live and coin in self.model_dictionary: model = self.model_dictionary[coin] - elif self.model_type == 'joblib': + elif self.model_type == "joblib": with (dk.data_path / f"{dk.model_filename}_model.joblib").open("rb") as fp: model = cloudpickle.load(fp) - elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type: + elif "stable_baselines" in self.model_type or "sb3_contrib" == self.model_type: mod = importlib.import_module( - self.model_type, self.freqai_info['rl_config']['model_type']) - MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type']) + self.model_type, self.freqai_info["rl_config"]["model_type"] + ) + MODELCLASS = getattr(mod, self.freqai_info["rl_config"]["model_type"]) model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model") - elif self.model_type == 'pytorch': + elif self.model_type == "pytorch": import torch + zip = torch.load(dk.data_path / f"{dk.model_filename}_model.zip") model = zip["pytrainer"] model = model.load_from_checkpoint(zip) if not model: raise OperationalException( - f"Unable to load model, ensure model exists at " f"{dk.data_path} " + f"Unable to load model, ensure model exists at {dk.data_path} " ) # load it into ram if it was loaded from disk @@ -640,23 +645,18 @@ class FreqaiDataDrawer: df_dp = strategy.dp.get_pair_dataframe(pair, tf) if len(df_dp.index) == 0: continue - if str(hist_df.iloc[-1]["date"]) == str( - df_dp.iloc[-1:]["date"].iloc[-1] - ): + if str(hist_df.iloc[-1]["date"]) == str(df_dp.iloc[-1:]["date"].iloc[-1]): continue try: - index = ( - df_dp.loc[ - df_dp["date"] == hist_df.iloc[-1]["date"] - ].index[0] - + 1 - ) + index = df_dp.loc[df_dp["date"] == hist_df.iloc[-1]["date"]].index[0] + 1 except IndexError: - if hist_df.iloc[-1]['date'] < df_dp['date'].iloc[0]: - raise OperationalException("In memory historical data is older than " - f"oldest DataProvider candle for {pair} on " - f"timeframe {tf}") + if hist_df.iloc[-1]["date"] < df_dp["date"].iloc[0]: + raise OperationalException( + "In memory historical data is older than " + f"oldest DataProvider candle for {pair} on " + f"timeframe {tf}" + ) else: index = -1 logger.warning( @@ -678,7 +678,7 @@ class FreqaiDataDrawer: axis=0, ) - self.current_candle = history_data[dk.pair][self.config['timeframe']].iloc[-1]['date'] + self.current_candle = history_data[dk.pair][self.config["timeframe"]].iloc[-1]["date"] def load_all_pair_histories(self, timerange: TimeRange, dk: FreqaiDataKitchen) -> None: """ @@ -716,13 +716,12 @@ class FreqaiDataDrawer: corr_dataframes: Dict[Any, Any] = {} base_dataframes: Dict[Any, Any] = {} historic_data = self.historic_data - pairs = self.freqai_info["feature_parameters"].get( - "include_corr_pairlist", [] - ) + pairs = self.freqai_info["feature_parameters"].get("include_corr_pairlist", []) for tf in self.freqai_info["feature_parameters"].get("include_timeframes"): base_dataframes[tf] = dk.slice_dataframe( - timerange, historic_data[pair][tf]).reset_index(drop=True) + timerange, historic_data[pair][tf] + ).reset_index(drop=True) if pairs: for p in pairs: if pair in p: @@ -742,8 +741,8 @@ class FreqaiDataDrawer: """ if not self.historic_predictions_path.is_file(): raise OperationalException( - 'Historic predictions not found. 
Historic predictions data is required ' - 'to run backtest with the freqai-backtest-live-models option ' + "Historic predictions not found. Historic predictions data is required " + "to run backtest with the freqai-backtest-live-models option " ) self.load_historic_predictions_from_disk() @@ -759,6 +758,6 @@ class FreqaiDataDrawer: # add 1 day to string timerange to ensure BT module will load all dataframe data end_date = end_date + timedelta(days=1) backtesting_timerange = TimeRange( - 'date', 'date', int(start_date.timestamp()), int(end_date.timestamp()) + "date", "date", int(start_date.timestamp()), int(end_date.timestamp()) ) return backtesting_timerange diff --git a/freqtrade/freqai/data_kitchen.py b/freqtrade/freqai/data_kitchen.py index 834399390..d43f569d8 100644 --- a/freqtrade/freqai/data_kitchen.py +++ b/freqtrade/freqai/data_kitchen.py @@ -24,7 +24,7 @@ from freqtrade.strategy import merge_informative_pair from freqtrade.strategy.interface import IStrategy -pd.set_option('future.no_silent_downcasting', True) +pd.set_option("future.no_silent_downcasting", True) SECONDS_IN_DAY = 86400 SECONDS_IN_HOUR = 3600 @@ -98,7 +98,7 @@ class FreqaiDataKitchen: config["freqai"]["backtest_period_days"], ) - self.data['extra_returns_per_train'] = self.freqai_config.get('extra_returns_per_train', {}) + self.data["extra_returns_per_train"] = self.freqai_config.get("extra_returns_per_train", {}) if not self.freqai_config.get("data_kitchen_thread_count", 0): self.thread_count = max(int(psutil.cpu_count() * 2 - 2), 1) else: @@ -120,8 +120,7 @@ class FreqaiDataKitchen: """ self.full_path = self.get_full_models_path(self.config) self.data_path = Path( - self.full_path - / f"sub-train-{pair.split('/')[0]}_{trained_timestamp}" + self.full_path / f"sub-train-{pair.split('/')[0]}_{trained_timestamp}" ) return @@ -138,8 +137,8 @@ class FreqaiDataKitchen: """ feat_dict = self.freqai_config["feature_parameters"] - if 'shuffle' not in self.freqai_config['data_split_parameters']: - self.freqai_config["data_split_parameters"].update({'shuffle': False}) + if "shuffle" not in self.freqai_config["data_split_parameters"]: + self.freqai_config["data_split_parameters"].update({"shuffle": False}) weights: npt.ArrayLike if feat_dict.get("weight_factor", 0) > 0: @@ -147,7 +146,7 @@ class FreqaiDataKitchen: else: weights = np.ones(len(filtered_dataframe)) - if self.freqai_config.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + if self.freqai_config.get("data_split_parameters", {}).get("test_size", 0.1) != 0: ( train_features, test_features, @@ -172,26 +171,43 @@ class FreqaiDataKitchen: if feat_dict["shuffle_after_split"]: rint1 = random.randint(0, 100) rint2 = random.randint(0, 100) - train_features = train_features.sample( - frac=1, random_state=rint1).reset_index(drop=True) + train_features = train_features.sample(frac=1, random_state=rint1).reset_index( + drop=True + ) train_labels = train_labels.sample(frac=1, random_state=rint1).reset_index(drop=True) - train_weights = pd.DataFrame(train_weights).sample( - frac=1, random_state=rint1).reset_index(drop=True).to_numpy()[:, 0] + train_weights = ( + pd.DataFrame(train_weights) + .sample(frac=1, random_state=rint1) + .reset_index(drop=True) + .to_numpy()[:, 0] + ) test_features = test_features.sample(frac=1, random_state=rint2).reset_index(drop=True) test_labels = test_labels.sample(frac=1, random_state=rint2).reset_index(drop=True) - test_weights = pd.DataFrame(test_weights).sample( - frac=1, random_state=rint2).reset_index(drop=True).to_numpy()[:, 0] + 
test_weights = ( + pd.DataFrame(test_weights) + .sample(frac=1, random_state=rint2) + .reset_index(drop=True) + .to_numpy()[:, 0] + ) # Simplest way to reverse the order of training and test data: - if self.freqai_config['feature_parameters'].get('reverse_train_test_order', False): + if self.freqai_config["feature_parameters"].get("reverse_train_test_order", False): return self.build_data_dictionary( - test_features, train_features, test_labels, - train_labels, test_weights, train_weights - ) + test_features, + train_features, + test_labels, + train_labels, + test_weights, + train_weights, + ) else: return self.build_data_dictionary( - train_features, test_features, train_labels, - test_labels, train_weights, test_weights + train_features, + test_features, + train_labels, + test_labels, + train_weights, + test_weights, ) def filter_features( @@ -224,26 +240,23 @@ class FreqaiDataKitchen: drop_index = pd.isnull(filtered_df).any(axis=1) # get the rows that have NaNs, drop_index = drop_index.replace(True, 1).replace(False, 0).infer_objects(copy=False) - if (training_filter): - + if training_filter: # we don't care about total row number (total no. datapoints) in training, we only care # about removing any row with NaNs # if labels has multiple columns (user wants to train multiple modelEs), we detect here labels = unfiltered_df.filter(label_list, axis=1) drop_index_labels = pd.isnull(labels).any(axis=1) - drop_index_labels = drop_index_labels.replace( - True, 1 - ).replace(False, 0).infer_objects(copy=False) - dates = unfiltered_df['date'] + drop_index_labels = ( + drop_index_labels.replace(True, 1).replace(False, 0).infer_objects(copy=False) + ) + dates = unfiltered_df["date"] filtered_df = filtered_df[ (drop_index == 0) & (drop_index_labels == 0) ] # dropping values labels = labels[ (drop_index == 0) & (drop_index_labels == 0) ] # assuming the labels depend entirely on the dataframe here. - self.train_dates = dates[ - (drop_index == 0) & (drop_index_labels == 0) - ] + self.train_dates = dates[(drop_index == 0) & (drop_index_labels == 0)] logger.info( f"{self.pair}: dropped {len(unfiltered_df) - len(filtered_df)} training points" f" due to NaNs in populated dataset {len(unfiltered_df)}." 
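For orientation between these data_kitchen.py hunks (which only touch formatting), the fragment below is an illustrative sketch, not part of the patch: it lists the freqai configuration keys that make_train_test_datasets() reads, with placeholder values and a hypothetical variable name.

# Illustrative sketch only -- not part of the patch; values are placeholders.
freqai_config_sketch = {
    "freqai": {
        "data_split_parameters": {
            "test_size": 0.1,   # 0 skips building a test split entirely
            "shuffle": False,   # injected automatically when the key is missing
        },
        "feature_parameters": {
            "weight_factor": 0.9,               # > 0 weights recent candles more heavily
            "shuffle_after_split": False,       # re-shuffles train and test sets independently
            "reverse_train_test_order": False,  # swaps the train and test periods
            "include_timeframes": ["5m", "1h"],
            "include_corr_pairlist": ["BTC/USDT", "ETH/USDT"],
        },
    }
}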
@@ -266,7 +279,6 @@ class FreqaiDataKitchen: self.data["filter_drop_index_training"] = drop_index else: - # we are backtesting so we need to preserve row number to send back to strategy, # so now we use do_predict to avoid any prediction based on a NaN drop_index = pd.isnull(filtered_df).any(axis=1) @@ -295,7 +307,6 @@ class FreqaiDataKitchen: train_weights: Any, test_weights: Any, ) -> Dict: - self.data_dictionary = { "train_features": train_df, "test_features": test_df, @@ -303,7 +314,7 @@ class FreqaiDataKitchen: "test_labels": test_labels, "train_weights": train_weights, "test_weights": test_weights, - "train_dates": self.train_dates + "train_dates": self.train_dates, } return self.data_dictionary @@ -330,9 +341,7 @@ class FreqaiDataKitchen: full_timerange = TimeRange.parse_timerange(tr) config_timerange = TimeRange.parse_timerange(self.config["timerange"]) if config_timerange.stopts == 0: - config_timerange.stopts = int( - datetime.now(tz=timezone.utc).timestamp() - ) + config_timerange.stopts = int(datetime.now(tz=timezone.utc).timestamp()) timerange_train = copy.deepcopy(full_timerange) timerange_backtest = copy.deepcopy(full_timerange) @@ -412,9 +421,9 @@ class FreqaiDataKitchen: weights = np.exp(-np.arange(num_weights) / (wfactor * num_weights))[::-1] return weights - def get_predictions_to_append(self, predictions: DataFrame, - do_predict: npt.ArrayLike, - dataframe_backtest: DataFrame) -> DataFrame: + def get_predictions_to_append( + self, predictions: DataFrame, do_predict: npt.ArrayLike, dataframe_backtest: DataFrame + ) -> DataFrame: """ Get backtest prediction from current backtest period """ @@ -459,18 +468,18 @@ class FreqaiDataKitchen: Back fill values to before the backtesting range so that the dataframe matches size when it goes back to the strategy. These rows are not included in the backtest. """ - to_keep = [col for col in dataframe.columns if - not col.startswith("&") and not col.startswith("%%")] - self.return_dataframe = pd.merge(dataframe[to_keep], - self.full_df, how='left', on='date') - self.return_dataframe[self.full_df.columns] = ( - self.return_dataframe[self.full_df.columns].fillna(value=0)) + to_keep = [ + col for col in dataframe.columns if not col.startswith("&") and not col.startswith("%%") + ] + self.return_dataframe = pd.merge(dataframe[to_keep], self.full_df, how="left", on="date") + self.return_dataframe[self.full_df.columns] = self.return_dataframe[ + self.full_df.columns + ].fillna(value=0) self.full_df = DataFrame() return def create_fulltimerange(self, backtest_tr: str, backtest_period_days: int) -> str: - if not isinstance(backtest_period_days, int): raise OperationalException("backtest_period_days must be an integer") @@ -484,9 +493,11 @@ class FreqaiDataKitchen: # it does not. accommodating these kinds of edge cases just to allow open-ended # timerange is not high enough priority to warrant the effort. It is safer for now # to simply ask user to add their end date - raise OperationalException("FreqAI backtesting does not allow open ended timeranges. " - "Please indicate the end date of your desired backtesting. " - "timerange.") + raise OperationalException( + "FreqAI backtesting does not allow open ended timeranges. " + "Please indicate the end date of your desired backtesting. " + "timerange." 
+ ) # backtest_timerange.stopts = int( # datetime.now(tz=timezone.utc).timestamp() # ) @@ -525,7 +536,6 @@ class FreqaiDataKitchen: def check_if_new_training_required( self, trained_timestamp: int ) -> Tuple[bool, TimeRange, TimeRange]: - time = datetime.now(tz=timezone.utc).timestamp() trained_timerange = TimeRange() data_load_timerange = TimeRange() @@ -541,7 +551,7 @@ class FreqaiDataKitchen: # We notice that users like to use exotic indicators where # they do not know the required timeperiod. Here we include a factor # of safety by multiplying the user considered "max" by 2. - max_period = self.config.get('startup_candle_count', 20) * 2 + max_period = self.config.get("startup_candle_count", 20) * 2 additional_seconds = max_period * max_tf_seconds if trained_timestamp != 0: @@ -578,17 +588,12 @@ class FreqaiDataKitchen: return retrain, trained_timerange, data_load_timerange def set_new_model_names(self, pair: str, timestamp_id: int): - coin, _ = pair.split("/") - self.data_path = Path( - self.full_path - / f"sub-train-{pair.split('/')[0]}_{timestamp_id}" - ) + self.data_path = Path(self.full_path / f"sub-train-{pair.split('/')[0]}_{timestamp_id}") self.model_filename = f"cb_{coin.lower()}_{timestamp_id}" def set_all_pairs(self) -> None: - self.all_pairs = copy.deepcopy( self.freqai_config["feature_parameters"].get("include_corr_pairlist", []) ) @@ -597,8 +602,7 @@ class FreqaiDataKitchen: self.all_pairs.append(pair) def extract_corr_pair_columns_from_populated_indicators( - self, - dataframe: DataFrame + self, dataframe: DataFrame ) -> Dict[str, DataFrame]: """ Find the columns of the dataframe corresponding to the corr_pairlist, save them @@ -612,19 +616,20 @@ class FreqaiDataKitchen: pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", []) for pair in pairs: - pair = pair.replace(':', '') # lightgbm does not like colons - pair_cols = [col for col in dataframe.columns if col.startswith("%") - and f"{pair}_" in col] + pair = pair.replace(":", "") # lightgbm does not like colons + pair_cols = [ + col for col in dataframe.columns if col.startswith("%") and f"{pair}_" in col + ] if pair_cols: - pair_cols.insert(0, 'date') + pair_cols.insert(0, "date") corr_dataframes[pair] = dataframe.filter(pair_cols, axis=1) return corr_dataframes - def attach_corr_pair_columns(self, dataframe: DataFrame, - corr_dataframes: Dict[str, DataFrame], - current_pair: str) -> DataFrame: + def attach_corr_pair_columns( + self, dataframe: DataFrame, corr_dataframes: Dict[str, DataFrame], current_pair: str + ) -> DataFrame: """ Attach the existing corr_pair dataframes to the current pair dataframe before training @@ -636,21 +641,23 @@ class FreqaiDataKitchen: ready for training """ pairs = self.freqai_config["feature_parameters"].get("include_corr_pairlist", []) - current_pair = current_pair.replace(':', '') + current_pair = current_pair.replace(":", "") for pair in pairs: - pair = pair.replace(':', '') # lightgbm does not work with colons + pair = pair.replace(":", "") # lightgbm does not work with colons if current_pair != pair: - dataframe = dataframe.merge(corr_dataframes[pair], how='left', on='date') + dataframe = dataframe.merge(corr_dataframes[pair], how="left", on="date") return dataframe - def get_pair_data_for_features(self, - pair: str, - tf: str, - strategy: IStrategy, - corr_dataframes: dict = {}, - base_dataframes: dict = {}, - is_corr_pairs: bool = False) -> DataFrame: + def get_pair_data_for_features( + self, + pair: str, + tf: str, + strategy: IStrategy, + corr_dataframes: 
dict = {}, + base_dataframes: dict = {}, + is_corr_pairs: bool = False, + ) -> DataFrame: """ Get the data for the pair. If it's not in the dictionary, get it from the data provider :param pair: str = pair to get data for @@ -678,8 +685,9 @@ class FreqaiDataKitchen: dataframe = strategy.dp.get_pair_dataframe(pair=pair, timeframe=tf) return dataframe - def merge_features(self, df_main: DataFrame, df_to_merge: DataFrame, - tf: str, timeframe_inf: str, suffix: str) -> DataFrame: + def merge_features( + self, df_main: DataFrame, df_to_merge: DataFrame, tf: str, timeframe_inf: str, suffix: str + ) -> DataFrame: """ Merge the features of the dataframe and remove HLCV and date added columns :param df_main: DataFrame = main dataframe @@ -689,17 +697,30 @@ class FreqaiDataKitchen: :param suffix: str = suffix to add to the columns of the dataframe to merge :return: dataframe = merged dataframe """ - dataframe = merge_informative_pair(df_main, df_to_merge, tf, timeframe_inf=timeframe_inf, - append_timeframe=False, suffix=suffix, ffill=True) + dataframe = merge_informative_pair( + df_main, + df_to_merge, + tf, + timeframe_inf=timeframe_inf, + append_timeframe=False, + suffix=suffix, + ffill=True, + ) skip_columns = [ (f"{s}_{suffix}") for s in ["date", "open", "high", "low", "close", "volume"] ] dataframe = dataframe.drop(columns=skip_columns) return dataframe - def populate_features(self, dataframe: DataFrame, pair: str, strategy: IStrategy, - corr_dataframes: dict, base_dataframes: dict, - is_corr_pairs: bool = False) -> DataFrame: + def populate_features( + self, + dataframe: DataFrame, + pair: str, + strategy: IStrategy, + corr_dataframes: dict, + base_dataframes: dict, + is_corr_pairs: bool = False, + ) -> DataFrame: """ Use the user defined strategy functions for populating features :param dataframe: DataFrame = dataframe to populate @@ -715,19 +736,22 @@ class FreqaiDataKitchen: for tf in tfs: metadata = {"pair": pair, "tf": tf} informative_df = self.get_pair_data_for_features( - pair, tf, strategy, corr_dataframes, base_dataframes, is_corr_pairs) + pair, tf, strategy, corr_dataframes, base_dataframes, is_corr_pairs + ) informative_copy = informative_df.copy() logger.debug(f"Populating features for {pair} {tf}") for t in self.freqai_config["feature_parameters"]["indicator_periods_candles"]: df_features = strategy.feature_engineering_expand_all( - informative_copy.copy(), t, metadata=metadata) + informative_copy.copy(), t, metadata=metadata + ) suffix = f"{t}" informative_df = self.merge_features(informative_df, df_features, tf, tf, suffix) generic_df = strategy.feature_engineering_expand_basic( - informative_copy.copy(), metadata=metadata) + informative_copy.copy(), metadata=metadata + ) suffix = "gen" informative_df = self.merge_features(informative_df, generic_df, tf, tf, suffix) @@ -740,8 +764,9 @@ class FreqaiDataKitchen: df_shift = df_shift.add_suffix("_shift-" + str(n)) informative_df = pd.concat((informative_df, df_shift), axis=1) - dataframe = self.merge_features(dataframe.copy(), informative_df, - self.config["timeframe"], tf, f'{pair}_{tf}') + dataframe = self.merge_features( + dataframe.copy(), informative_df, self.config["timeframe"], tf, f"{pair}_{tf}" + ) return dataframe @@ -771,7 +796,8 @@ class FreqaiDataKitchen: # check if the user is using the deprecated populate_any_indicators function new_version = inspect.getsource(strategy.populate_any_indicators) == ( - inspect.getsource(IStrategy.populate_any_indicators)) + inspect.getsource(IStrategy.populate_any_indicators) + ) if not 
new_version: raise OperationalException( @@ -782,11 +808,10 @@ class FreqaiDataKitchen: f"{DOCS_LINK}/strategy_migration/#freqai-strategy \n" "And the feature_engineering_* documentation: \n" f"{DOCS_LINK}/freqai-feature-engineering/" - ) + ) tfs: List[str] = self.freqai_config["feature_parameters"].get("include_timeframes") - pairs: List[str] = self.freqai_config["feature_parameters"].get( - "include_corr_pairlist", []) + pairs: List[str] = self.freqai_config["feature_parameters"].get("include_corr_pairlist", []) for tf in tfs: if tf not in base_dataframes: @@ -804,9 +829,11 @@ class FreqaiDataKitchen: dataframe = base_dataframes[self.config["timeframe"]].copy() corr_pairs: List[str] = self.freqai_config["feature_parameters"].get( - "include_corr_pairlist", []) - dataframe = self.populate_features(dataframe.copy(), pair, strategy, - corr_dataframes, base_dataframes) + "include_corr_pairlist", [] + ) + dataframe = self.populate_features( + dataframe.copy(), pair, strategy, corr_dataframes, base_dataframes + ) metadata = {"pair": pair} dataframe = strategy.feature_engineering_standard(dataframe.copy(), metadata=metadata) # ensure corr pairs are always last @@ -814,8 +841,9 @@ class FreqaiDataKitchen: if pair == corr_pair: continue # dont repeat anything from whitelist if corr_pairs and do_corr_pairs: - dataframe = self.populate_features(dataframe.copy(), corr_pair, strategy, - corr_dataframes, base_dataframes, True) + dataframe = self.populate_features( + dataframe.copy(), corr_pair, strategy, corr_dataframes, base_dataframes, True + ) if self.live: dataframe = strategy.set_freqai_targets(dataframe.copy(), metadata=metadata) @@ -823,7 +851,7 @@ class FreqaiDataKitchen: self.get_unique_classes_from_labels(dataframe) - if self.config.get('reduce_df_footprint', False): + if self.config.get("reduce_df_footprint", False): dataframe = reduce_dataframe_footprint(dataframe) return dataframe @@ -858,7 +886,6 @@ class FreqaiDataKitchen: return dataframe[to_keep] def get_unique_classes_from_labels(self, dataframe: DataFrame) -> None: - # self.find_features(dataframe) self.find_labels(dataframe) @@ -870,9 +897,7 @@ class FreqaiDataKitchen: for label in self.unique_classes: self.unique_class_list += list(self.unique_classes[label]) - def save_backtesting_prediction( - self, append_df: DataFrame - ) -> None: + def save_backtesting_prediction(self, append_df: DataFrame) -> None: """ Save prediction dataframe from backtesting to feather file format :param append_df: dataframe for backtesting period @@ -883,19 +908,14 @@ class FreqaiDataKitchen: append_df.to_feather(self.backtesting_results_path) - def get_backtesting_prediction( - self - ) -> DataFrame: + def get_backtesting_prediction(self) -> DataFrame: """ Get prediction dataframe from feather file format """ append_df = pd.read_feather(self.backtesting_results_path) return append_df - def check_if_backtest_prediction_is_valid( - self, - len_backtest_df: int - ) -> bool: + def check_if_backtest_prediction_is_valid(self, len_backtest_df: int) -> bool: """ Check if a backtesting prediction already exists and if the predictions to append have the same size as the backtesting dataframe slice @@ -903,27 +923,29 @@ class FreqaiDataKitchen: :return: :boolean: whether the prediction file is valid. 
""" - path_to_predictionfile = Path(self.full_path / - self.backtest_predictions_folder / - f"{self.model_filename}_prediction.feather") + path_to_predictionfile = Path( + self.full_path + / self.backtest_predictions_folder + / f"{self.model_filename}_prediction.feather" + ) self.backtesting_results_path = path_to_predictionfile file_exists = path_to_predictionfile.is_file() if file_exists: append_df = self.get_backtesting_prediction() - if len(append_df) == len_backtest_df and 'date' in append_df: + if len(append_df) == len_backtest_df and "date" in append_df: logger.info(f"Found backtesting prediction file at {path_to_predictionfile}") return True else: - logger.info("A new backtesting prediction file is required. " - "(Number of predictions is different from dataframe length or " - "old prediction file version).") + logger.info( + "A new backtesting prediction file is required. " + "(Number of predictions is different from dataframe length or " + "old prediction file version)." + ) return False else: - logger.info( - f"Could not find backtesting prediction file at {path_to_predictionfile}" - ) + logger.info(f"Could not find backtesting prediction file at {path_to_predictionfile}") return False def get_full_models_path(self, config: Config) -> Path: @@ -932,18 +954,16 @@ class FreqaiDataKitchen: :param config: Configuration dictionary """ freqai_config: Dict[str, Any] = config["freqai"] - return Path( - config["user_data_dir"] / "models" / str(freqai_config.get("identifier")) - ) + return Path(config["user_data_dir"] / "models" / str(freqai_config.get("identifier"))) def remove_special_chars_from_feature_names(self, dataframe: pd.DataFrame) -> pd.DataFrame: """ Remove all special characters from feature strings (:) :param dataframe: the dataframe that just finished indicator population. (unfiltered) - :return: dataframe with cleaned featrue names + :return: dataframe with cleaned feature names """ - spec_chars = [':'] + spec_chars = [":"] for c in spec_chars: dataframe.columns = dataframe.columns.str.replace(c, "") @@ -976,12 +996,14 @@ class FreqaiDataKitchen: """ Deprecation warning, migration assistance """ - logger.warning(f"Your custom IFreqaiModel relies on the deprecated" - " data pipeline. Please update your model to use the new data pipeline." - " This can be achieved by following the migration guide at " - f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " - "We added a basic pipeline for you, but this will be removed " - "in a future version.") + logger.warning( + f"Your custom IFreqaiModel relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." + " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " + "We added a basic pipeline for you, but this will be removed " + "in a future version." + ) return data_dictionary @@ -989,12 +1011,14 @@ class FreqaiDataKitchen: """ Deprecation warning, migration assistance """ - logger.warning(f"Your custom IFreqaiModel relies on the deprecated" - " data pipeline. Please update your model to use the new data pipeline." - " This can be achieved by following the migration guide at " - f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " - "We added a basic pipeline for you, but this will be removed " - "in a future version.") + logger.warning( + f"Your custom IFreqaiModel relies on the deprecated" + " data pipeline. Please update your model to use the new data pipeline." 
+ " This can be achieved by following the migration guide at " + f"{DOCS_LINK}/strategy_migration/#freqai-new-data-pipeline " + "We added a basic pipeline for you, but this will be removed " + "in a future version." + ) pred_df, _, _ = self.label_pipeline.inverse_transform(df) diff --git a/freqtrade/freqai/prediction_models/CatboostClassifier.py b/freqtrade/freqai/prediction_models/CatboostClassifier.py index b9904e40d..176139770 100644 --- a/freqtrade/freqai/prediction_models/CatboostClassifier.py +++ b/freqtrade/freqai/prediction_models/CatboostClassifier.py @@ -1,5 +1,4 @@ import logging -import sys from pathlib import Path from typing import Any, Dict @@ -46,14 +45,17 @@ class CatboostClassifier(BaseClassifierModel): cbr = CatBoostClassifier( allow_writing_files=True, - loss_function='MultiClass', + loss_function="MultiClass", train_dir=Path(dk.data_path), **self.model_training_parameters, ) init_model = self.get_init_model(dk.pair) - cbr.fit(X=train_data, eval_set=test_data, init_model=init_model, - log_cout=sys.stdout, log_cerr=sys.stderr) + cbr.fit( + X=train_data, + eval_set=test_data, + init_model=init_model, + ) return cbr diff --git a/freqtrade/freqai/prediction_models/CatboostClassifierMultiTarget.py b/freqtrade/freqai/prediction_models/CatboostClassifierMultiTarget.py index 58c47566a..02cb91f5a 100644 --- a/freqtrade/freqai/prediction_models/CatboostClassifierMultiTarget.py +++ b/freqtrade/freqai/prediction_models/CatboostClassifierMultiTarget.py @@ -1,5 +1,4 @@ import logging -import sys from pathlib import Path from typing import Any, Dict @@ -33,7 +32,7 @@ class CatboostClassifierMultiTarget(BaseClassifierModel): cbc = CatBoostClassifier( allow_writing_files=True, - loss_function='MultiClass', + loss_function="MultiClass", train_dir=Path(dk.data_path), **self.model_training_parameters, ) @@ -45,10 +44,10 @@ class CatboostClassifierMultiTarget(BaseClassifierModel): eval_sets = [None] * y.shape[1] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - eval_sets = [None] * data_dictionary['test_labels'].shape[1] + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + eval_sets = [None] * data_dictionary["test_labels"].shape[1] - for i in range(data_dictionary['test_labels'].shape[1]): + for i in range(data_dictionary["test_labels"].shape[1]): eval_sets[i] = Pool( data=data_dictionary["test_features"], label=data_dictionary["test_labels"].iloc[:, i], @@ -64,13 +63,15 @@ class CatboostClassifierMultiTarget(BaseClassifierModel): fit_params = [] for i in range(len(eval_sets)): - fit_params.append({ - 'eval_set': eval_sets[i], 'init_model': init_models[i], - 'log_cout': sys.stdout, 'log_cerr': sys.stderr, - }) + fit_params.append( + { + "eval_set": eval_sets[i], + "init_model": init_models[i], + } + ) model = FreqaiMultiOutputClassifier(estimator=cbc) - thread_training = self.freqai_info.get('multitarget_parallel_training', False) + thread_training = self.freqai_info.get("multitarget_parallel_training", False) if thread_training: model.n_jobs = y.shape[1] model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params) diff --git a/freqtrade/freqai/prediction_models/CatboostRegressor.py b/freqtrade/freqai/prediction_models/CatboostRegressor.py index 28b1b11cc..5401a808b 100644 --- a/freqtrade/freqai/prediction_models/CatboostRegressor.py +++ b/freqtrade/freqai/prediction_models/CatboostRegressor.py @@ -1,5 +1,4 @@ import logging -import sys from pathlib import Path from typing import Any, Dict @@ -35,7 +34,7 @@ class 
CatboostRegressor(BaseRegressionModel): label=data_dictionary["train_labels"], weight=data_dictionary["train_weights"], ) - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: test_data = None else: test_data = Pool( @@ -52,7 +51,10 @@ class CatboostRegressor(BaseRegressionModel): **self.model_training_parameters, ) - model.fit(X=train_data, eval_set=test_data, init_model=init_model, - log_cout=sys.stdout, log_cerr=sys.stderr) + model.fit( + X=train_data, + eval_set=test_data, + init_model=init_model, + ) return model diff --git a/freqtrade/freqai/prediction_models/CatboostRegressorMultiTarget.py b/freqtrade/freqai/prediction_models/CatboostRegressorMultiTarget.py index 1562c2024..c2a5344e3 100644 --- a/freqtrade/freqai/prediction_models/CatboostRegressorMultiTarget.py +++ b/freqtrade/freqai/prediction_models/CatboostRegressorMultiTarget.py @@ -1,5 +1,4 @@ import logging -import sys from pathlib import Path from typing import Any, Dict @@ -44,10 +43,10 @@ class CatboostRegressorMultiTarget(BaseRegressionModel): eval_sets = [None] * y.shape[1] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: - eval_sets = [None] * data_dictionary['test_labels'].shape[1] + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: + eval_sets = [None] * data_dictionary["test_labels"].shape[1] - for i in range(data_dictionary['test_labels'].shape[1]): + for i in range(data_dictionary["test_labels"].shape[1]): eval_sets[i] = Pool( data=data_dictionary["test_features"], label=data_dictionary["test_labels"].iloc[:, i], @@ -63,13 +62,15 @@ class CatboostRegressorMultiTarget(BaseRegressionModel): fit_params = [] for i in range(len(eval_sets)): - fit_params.append({ - 'eval_set': eval_sets[i], 'init_model': init_models[i], - 'log_cout': sys.stdout, 'log_cerr': sys.stderr, - }) + fit_params.append( + { + "eval_set": eval_sets[i], + "init_model": init_models[i], + } + ) model = FreqaiMultiOutputRegressor(estimator=cbr) - thread_training = self.freqai_info.get('multitarget_parallel_training', False) + thread_training = self.freqai_info.get("multitarget_parallel_training", False) if thread_training: model.n_jobs = y.shape[1] model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params) diff --git a/freqtrade/freqai/prediction_models/LightGBMClassifier.py b/freqtrade/freqai/prediction_models/LightGBMClassifier.py index 4c481adff..1e86a39e2 100644 --- a/freqtrade/freqai/prediction_models/LightGBMClassifier.py +++ b/freqtrade/freqai/prediction_models/LightGBMClassifier.py @@ -28,12 +28,16 @@ class LightGBMClassifier(BaseClassifierModel): :param dk: The datakitchen object for the current coin/model """ - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: eval_set = None test_weights = None else: - eval_set = [(data_dictionary["test_features"].to_numpy(), - data_dictionary["test_labels"].to_numpy()[:, 0])] + eval_set = [ + ( + data_dictionary["test_features"].to_numpy(), + data_dictionary["test_labels"].to_numpy()[:, 0], + ) + ] test_weights = data_dictionary["test_weights"] X = data_dictionary["train_features"].to_numpy() y = data_dictionary["train_labels"].to_numpy()[:, 0] @@ -42,7 +46,13 @@ class LightGBMClassifier(BaseClassifierModel): init_model = self.get_init_model(dk.pair) model = LGBMClassifier(**self.model_training_parameters) - 
model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, - eval_sample_weight=[test_weights], init_model=init_model) + model.fit( + X=X, + y=y, + eval_set=eval_set, + sample_weight=train_weights, + eval_sample_weight=[test_weights], + init_model=init_model, + ) return model diff --git a/freqtrade/freqai/prediction_models/LightGBMClassifierMultiTarget.py b/freqtrade/freqai/prediction_models/LightGBMClassifierMultiTarget.py index 72a8ee259..696deb9c9 100644 --- a/freqtrade/freqai/prediction_models/LightGBMClassifierMultiTarget.py +++ b/freqtrade/freqai/prediction_models/LightGBMClassifierMultiTarget.py @@ -38,13 +38,13 @@ class LightGBMClassifierMultiTarget(BaseClassifierModel): eval_weights = None eval_sets = [None] * y.shape[1] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: eval_weights = [data_dictionary["test_weights"]] - eval_sets = [(None, None)] * data_dictionary['test_labels'].shape[1] # type: ignore - for i in range(data_dictionary['test_labels'].shape[1]): + eval_sets = [(None, None)] * data_dictionary["test_labels"].shape[1] # type: ignore + for i in range(data_dictionary["test_labels"].shape[1]): eval_sets[i] = ( # type: ignore data_dictionary["test_features"], - data_dictionary["test_labels"].iloc[:, i] + data_dictionary["test_labels"].iloc[:, i], ) init_model = self.get_init_model(dk.pair) @@ -56,11 +56,15 @@ class LightGBMClassifierMultiTarget(BaseClassifierModel): fit_params = [] for i in range(len(eval_sets)): fit_params.append( - {'eval_set': eval_sets[i], 'eval_sample_weight': eval_weights, - 'init_model': init_models[i]}) + { + "eval_set": eval_sets[i], + "eval_sample_weight": eval_weights, + "init_model": init_models[i], + } + ) model = FreqaiMultiOutputClassifier(estimator=lgb) - thread_training = self.freqai_info.get('multitarget_parallel_training', False) + thread_training = self.freqai_info.get("multitarget_parallel_training", False) if thread_training: model.n_jobs = y.shape[1] model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params) diff --git a/freqtrade/freqai/prediction_models/LightGBMRegressor.py b/freqtrade/freqai/prediction_models/LightGBMRegressor.py index 15849f446..66bd204e7 100644 --- a/freqtrade/freqai/prediction_models/LightGBMRegressor.py +++ b/freqtrade/freqai/prediction_models/LightGBMRegressor.py @@ -28,7 +28,7 @@ class LightGBMRegressor(BaseRegressionModel): :param dk: The datakitchen object for the current coin/model """ - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: eval_set = None eval_weights = None else: @@ -42,7 +42,13 @@ class LightGBMRegressor(BaseRegressionModel): model = LGBMRegressor(**self.model_training_parameters) - model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, - eval_sample_weight=[eval_weights], init_model=init_model) + model.fit( + X=X, + y=y, + eval_set=eval_set, + sample_weight=train_weights, + eval_sample_weight=[eval_weights], + init_model=init_model, + ) return model diff --git a/freqtrade/freqai/prediction_models/LightGBMRegressorMultiTarget.py b/freqtrade/freqai/prediction_models/LightGBMRegressorMultiTarget.py index 5827dcefe..88752ea0b 100644 --- a/freqtrade/freqai/prediction_models/LightGBMRegressorMultiTarget.py +++ b/freqtrade/freqai/prediction_models/LightGBMRegressorMultiTarget.py @@ -38,14 +38,16 @@ class 
LightGBMRegressorMultiTarget(BaseRegressionModel): eval_weights = None eval_sets = [None] * y.shape[1] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: eval_weights = [data_dictionary["test_weights"]] - eval_sets = [(None, None)] * data_dictionary['test_labels'].shape[1] # type: ignore - for i in range(data_dictionary['test_labels'].shape[1]): - eval_sets[i] = [( # type: ignore - data_dictionary["test_features"], - data_dictionary["test_labels"].iloc[:, i] - )] + eval_sets = [(None, None)] * data_dictionary["test_labels"].shape[1] # type: ignore + for i in range(data_dictionary["test_labels"].shape[1]): + eval_sets[i] = [ # type: ignore + ( + data_dictionary["test_features"], + data_dictionary["test_labels"].iloc[:, i], + ) + ] init_model = self.get_init_model(dk.pair) if init_model: @@ -56,11 +58,15 @@ class LightGBMRegressorMultiTarget(BaseRegressionModel): fit_params = [] for i in range(len(eval_sets)): fit_params.append( - {'eval_set': eval_sets[i], 'eval_sample_weight': eval_weights, - 'init_model': init_models[i]}) + { + "eval_set": eval_sets[i], + "eval_sample_weight": eval_weights, + "init_model": init_models[i], + } + ) model = FreqaiMultiOutputRegressor(estimator=lgb) - thread_training = self.freqai_info.get('multitarget_parallel_training', False) + thread_training = self.freqai_info.get("multitarget_parallel_training", False) if thread_training: model.n_jobs = y.shape[1] model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params) diff --git a/freqtrade/freqai/prediction_models/PyTorchMLPClassifier.py b/freqtrade/freqai/prediction_models/PyTorchMLPClassifier.py index a03a0c742..246f6bb8c 100644 --- a/freqtrade/freqai/prediction_models/PyTorchMLPClassifier.py +++ b/freqtrade/freqai/prediction_models/PyTorchMLPClassifier.py @@ -4,8 +4,10 @@ import torch from freqtrade.freqai.base_models.BasePyTorchClassifier import BasePyTorchClassifier from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor, - PyTorchDataConvertor) +from freqtrade.freqai.torch.PyTorchDataConvertor import ( + DefaultPyTorchDataConvertor, + PyTorchDataConvertor, +) from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer @@ -43,16 +45,15 @@ class PyTorchMLPClassifier(BasePyTorchClassifier): @property def data_convertor(self) -> PyTorchDataConvertor: return DefaultPyTorchDataConvertor( - target_tensor_type=torch.long, - squeeze_target_tensor=True + target_tensor_type=torch.long, squeeze_target_tensor=True ) def __init__(self, **kwargs) -> None: super().__init__(**kwargs) config = self.freqai_info.get("model_training_parameters", {}) - self.learning_rate: float = config.get("learning_rate", 3e-4) - self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) - self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) + self.learning_rate: float = config.get("learning_rate", 3e-4) + self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) + self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any: """ @@ -67,9 +68,7 @@ class PyTorchMLPClassifier(BasePyTorchClassifier): self.convert_label_column_to_int(data_dictionary, dk, class_names) n_features = data_dictionary["train_features"].shape[-1] model = 
PyTorchMLPModel( - input_dim=n_features, - output_dim=len(class_names), - **self.model_kwargs + input_dim=n_features, output_dim=len(class_names), **self.model_kwargs ) model.to(self.device) optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate) diff --git a/freqtrade/freqai/prediction_models/PyTorchMLPRegressor.py b/freqtrade/freqai/prediction_models/PyTorchMLPRegressor.py index ec5c0ba81..67ba4825a 100644 --- a/freqtrade/freqai/prediction_models/PyTorchMLPRegressor.py +++ b/freqtrade/freqai/prediction_models/PyTorchMLPRegressor.py @@ -4,8 +4,10 @@ import torch from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor, - PyTorchDataConvertor) +from freqtrade.freqai.torch.PyTorchDataConvertor import ( + DefaultPyTorchDataConvertor, + PyTorchDataConvertor, +) from freqtrade.freqai.torch.PyTorchMLPModel import PyTorchMLPModel from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchModelTrainer @@ -48,9 +50,9 @@ class PyTorchMLPRegressor(BasePyTorchRegressor): def __init__(self, **kwargs) -> None: super().__init__(**kwargs) config = self.freqai_info.get("model_training_parameters", {}) - self.learning_rate: float = config.get("learning_rate", 3e-4) - self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) - self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) + self.learning_rate: float = config.get("learning_rate", 3e-4) + self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) + self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any: """ @@ -61,11 +63,7 @@ class PyTorchMLPRegressor(BasePyTorchRegressor): """ n_features = data_dictionary["train_features"].shape[-1] - model = PyTorchMLPModel( - input_dim=n_features, - output_dim=1, - **self.model_kwargs - ) + model = PyTorchMLPModel(input_dim=n_features, output_dim=1, **self.model_kwargs) model.to(self.device) optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate) criterion = torch.nn.MSELoss() diff --git a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py index 8f245ed83..27b7de832 100644 --- a/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py +++ b/freqtrade/freqai/prediction_models/PyTorchTransformerRegressor.py @@ -7,8 +7,10 @@ import torch from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor from freqtrade.freqai.data_kitchen import FreqaiDataKitchen -from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor, - PyTorchDataConvertor) +from freqtrade.freqai.torch.PyTorchDataConvertor import ( + DefaultPyTorchDataConvertor, + PyTorchDataConvertor, +) from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchTransformerTrainer from freqtrade.freqai.torch.PyTorchTransformerModel import PyTorchTransformerModel @@ -57,9 +59,9 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): def __init__(self, **kwargs) -> None: super().__init__(**kwargs) config = self.freqai_info.get("model_training_parameters", {}) - self.learning_rate: float = config.get("learning_rate", 3e-4) - self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) - self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) + self.learning_rate: float = 
config.get("learning_rate", 3e-4) + self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {}) + self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {}) def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any: """ @@ -75,7 +77,7 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): input_dim=n_features, output_dim=n_labels, time_window=self.window_size, - **self.model_kwargs + **self.model_kwargs, ) model.to(self.device) optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate) @@ -114,11 +116,11 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): ) dk.data_dictionary["prediction_features"], outliers, _ = dk.feature_pipeline.transform( - dk.data_dictionary["prediction_features"], outlier_check=True) + dk.data_dictionary["prediction_features"], outlier_check=True + ) x = self.data_convertor.convert_x( - dk.data_dictionary["prediction_features"], - device=self.device + dk.data_dictionary["prediction_features"], device=self.device ) # if user is asking for multiple predictions, slide the window # along the tensor @@ -129,7 +131,7 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): if x.shape[1] > self.window_size: ws = self.window_size for i in range(0, x.shape[1] - ws): - xb = x[:, i:i + ws, :].to(self.device) + xb = x[:, i : i + ws, :].to(self.device) y = self.model.model(xb) yb = torch.cat((yb, y), dim=1) else: @@ -146,7 +148,8 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor): dk.do_predict = outliers if x.shape[1] > 1: - zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))), - columns=pred_df.columns) + zeros_df = pd.DataFrame( + np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))), columns=pred_df.columns + ) pred_df = pd.concat([zeros_df, pred_df], axis=0, ignore_index=True) return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index fbf12008a..7c2ad35ca 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -56,27 +56,30 @@ class ReinforcementLearner(BaseReinforcementLearningModel): train_df = data_dictionary["train_features"] total_timesteps = self.freqai_info["rl_config"]["train_cycles"] * len(train_df) - policy_kwargs = dict(activation_fn=th.nn.ReLU, - net_arch=self.net_arch) + policy_kwargs = dict(activation_fn=th.nn.ReLU, net_arch=self.net_arch) if self.activate_tensorboard: - tb_path = Path(dk.full_path / "tensorboard" / dk.pair.split('/')[0]) + tb_path = Path(dk.full_path / "tensorboard" / dk.pair.split("/")[0]) else: tb_path = None if dk.pair not in self.dd.model_dictionary or not self.continual_learning: - model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs, - tensorboard_log=tb_path, - **self.freqai_info.get('model_training_parameters', {}) - ) + model = self.MODELCLASS( + self.policy_type, + self.train_env, + policy_kwargs=policy_kwargs, + tensorboard_log=tb_path, + **self.freqai_info.get("model_training_parameters", {}), + ) else: - logger.info('Continual training activated - starting training from previously ' - 'trained agent.') + logger.info( + "Continual training activated - starting training from previously trained agent." 
+ ) model = self.dd.model_dictionary[dk.pair] model.set_env(self.train_env) callbacks: List[Any] = [self.eval_callback, self.tensorboard_callback] progressbar_callback: Optional[ProgressBarCallback] = None - if self.rl_config.get('progress_bar', False): + if self.rl_config.get("progress_bar", False): progressbar_callback = ProgressBarCallback() callbacks.insert(0, progressbar_callback) @@ -90,7 +93,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): progressbar_callback.on_training_end() if Path(dk.data_path / "best_model.zip").is_file(): - logger.info('Callback found a best model.') + logger.info("Callback found a best model.") best_model = self.MODELCLASS.load(dk.data_path / "best_model") return best_model @@ -127,20 +130,18 @@ class ReinforcementLearner(BaseReinforcementLearningModel): return -2 pnl = self.get_unrealized_profit() - factor = 100. + factor = 100.0 # reward agent for entering trades - if (action == Actions.Long_enter.value - and self._position == Positions.Neutral): + if action == Actions.Long_enter.value and self._position == Positions.Neutral: return 25 - if (action == Actions.Short_enter.value - and self._position == Positions.Neutral): + if action == Actions.Short_enter.value and self._position == Positions.Neutral: return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + max_trade_duration = self.rl_config.get("max_trade_duration_candles", 300) trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: @@ -149,20 +150,22 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor *= 0.5 # discourage sitting in position - if (self._position in (Positions.Short, Positions.Long) and - action == Actions.Neutral.value): + if ( + self._position in (Positions.Short, Positions.Long) + and action == Actions.Neutral.value + ): return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(pnl * factor) - return 0. 
+ return 0.0 diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 3fab83cff..9fab42b18 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -20,9 +20,13 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): Demonstration of how to build vectorized environments """ - def set_train_and_eval_environments(self, data_dictionary: Dict[str, Any], - prices_train: DataFrame, prices_test: DataFrame, - dk: FreqaiDataKitchen): + def set_train_and_eval_environments( + self, + data_dictionary: Dict[str, Any], + prices_train: DataFrame, + prices_test: DataFrame, + dk: FreqaiDataKitchen, + ): """ User can override this if they are using a custom MyRLEnv :param data_dictionary: dict = common data dictionary containing train and test @@ -45,22 +49,35 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): eval_freq = len(train_df) // self.max_threads env_id = "train_env" - self.train_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1, - train_df, prices_train, - env_info=env_info) for i - in range(self.max_threads)])) + self.train_env = VecMonitor( + SubprocVecEnv( + [ + make_env(self.MyRLEnv, env_id, i, 1, train_df, prices_train, env_info=env_info) + for i in range(self.max_threads) + ] + ) + ) - eval_env_id = 'eval_env' - self.eval_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1, - test_df, prices_test, - env_info=env_info) for i - in range(self.max_threads)])) + eval_env_id = "eval_env" + self.eval_env = VecMonitor( + SubprocVecEnv( + [ + make_env( + self.MyRLEnv, eval_env_id, i, 1, test_df, prices_test, env_info=env_info + ) + for i in range(self.max_threads) + ] + ) + ) - self.eval_callback = MaskableEvalCallback(self.eval_env, deterministic=True, - render=False, eval_freq=eval_freq, - best_model_save_path=str(dk.data_path), - use_masking=(self.model_type == 'MaskablePPO' and - is_masking_supported(self.eval_env))) + self.eval_callback = MaskableEvalCallback( + self.eval_env, + deterministic=True, + render=False, + eval_freq=eval_freq, + best_model_save_path=str(dk.data_path), + use_masking=(self.model_type == "MaskablePPO" and is_masking_supported(self.eval_env)), + ) # TENSORBOARD CALLBACK DOES NOT RECOMMENDED TO USE WITH MULTIPLE ENVS, # IT WILL RETURN FALSE INFORMATION, NEVERTHELESS NOT THREAD SAFE WITH SB3!!! 
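As a reference for the reinforcement-learning hunks above, the sketch below is illustrative only and not part of the patch: it collects the rl_config keys that ReinforcementLearner reads, with placeholder values and a hypothetical variable name.

# Illustrative sketch only -- not part of the patch; values are placeholders.
rl_config_sketch = {
    "rl_config": {
        "model_type": "PPO",                # class loaded dynamically from stable_baselines3 / sb3_contrib
        "train_cycles": 25,                 # total_timesteps = train_cycles * len(train_df)
        "max_trade_duration_candles": 300,  # trades longer than this halve the reward factor
        "progress_bar": False,              # True wraps training in a ProgressBarCallback
        "model_reward_parameters": {
            "win_reward_factor": 2,         # applied when pnl exceeds profit_aim * rr
        },
    }
}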
diff --git a/freqtrade/freqai/prediction_models/SKLearnRandomForestClassifier.py b/freqtrade/freqai/prediction_models/SKLearnRandomForestClassifier.py index 4462efc49..aa2830b8c 100644 --- a/freqtrade/freqai/prediction_models/SKLearnRandomForestClassifier.py +++ b/freqtrade/freqai/prediction_models/SKLearnRandomForestClassifier.py @@ -35,7 +35,7 @@ class SKLearnRandomForestClassifier(BaseClassifierModel): X = data_dictionary["train_features"].to_numpy() y = data_dictionary["train_labels"].to_numpy()[:, 0] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: eval_set = None else: test_features = data_dictionary["test_features"].to_numpy() @@ -44,8 +44,10 @@ class SKLearnRandomForestClassifier(BaseClassifierModel): eval_set = (test_features, test_labels) if self.freqai_info.get("continual_learning", False): - logger.warning("Continual learning is not supported for " - "SKLearnRandomForestClassifier, ignoring.") + logger.warning( + "Continual learning is not supported for " + "SKLearnRandomForestClassifier, ignoring." + ) train_weights = data_dictionary["train_weights"] @@ -73,10 +75,11 @@ class SKLearnRandomForestClassifier(BaseClassifierModel): le = LabelEncoder() label = dk.label_list[0] - labels_before = list(dk.data['labels_std'].keys()) + labels_before = list(dk.data["labels_std"].keys()) labels_after = le.fit_transform(labels_before).tolist() pred_df[label] = le.inverse_transform(pred_df[label]) pred_df = pred_df.rename( - columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))}) + columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))} + ) return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/prediction_models/XGBoostClassifier.py b/freqtrade/freqai/prediction_models/XGBoostClassifier.py index b6f04b497..41e034227 100644 --- a/freqtrade/freqai/prediction_models/XGBoostClassifier.py +++ b/freqtrade/freqai/prediction_models/XGBoostClassifier.py @@ -41,7 +41,7 @@ class XGBoostClassifier(BaseClassifierModel): if not is_integer_dtype(y): y = pd.Series(le.fit_transform(y), dtype="int64") - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: eval_set = None else: test_features = data_dictionary["test_features"].to_numpy() @@ -58,8 +58,7 @@ class XGBoostClassifier(BaseClassifierModel): model = XGBClassifier(**self.model_training_parameters) - model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, - xgb_model=init_model) + model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, xgb_model=init_model) return model @@ -79,10 +78,11 @@ class XGBoostClassifier(BaseClassifierModel): le = LabelEncoder() label = dk.label_list[0] - labels_before = list(dk.data['labels_std'].keys()) + labels_before = list(dk.data["labels_std"].keys()) labels_after = le.fit_transform(labels_before).tolist() pred_df[label] = le.inverse_transform(pred_df[label]) pred_df = pred_df.rename( - columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))}) + columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))} + ) return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/prediction_models/XGBoostRFClassifier.py b/freqtrade/freqai/prediction_models/XGBoostRFClassifier.py index 20156e9fd..f9875e8c2 100644 --- a/freqtrade/freqai/prediction_models/XGBoostRFClassifier.py +++ 
b/freqtrade/freqai/prediction_models/XGBoostRFClassifier.py @@ -41,7 +41,7 @@ class XGBoostRFClassifier(BaseClassifierModel): if not is_integer_dtype(y): y = pd.Series(le.fit_transform(y), dtype="int64") - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) == 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) == 0: eval_set = None else: test_features = data_dictionary["test_features"].to_numpy() @@ -58,8 +58,7 @@ class XGBoostRFClassifier(BaseClassifierModel): model = XGBRFClassifier(**self.model_training_parameters) - model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, - xgb_model=init_model) + model.fit(X=X, y=y, eval_set=eval_set, sample_weight=train_weights, xgb_model=init_model) return model @@ -79,10 +78,11 @@ class XGBoostRFClassifier(BaseClassifierModel): le = LabelEncoder() label = dk.label_list[0] - labels_before = list(dk.data['labels_std'].keys()) + labels_before = list(dk.data["labels_std"].keys()) labels_after = le.fit_transform(labels_before).tolist() pred_df[label] = le.inverse_transform(pred_df[label]) pred_df = pred_df.rename( - columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))}) + columns={labels_after[i]: labels_before[i] for i in range(len(labels_before))} + ) return (pred_df, dk.do_predict) diff --git a/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py b/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py index 1949ad536..66a0ab846 100644 --- a/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py +++ b/freqtrade/freqai/prediction_models/XGBoostRFRegressor.py @@ -37,7 +37,7 @@ class XGBoostRFRegressor(BaseRegressionModel): eval_weights = None else: eval_set = [(data_dictionary["test_features"], data_dictionary["test_labels"])] - eval_weights = [data_dictionary['test_weights']] + eval_weights = [data_dictionary["test_weights"]] sample_weight = data_dictionary["train_weights"] @@ -46,8 +46,14 @@ class XGBoostRFRegressor(BaseRegressionModel): model = XGBRFRegressor(**self.model_training_parameters) model.set_params(callbacks=[TBCallback(dk.data_path)]) - model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set, - sample_weight_eval_set=eval_weights, xgb_model=xgb_model) + model.fit( + X=X, + y=y, + sample_weight=sample_weight, + eval_set=eval_set, + sample_weight_eval_set=eval_weights, + xgb_model=xgb_model, + ) # set the callbacks to empty so that we can serialize to disk later model.set_params(callbacks=[]) diff --git a/freqtrade/freqai/prediction_models/XGBoostRegressor.py b/freqtrade/freqai/prediction_models/XGBoostRegressor.py index f3de6653b..0755eea11 100644 --- a/freqtrade/freqai/prediction_models/XGBoostRegressor.py +++ b/freqtrade/freqai/prediction_models/XGBoostRegressor.py @@ -36,15 +36,8 @@ class XGBoostRegressor(BaseRegressionModel): eval_set = None eval_weights = None else: - eval_set = [ - (data_dictionary["test_features"], - data_dictionary["test_labels"]), - (X, y) - ] - eval_weights = [ - data_dictionary['test_weights'], - data_dictionary['train_weights'] - ] + eval_set = [(data_dictionary["test_features"], data_dictionary["test_labels"]), (X, y)] + eval_weights = [data_dictionary["test_weights"], data_dictionary["train_weights"]] sample_weight = data_dictionary["train_weights"] @@ -53,8 +46,14 @@ class XGBoostRegressor(BaseRegressionModel): model = XGBRegressor(**self.model_training_parameters) model.set_params(callbacks=[TBCallback(dk.data_path)]) - model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set, - 
sample_weight_eval_set=eval_weights, xgb_model=xgb_model) + model.fit( + X=X, + y=y, + sample_weight=sample_weight, + eval_set=eval_set, + sample_weight_eval_set=eval_weights, + xgb_model=xgb_model, + ) # set the callbacks to empty so that we can serialize to disk later model.set_params(callbacks=[]) diff --git a/freqtrade/freqai/prediction_models/XGBoostRegressorMultiTarget.py b/freqtrade/freqai/prediction_models/XGBoostRegressorMultiTarget.py index a0330485e..7bc01e89a 100644 --- a/freqtrade/freqai/prediction_models/XGBoostRegressorMultiTarget.py +++ b/freqtrade/freqai/prediction_models/XGBoostRegressorMultiTarget.py @@ -38,13 +38,15 @@ class XGBoostRegressorMultiTarget(BaseRegressionModel): eval_weights = None eval_sets = [None] * y.shape[1] - if self.freqai_info.get('data_split_parameters', {}).get('test_size', 0.1) != 0: + if self.freqai_info.get("data_split_parameters", {}).get("test_size", 0.1) != 0: eval_weights = [data_dictionary["test_weights"]] - for i in range(data_dictionary['test_labels'].shape[1]): - eval_sets[i] = [( # type: ignore - data_dictionary["test_features"], - data_dictionary["test_labels"].iloc[:, i] - )] + for i in range(data_dictionary["test_labels"].shape[1]): + eval_sets[i] = [ # type: ignore + ( + data_dictionary["test_features"], + data_dictionary["test_labels"].iloc[:, i], + ) + ] init_model = self.get_init_model(dk.pair) if init_model: @@ -55,11 +57,15 @@ class XGBoostRegressorMultiTarget(BaseRegressionModel): fit_params = [] for i in range(len(eval_sets)): fit_params.append( - {'eval_set': eval_sets[i], 'sample_weight_eval_set': eval_weights, - 'xgb_model': init_models[i]}) + { + "eval_set": eval_sets[i], + "sample_weight_eval_set": eval_weights, + "xgb_model": init_models[i], + } + ) model = FreqaiMultiOutputRegressor(estimator=xgb) - thread_training = self.freqai_info.get('multitarget_parallel_training', False) + thread_training = self.freqai_info.get("multitarget_parallel_training", False) if thread_training: model.n_jobs = y.shape[1] model.fit(X=X, y=y, sample_weight=sample_weight, fit_params=fit_params) diff --git a/freqtrade/freqai/tensorboard/TensorboardCallback.py b/freqtrade/freqai/tensorboard/TensorboardCallback.py index b8a351498..078d25bc4 100644 --- a/freqtrade/freqai/tensorboard/TensorboardCallback.py +++ b/freqtrade/freqai/tensorboard/TensorboardCallback.py @@ -12,6 +12,7 @@ class TensorboardCallback(BaseCallback): Custom callback for plotting additional values in tensorboard and episodic summary reports. 
""" + def __init__(self, verbose=1, actions: Type[Enum] = BaseActions): super().__init__(verbose) self.model: Any = None @@ -40,10 +41,9 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: - local_info = self.locals["infos"][0] - if hasattr(self.training_env, 'envs'): + if hasattr(self.training_env, "envs"): tensorboard_metrics = self.training_env.envs[0].unwrapped.tensorboard_metrics else: diff --git a/freqtrade/freqai/tensorboard/__init__.py b/freqtrade/freqai/tensorboard/__init__.py index 59862bc0d..183c25b22 100644 --- a/freqtrade/freqai/tensorboard/__init__.py +++ b/freqtrade/freqai/tensorboard/__init__.py @@ -1,15 +1,16 @@ # ensure users can still use a non-torch freqai version try: from freqtrade.freqai.tensorboard.tensorboard import TensorBoardCallback, TensorboardLogger + TBLogger = TensorboardLogger TBCallback = TensorBoardCallback except ModuleNotFoundError: - from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback, - BaseTensorboardLogger) + from freqtrade.freqai.tensorboard.base_tensorboard import ( + BaseTensorBoardCallback, + BaseTensorboardLogger, + ) + TBLogger = BaseTensorboardLogger # type: ignore TBCallback = BaseTensorBoardCallback # type: ignore -__all__ = ( - "TBLogger", - "TBCallback" -) +__all__ = ("TBLogger", "TBCallback") diff --git a/freqtrade/freqai/tensorboard/base_tensorboard.py b/freqtrade/freqai/tensorboard/base_tensorboard.py index 72f47111c..73a97230f 100644 --- a/freqtrade/freqai/tensorboard/base_tensorboard.py +++ b/freqtrade/freqai/tensorboard/base_tensorboard.py @@ -20,13 +20,10 @@ class BaseTensorboardLogger: class BaseTensorBoardCallback(TrainingCallback): - def __init__(self, logdir: Path, activate: bool = True): pass - def after_iteration( - self, model, epoch: int, evals_log: TrainingCallback.EvalsLog - ) -> bool: + def after_iteration(self, model, epoch: int, evals_log: TrainingCallback.EvalsLog) -> bool: return False def after_training(self, model): diff --git a/freqtrade/freqai/tensorboard/tensorboard.py b/freqtrade/freqai/tensorboard/tensorboard.py index 3ad896108..3a306f377 100644 --- a/freqtrade/freqai/tensorboard/tensorboard.py +++ b/freqtrade/freqai/tensorboard/tensorboard.py @@ -5,8 +5,10 @@ from typing import Any from torch.utils.tensorboard import SummaryWriter from xgboost import callback -from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback, - BaseTensorboardLogger) +from freqtrade.freqai.tensorboard.base_tensorboard import ( + BaseTensorBoardCallback, + BaseTensorboardLogger, +) logger = logging.getLogger(__name__) @@ -29,7 +31,6 @@ class TensorboardLogger(BaseTensorboardLogger): class TensorBoardCallback(BaseTensorBoardCallback): - def __init__(self, logdir: Path, activate: bool = True): self.activate = activate if self.activate: diff --git a/freqtrade/freqai/torch/PyTorchDataConvertor.py b/freqtrade/freqai/torch/PyTorchDataConvertor.py index 0af14dd14..5b3f249c3 100644 --- a/freqtrade/freqai/torch/PyTorchDataConvertor.py +++ b/freqtrade/freqai/torch/PyTorchDataConvertor.py @@ -31,9 +31,9 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor): """ def __init__( - self, - target_tensor_type: torch.dtype = torch.float32, - squeeze_target_tensor: bool = False, + self, + target_tensor_type: torch.dtype = torch.float32, + squeeze_target_tensor: bool = False, ): """ :param target_tensor_type: type of target tensor, for classification use diff --git a/freqtrade/freqai/torch/PyTorchModelTrainer.py b/freqtrade/freqai/torch/PyTorchModelTrainer.py index 
5c1db3c65..602c8e95b 100644 --- a/freqtrade/freqai/torch/PyTorchModelTrainer.py +++ b/freqtrade/freqai/torch/PyTorchModelTrainer.py @@ -19,16 +19,16 @@ logger = logging.getLogger(__name__) class PyTorchModelTrainer(PyTorchTrainerInterface): def __init__( - self, - model: nn.Module, - optimizer: Optimizer, - criterion: nn.Module, - device: str, - data_convertor: PyTorchDataConvertor, - model_meta_data: Dict[str, Any] = {}, - window_size: int = 1, - tb_logger: Any = None, - **kwargs + self, + model: nn.Module, + optimizer: Optimizer, + criterion: nn.Module, + device: str, + data_convertor: PyTorchDataConvertor, + model_meta_data: Dict[str, Any] = {}, + window_size: int = 1, + tb_logger: Any = None, + **kwargs, ): """ :param model: The PyTorch model to be trained. @@ -101,9 +101,9 @@ class PyTorchModelTrainer(PyTorchTrainerInterface): @torch.no_grad() def estimate_loss( - self, - data_loader_dictionary: Dict[str, DataLoader], - split: str, + self, + data_loader_dictionary: Dict[str, DataLoader], + split: str, ) -> None: self.model.eval() for _, batch_data in enumerate(data_loader_dictionary[split]): @@ -119,9 +119,7 @@ class PyTorchModelTrainer(PyTorchTrainerInterface): self.model.train() def create_data_loaders_dictionary( - self, - data_dictionary: Dict[str, pd.DataFrame], - splits: List[str] + self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str] ) -> Dict[str, DataLoader]: """ Converts the input data to PyTorch tensors using a data loader. @@ -168,12 +166,15 @@ class PyTorchModelTrainer(PyTorchTrainerInterface): user needs to store. e.g. class_names for classification models. """ - torch.save({ - "model_state_dict": self.model.state_dict(), - "optimizer_state_dict": self.optimizer.state_dict(), - "model_meta_data": self.model_meta_data, - "pytrainer": self - }, path) + torch.save( + { + "model_state_dict": self.model.state_dict(), + "optimizer_state_dict": self.optimizer.state_dict(), + "model_meta_data": self.model_meta_data, + "pytrainer": self, + }, + path, + ) def load(self, path: Path): checkpoint = torch.load(path) @@ -198,9 +199,7 @@ class PyTorchTransformerTrainer(PyTorchModelTrainer): """ def create_data_loaders_dictionary( - self, - data_dictionary: Dict[str, pd.DataFrame], - splits: List[str] + self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str] ) -> Dict[str, DataLoader]: """ Converts the input data to PyTorch tensors using a data loader. diff --git a/freqtrade/freqai/torch/PyTorchTrainerInterface.py b/freqtrade/freqai/torch/PyTorchTrainerInterface.py index 840c145f7..2c6f0c4d7 100644 --- a/freqtrade/freqai/torch/PyTorchTrainerInterface.py +++ b/freqtrade/freqai/torch/PyTorchTrainerInterface.py @@ -8,7 +8,6 @@ from torch import nn class PyTorchTrainerInterface(ABC): - @abstractmethod def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]) -> None: """ diff --git a/freqtrade/freqai/torch/PyTorchTransformerModel.py b/freqtrade/freqai/torch/PyTorchTransformerModel.py index 162459776..ae57b8ffd 100644 --- a/freqtrade/freqai/torch/PyTorchTransformerModel.py +++ b/freqtrade/freqai/torch/PyTorchTransformerModel.py @@ -19,8 +19,16 @@ class PyTorchTransformerModel(nn.Module): Lukasz Kaiser, and Illia Polosukhin. 2017. 
""" - def __init__(self, input_dim: int = 7, output_dim: int = 7, hidden_dim=1024, - n_layer=2, dropout_percent=0.1, time_window=10, nhead=8): + def __init__( + self, + input_dim: int = 7, + output_dim: int = 7, + hidden_dim=1024, + n_layer=2, + dropout_percent=0.1, + time_window=10, + nhead=8, + ): super().__init__() self.time_window = time_window # ensure the input dimension to the transformer is divisible by nhead @@ -34,7 +42,8 @@ class PyTorchTransformerModel(nn.Module): # Define the encoder block of the Transformer self.encoder_layer = nn.TransformerEncoderLayer( - d_model=self.dim_val, nhead=nhead, dropout=dropout_percent, batch_first=True) + d_model=self.dim_val, nhead=nhead, dropout=dropout_percent, batch_first=True + ) self.transformer = nn.TransformerEncoder(self.encoder_layer, num_layers=n_layer) # the pseudo decoding FC @@ -48,7 +57,7 @@ class PyTorchTransformerModel(nn.Module): nn.Linear(int(hidden_dim / 2), int(hidden_dim / 4)), nn.ReLU(), nn.Dropout(dropout_percent), - nn.Linear(int(hidden_dim / 4), output_dim) + nn.Linear(int(hidden_dim / 4), output_dim), ) def forward(self, x, mask=None, add_positional_encoding=True): diff --git a/freqtrade/freqai/torch/datasets.py b/freqtrade/freqai/torch/datasets.py index 120d8a116..8ddb3b698 100644 --- a/freqtrade/freqai/torch/datasets.py +++ b/freqtrade/freqai/torch/datasets.py @@ -12,7 +12,7 @@ class WindowDataset(torch.utils.data.Dataset): def __getitem__(self, index): idx_rev = len(self.xs) - self.window_size - index - 1 - window_x = self.xs[idx_rev:idx_rev + self.window_size, :] + window_x = self.xs[idx_rev : idx_rev + self.window_size, :] # Beware of indexing, these two window_x and window_y are aimed at the same row! # this is what happens when you use : window_y = self.ys[idx_rev + self.window_size - 1, :].unsqueeze(0) diff --git a/freqtrade/freqai/utils.py b/freqtrade/freqai/utils.py index 8ac175e4d..4acdad306 100644 --- a/freqtrade/freqai/utils.py +++ b/freqtrade/freqai/utils.py @@ -31,11 +31,12 @@ def download_all_data_for_training(dp: DataProvider, config: Config) -> None: """ if dp._exchange is None: - raise OperationalException('No exchange object found.') + raise OperationalException("No exchange object found.") markets = [ - p for p in dp._exchange.get_markets( - tradable_only=True, active_only=not config.get('include_inactive') - ).keys() + p + for p in dp._exchange.get_markets( + tradable_only=True, active_only=not config.get("include_inactive") + ).keys() ] all_pairs = dynamic_expand_pairlist(config, markets) @@ -73,42 +74,39 @@ def get_required_data_timerange(config: Config) -> TimeRange: if secs > max_tf_seconds: max_tf_seconds = secs - startup_candles = config.get('startup_candle_count', 0) + startup_candles = config.get("startup_candle_count", 0) indicator_periods = config["freqai"]["feature_parameters"]["indicator_periods_candles"] # factor the max_period as a factor of safety. 
max_period = int(max(startup_candles, max(indicator_periods)) * 1.5) - config['startup_candle_count'] = max_period - logger.info(f'FreqAI auto-downloader using {max_period} startup candles.') + config["startup_candle_count"] = max_period + logger.info(f"FreqAI auto-downloader using {max_period} startup candles.") additional_seconds = max_period * max_tf_seconds - startts = int( - time - - config["freqai"].get("train_period_days", 0) * 86400 - - additional_seconds - ) + startts = int(time - config["freqai"].get("train_period_days", 0) * 86400 - additional_seconds) stopts = int(time) - data_load_timerange = TimeRange('date', 'date', startts, stopts) + data_load_timerange = TimeRange("date", "date", startts, stopts) return data_load_timerange -def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen, - count_max: int = 25) -> None: +def plot_feature_importance( + model: Any, pair: str, dk: FreqaiDataKitchen, count_max: int = 25 +) -> None: """ - Plot Best and worst features by importance for a single sub-train. - :param model: Any = A model which was `fit` using a common library - such as catboost or lightgbm - :param pair: str = pair e.g. BTC/USD - :param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop - :param count_max: int = the amount of features to be loaded per column + Plot Best and worst features by importance for a single sub-train. + :param model: Any = A model which was `fit` using a common library + such as catboost or lightgbm + :param pair: str = pair e.g. BTC/USD + :param dk: FreqaiDataKitchen = non-persistent data container for current coin/loop + :param count_max: int = the amount of features to be loaded per column """ from freqtrade.plot.plotting import go, make_subplots, store_plot_file # Extract feature importance from model models = {} - if 'FreqaiMultiOutputRegressor' in str(model.__class__): + if "FreqaiMultiOutputRegressor" in str(model.__class__): for estimator, label in zip(model.estimators_, dk.label_list): models[label] = estimator else: @@ -123,14 +121,16 @@ def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen, elif "xgb" in str(mdl.__class__): feature_importance = mdl.feature_importances_ else: - logger.info('Model type does not support generating feature importances.') + logger.info("Model type does not support generating feature importances.") return # Data preparation - fi_df = pd.DataFrame({ - "feature_names": np.array(dk.data_dictionary['train_features'].columns), - "feature_importance": np.array(feature_importance) - }) + fi_df = pd.DataFrame( + { + "feature_names": np.array(dk.data_dictionary["train_features"].columns), + "feature_importance": np.array(feature_importance), + } + ) fi_df_top = fi_df.nlargest(count_max, "feature_importance")[::-1] fi_df_worst = fi_df.nsmallest(count_max, "feature_importance")[::-1] @@ -140,14 +140,18 @@ def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen, go.Bar( x=fi_df["feature_importance"], y=fi_df["feature_names"], - orientation='h', showlegend=False - ), row=1, col=col + orientation="h", + showlegend=False, + ), + row=1, + col=col, ) + fig = make_subplots(rows=1, cols=2, horizontal_spacing=0.5) fig = add_feature_trace(fig, fi_df_top, 1) fig = add_feature_trace(fig, fi_df_worst, 2) fig.update_layout(title_text=f"Best and worst features by importance {pair}") - label = label.replace('&', '').replace('%', '') # escape two FreqAI specific characters + label = label.replace("&", "").replace("%", "") # escape two FreqAI specific characters 
store_plot_file(fig, f"{dk.model_filename}-{label}.html", dk.data_path) @@ -158,12 +162,12 @@ def record_params(config: Dict[str, Any], full_path: Path) -> None: params_record_path = full_path / "run_params.json" run_params = { - "freqai": config.get('freqai', {}), - "timeframe": config.get('timeframe'), - "stake_amount": config.get('stake_amount'), - "stake_currency": config.get('stake_currency'), - "max_open_trades": config.get('max_open_trades'), - "pairs": config.get('exchange', {}).get('pair_whitelist') + "freqai": config.get("freqai", {}), + "timeframe": config.get("timeframe"), + "stake_amount": config.get("stake_amount"), + "stake_currency": config.get("stake_currency"), + "max_open_trades": config.get("max_open_trades"), + "pairs": config.get("exchange", {}).get("pair_whitelist"), } with params_record_path.open("w") as handle: @@ -172,7 +176,7 @@ def record_params(config: Dict[str, Any], full_path: Path) -> None: handle, indent=4, default=str, - number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN + number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN, ) @@ -191,10 +195,11 @@ def get_timerange_backtest_live_models(config: Config) -> str: def get_tb_logger(model_type: str, path: Path, activate: bool) -> Any: - if model_type == "pytorch" and activate: from freqtrade.freqai.tensorboard import TBLogger + return TBLogger(path, activate) else: from freqtrade.freqai.tensorboard.base_tensorboard import BaseTensorboardLogger + return BaseTensorboardLogger(path, activate) diff --git a/freqtrade/freqtradebot.py b/freqtrade/freqtradebot.py index aa233c3ff..8d53097a5 100644 --- a/freqtrade/freqtradebot.py +++ b/freqtrade/freqtradebot.py @@ -1,6 +1,7 @@ """ Freqtrade is the main module of this bot. It contains the class Freqtrade() """ + import logging import traceback from copy import deepcopy @@ -18,12 +19,29 @@ from freqtrade.constants import BuySell, Config, EntryExecuteMode, ExchangeConfi from freqtrade.data.converter import order_book_to_dataframe from freqtrade.data.dataprovider import DataProvider from freqtrade.edge import Edge -from freqtrade.enums import (ExitCheckTuple, ExitType, RPCMessageType, SignalDirection, State, - TradingMode) -from freqtrade.exceptions import (DependencyException, ExchangeError, InsufficientFundsError, - InvalidOrderException, PricingError) -from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, remove_exchange_credentials, - timeframe_to_minutes, timeframe_to_next_date, timeframe_to_seconds) +from freqtrade.enums import ( + ExitCheckTuple, + ExitType, + RPCMessageType, + SignalDirection, + State, + TradingMode, +) +from freqtrade.exceptions import ( + DependencyException, + ExchangeError, + InsufficientFundsError, + InvalidOrderException, + PricingError, +) +from freqtrade.exchange import ( + ROUND_DOWN, + ROUND_UP, + remove_exchange_credentials, + timeframe_to_minutes, + timeframe_to_next_date, + timeframe_to_seconds, +) from freqtrade.misc import safe_value_fallback, safe_value_fallback2 from freqtrade.mixins import LoggingMixin from freqtrade.persistence import Order, PairLocks, Trade, init_db @@ -33,12 +51,18 @@ from freqtrade.plugins.protectionmanager import ProtectionManager from freqtrade.resolvers import ExchangeResolver, StrategyResolver from freqtrade.rpc import RPCManager from freqtrade.rpc.external_message_consumer import ExternalMessageConsumer -from freqtrade.rpc.rpc_types import (ProfitLossStr, RPCCancelMsg, RPCEntryMsg, RPCExitCancelMsg, - RPCExitMsg, RPCProtectionMsg) +from freqtrade.rpc.rpc_types import ( + ProfitLossStr, + RPCCancelMsg, + 
RPCEntryMsg, + RPCExitCancelMsg, + RPCExitMsg, + RPCProtectionMsg, +) from freqtrade.strategy.interface import IStrategy from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper from freqtrade.util import MeasureTime -from freqtrade.util.migrations import migrate_binance_futures_names +from freqtrade.util.migrations.binance_mig import migrate_binance_futures_names from freqtrade.wallets import Wallets @@ -64,9 +88,9 @@ class FreqtradeBot(LoggingMixin): # Init objects self.config = config - exchange_config: ExchangeConfig = deepcopy(config['exchange']) + exchange_config: ExchangeConfig = deepcopy(config["exchange"]) # Remove credentials from original exchange config to avoid accidental credential exposure - remove_exchange_credentials(config['exchange'], True) + remove_exchange_credentials(config["exchange"], True) self.strategy: IStrategy = StrategyResolver.load_strategy(self.config) @@ -74,15 +98,16 @@ class FreqtradeBot(LoggingMixin): validate_config_consistency(config) self.exchange = ExchangeResolver.load_exchange( - self.config, exchange_config=exchange_config, load_leverage_tiers=True) + self.config, exchange_config=exchange_config, load_leverage_tiers=True + ) - init_db(self.config['db_url']) + init_db(self.config["db_url"]) self.wallets = Wallets(self.config, self.exchange) - PairLocks.timeframe = self.config['timeframe'] + PairLocks.timeframe = self.config["timeframe"] - self.trading_mode: TradingMode = self.config.get('trading_mode', TradingMode.SPOT) + self.trading_mode: TradingMode = self.config.get("trading_mode", TradingMode.SPOT) self.last_process: Optional[datetime] = None # RPC runs in separate threads, can start handling external commands just after @@ -103,17 +128,23 @@ class FreqtradeBot(LoggingMixin): self.strategy.wallets = self.wallets # Initializing Edge only if enabled - self.edge = Edge(self.config, self.exchange, self.strategy) if \ - self.config.get('edge', {}).get('enabled', False) else None + self.edge = ( + Edge(self.config, self.exchange, self.strategy) + if self.config.get("edge", {}).get("enabled", False) + else None + ) # Init ExternalMessageConsumer if enabled - self.emc = ExternalMessageConsumer(self.config, self.dataprovider) if \ - self.config.get('external_message_consumer', {}).get('enabled', False) else None + self.emc = ( + ExternalMessageConsumer(self.config, self.dataprovider) + if self.config.get("external_message_consumer", {}).get("enabled", False) + else None + ) self.active_pair_whitelist = self._refresh_active_whitelist() # Set initial bot state from config - initial_state = self.config.get('initial_state') + initial_state = self.config.get("initial_state") self.state = State[initial_state.upper()] if initial_state else State.STOPPED # Protect exit-logic from forcesell and vice versa @@ -143,8 +174,8 @@ class FreqtradeBot(LoggingMixin): def log_took_too_long(duration: float, time_limit: float): logger.warning( - f"Strategy analysis took {duration:.2f}, which is 25% of the timeframe. " - "This can lead to delayed orders and missed signals." + f"Strategy analysis took {duration:.2f}s, more than 25% of the timeframe " + f"({time_limit:.2f}s). This can lead to delayed orders and missed signals. " "Consider either reducing the amount of work your strategy performs " "or reduce the amount of pairs in the Pairlist." ) @@ -156,26 +187,23 @@ class FreqtradeBot(LoggingMixin): Public method for users of this class (worker, etc.) to send notifications via RPC about changes in the bot status.
""" - self.rpc.send_msg({ - 'type': msg_type, - 'status': msg - }) + self.rpc.send_msg({"type": msg_type, "status": msg}) def cleanup(self) -> None: """ Cleanup pending resources on an already stopped bot :return: None """ - logger.info('Cleaning up modules ...') + logger.info("Cleaning up modules ...") try: # Wrap db activities in shutdown to avoid problems if database is gone, # and raises further exceptions. - if self.config['cancel_open_orders_on_exit']: + if self.config["cancel_open_orders_on_exit"]: self.cancel_all_open_orders() self.check_for_open_trades() except Exception as e: - logger.warning(f'Exception during cleanup: {e.__class__.__name__} {e}') + logger.warning(f"Exception during cleanup: {e.__class__.__name__} {e}") finally: self.strategy.ft_bot_cleanup() @@ -229,11 +257,14 @@ class FreqtradeBot(LoggingMixin): self.active_pair_whitelist = self._refresh_active_whitelist(trades) # Refreshing candles - self.dataprovider.refresh(self.pairlists.create_pair_list(self.active_pair_whitelist), - self.strategy.gather_informative_pairs()) + self.dataprovider.refresh( + self.pairlists.create_pair_list(self.active_pair_whitelist), + self.strategy.gather_informative_pairs(), + ) strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)( - current_time=datetime.now(timezone.utc)) + current_time=datetime.now(timezone.utc) + ) with self._measure_execution: self.strategy.analyze(self.active_pair_whitelist) @@ -250,12 +281,12 @@ class FreqtradeBot(LoggingMixin): # First process current opened trades (positions) self.exit_positions(trades) - # Check if we need to adjust our current positions before attempting to buy new trades. + # Check if we need to adjust our current positions before attempting to enter new trades. if self.strategy.position_adjustment_enable: with self._exit_lock: self.process_open_trade_positions() - # Then looking for buy opportunities + # Then looking for entry opportunities if self.get_free_open_trades(): self.enter_positions() if self.trading_mode == TradingMode.FUTURES: @@ -268,7 +299,7 @@ class FreqtradeBot(LoggingMixin): """ Close all orders that were left open """ - if self.config['cancel_open_orders_on_exit']: + if self.config["cancel_open_orders_on_exit"]: self.cancel_all_open_orders() def check_for_open_trades(self): @@ -280,13 +311,12 @@ class FreqtradeBot(LoggingMixin): if len(open_trades) != 0 and self.state != State.RELOAD_CONFIG: msg = { - 'type': RPCMessageType.WARNING, - 'status': - f"{len(open_trades)} open trades active.\n\n" - f"Handle these trades manually on {self.exchange.name}, " - f"or '/start' the bot again and use '/stopentry' " - f"to handle open trades gracefully. \n" - f"{'Note: Trades are simulated (dry run).' if self.config['dry_run'] else ''}", + "type": RPCMessageType.WARNING, + "status": f"{len(open_trades)} open trades active.\n\n" + f"Handle these trades manually on {self.exchange.name}, " + f"or '/start' the bot again and use '/stopentry' " + f"to handle open trades gracefully. \n" + f"{'Note: Trades are simulated (dry run).' 
if self.config['dry_run'] else ''}", } self.rpc.send_msg(msg) @@ -312,7 +342,7 @@ class FreqtradeBot(LoggingMixin): # Called last to include the included pairs if _prev_whitelist != _whitelist: - self.rpc.send_msg({'type': RPCMessageType.WHITELIST, 'data': _whitelist}) + self.rpc.send_msg({"type": RPCMessageType.WHITELIST, "data": _whitelist}) return _whitelist @@ -322,7 +352,7 @@ class FreqtradeBot(LoggingMixin): max number of open trades reached """ open_trades = Trade.get_open_trade_count() - return max(0, self.config['max_open_trades'] - open_trades) + return max(0, self.config["max_open_trades"] - open_trades) def update_funding_fees(self) -> None: if self.trading_mode == TradingMode.FUTURES: @@ -333,11 +363,11 @@ class FreqtradeBot(LoggingMixin): pair=trade.pair, amount=trade.amount, is_short=trade.is_short, - open_date=trade.date_last_filled_utc) + open_date=trade.date_last_filled_utc, + ) ) def startup_backpopulate_precision(self) -> None: - trades = Trade.get_trades([Trade.contract_size.is_(None)]) for trade in trades: if trade.exchange != self.exchange.id: @@ -353,7 +383,7 @@ class FreqtradeBot(LoggingMixin): Updates open orders based on order list kept in the database. Mainly updates the state of orders - but may also close trades """ - if self.config['dry_run'] or self.config['exchange'].get('skip_open_order_update', False): + if self.config["dry_run"] or self.config["exchange"].get("skip_open_order_update", False): # Updating open orders in dry-run does not make sense and will fail. return @@ -361,8 +391,9 @@ class FreqtradeBot(LoggingMixin): logger.info(f"Updating {len(orders)} open orders.") for order in orders: try: - fo = self.exchange.fetch_order_or_stoploss_order(order.order_id, order.ft_pair, - order.ft_order_side == 'stoploss') + fo = self.exchange.fetch_order_or_stoploss_order( + order.order_id, order.ft_pair, order.ft_order_side == "stoploss" + ) if not order.trade: # This should not happen, but it does if trades were deleted manually. # This can only incur on sqlite, which doesn't enforce foreign constraints. @@ -372,22 +403,26 @@ class FreqtradeBot(LoggingMixin): f"The expected trade ID is {order.ft_trade_id}. Ignoring this order." ) continue - self.update_trade_state(order.trade, order.order_id, fo, - stoploss_order=(order.ft_order_side == 'stoploss')) + self.update_trade_state( + order.trade, + order.order_id, + fo, + stoploss_order=(order.ft_order_side == "stoploss"), + ) except InvalidOrderException as e: logger.warning(f"Error updating Order {order.order_id} due to {e}.") if order.order_date_utc - timedelta(days=5) < datetime.now(timezone.utc): logger.warning( - "Order is older than 5 days. Assuming order was fully cancelled.") + "Order is older than 5 days. Assuming order was fully cancelled." + ) fo = order.to_ccxt_object() - fo['status'] = 'canceled' + fo["status"] = "canceled" self.handle_cancel_order( - fo, order, order.trade, constants.CANCEL_REASON['TIMEOUT'] + fo, order, order.trade, constants.CANCEL_REASON["TIMEOUT"] ) except ExchangeError as e: - logger.warning(f"Error updating Order {order.order_id} due to {e}") def update_trades_without_assigned_fees(self) -> None: @@ -395,7 +430,7 @@ class FreqtradeBot(LoggingMixin): Update closed trades without close fees assigned. Only acts when Orders are in the database, otherwise the last order-id is unknown. """ - if self.config['dry_run']: + if self.config["dry_run"]: # Updating open orders in dry-run does not make sense and will fail. 
return @@ -405,15 +440,18 @@ class FreqtradeBot(LoggingMixin): # Get sell fee order = trade.select_order(trade.exit_side, False, only_filled=True) if not order: - order = trade.select_order('stoploss', False) + order = trade.select_order("stoploss", False) if order: logger.info( f"Updating {trade.exit_side}-fee on trade {trade}" f"for order {order.order_id}." ) - self.update_trade_state(trade, order.order_id, - stoploss_order=order.ft_order_side == 'stoploss', - send_msg=False) + self.update_trade_state( + trade, + order.order_id, + stoploss_order=order.ft_order_side == "stoploss", + send_msg=False, + ) trades = Trade.get_open_trades_without_assigned_fees() for trade in trades: @@ -442,29 +480,33 @@ class FreqtradeBot(LoggingMixin): logger.debug(f"Order {order} is no longer open.") continue try: - fo = self.exchange.fetch_order_or_stoploss_order(order.order_id, order.ft_pair, - order.ft_order_side == 'stoploss') + fo = self.exchange.fetch_order_or_stoploss_order( + order.order_id, order.ft_pair, order.ft_order_side == "stoploss" + ) if fo: logger.info(f"Found {order} for trade {trade}.") - self.update_trade_state(trade, order.order_id, fo, - stoploss_order=order.ft_order_side == 'stoploss') + self.update_trade_state( + trade, order.order_id, fo, stoploss_order=order.ft_order_side == "stoploss" + ) except ExchangeError: logger.warning(f"Error updating {order.order_id}.") - def handle_onexchange_order(self, trade: Trade): + def handle_onexchange_order(self, trade: Trade) -> bool: """ Try refinding a order that is not in the database. Only used balance disappeared, which would make exiting impossible. + :return: True if the trade was deleted, False otherwise """ try: orders = self.exchange.fetch_orders( - trade.pair, trade.open_date_utc - timedelta(seconds=10)) + trade.pair, trade.open_date_utc - timedelta(seconds=10) + ) prev_exit_reason = trade.exit_reason prev_trade_state = trade.is_open prev_trade_amount = trade.amount for order in orders: - trade_order = [o for o in trade.orders if o.order_id == order['id']] + trade_order = [o for o in trade.orders if o.order_id == order["id"]] if trade_order: # We knew this order, but didn't have it updated properly @@ -472,15 +514,16 @@ class FreqtradeBot(LoggingMixin): else: logger.info(f"Found previously unknown order {order['id']} for {trade.pair}.") - order_obj = Order.parse_from_ccxt_object(order, trade.pair, order['side']) + order_obj = Order.parse_from_ccxt_object(order, trade.pair, order["side"]) order_obj.order_filled_date = datetime.fromtimestamp( - safe_value_fallback(order, 'lastTradeTimestamp', 'timestamp') // 1000, - tz=timezone.utc) + safe_value_fallback(order, "lastTradeTimestamp", "timestamp") // 1000, + tz=timezone.utc, + ) trade.orders.append(order_obj) Trade.commit() trade.exit_reason = ExitType.SOLD_ON_EXCHANGE.value - self.update_trade_state(trade, order['id'], order, send_msg=False) + self.update_trade_state(trade, order["id"], order, send_msg=False) logger.info(f"handled order {order['id']}") @@ -489,13 +532,29 @@ class FreqtradeBot(LoggingMixin): if not trade.is_open: # Trade was just closed trade.close_date = trade.date_last_filled_utc - self.order_close_notify(trade, order_obj, - order_obj.ft_order_side == 'stoploss', - send_msg=prev_trade_state != trade.is_open) + self.order_close_notify( + trade, + order_obj, + order_obj.ft_order_side == "stoploss", + send_msg=prev_trade_state != trade.is_open, + ) else: trade.exit_reason = prev_exit_reason total = self.wallets.get_total(trade.base_currency) if trade.base_currency else 0 if 
total < trade.amount: + if trade.fully_canceled_entry_order_count == len(trade.orders): + logger.warning( + f"Trade only had fully canceled entry orders. " + f"Removing {trade} from database." + ) + + self._notify_enter_cancel( + trade, + order_type=self.strategy.order_types["entry"], + reason=constants.CANCEL_REASON["FULLY_CANCELLED"], + ) + trade.delete() + return True if total > trade.amount * 0.98: logger.warning( f"{trade} has a total of {trade.amount} {trade.base_currency}, " @@ -521,9 +580,11 @@ class FreqtradeBot(LoggingMixin): except Exception: # catching https://github.com/freqtrade/freqtrade/issues/9025 logger.warning("Error finding onexchange order", exc_info=True) -# -# BUY / enter positions / open trades logic and methods -# + return False + + # + # enter positions / open trades logic and methods + # def enter_positions(self) -> int: """ @@ -539,21 +600,26 @@ class FreqtradeBot(LoggingMixin): for trade in Trade.get_open_trades(): if trade.pair in whitelist: whitelist.remove(trade.pair) - logger.debug('Ignoring %s in pair whitelist', trade.pair) + logger.debug("Ignoring %s in pair whitelist", trade.pair) if not whitelist: - self.log_once("No currency pair in active pair whitelist, " - "but checking to exit open trades.", logger.info) + self.log_once( + "No currency pair in active pair whitelist, but checking to exit open trades.", + logger.info, + ) return trades_created - if PairLocks.is_global_lock(side='*'): + if PairLocks.is_global_lock(side="*"): # This only checks for total locks (both sides). # per-side locks will be evaluated by `is_pair_locked` within create_trade, # once the direction for the trade is clear. - lock = PairLocks.get_pair_longest_lock('*') + lock = PairLocks.get_pair_longest_lock("*") if lock: - self.log_once(f"Global pairlock active until " - f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. " - f"Not creating new trades, reason: {lock.reason}.", logger.info) + self.log_once( + f"Global pairlock active until " + f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)}. " + f"Not creating new trades, reason: {lock.reason}.", + logger.info, + ) else: self.log_once("Global pairlock active. Not creating new trades.", logger.info) return trades_created @@ -563,7 +629,7 @@ class FreqtradeBot(LoggingMixin): with self._exit_lock: trades_created += self.create_trade(pair) except DependencyException as exception: - logger.warning('Unable to create trade for %s: %s', pair, exception) + logger.warning("Unable to create trade for %s: %s", pair, exception) if not trades_created: logger.debug("Found no enter signals for whitelisted currencies. Trying again...") @@ -572,17 +638,17 @@ class FreqtradeBot(LoggingMixin): def create_trade(self, pair: str) -> bool: """ - Check the implemented trading strategy for buy signals. + Check the implemented trading strategy for entry signals. - If the pair triggers the buy signal a new trade record gets created - and the buy-order opening the trade gets issued towards the exchange. + If the pair triggers the enter signal a new trade record gets created + and the entry-order opening the trade gets issued towards the exchange. :return: True if a trade has been created. 
""" logger.debug(f"create_trade for pair {pair}") analyzed_df, _ = self.dataprovider.get_analyzed_dataframe(pair, self.strategy.timeframe) - nowtime = analyzed_df.iloc[-1]['date'] if len(analyzed_df) > 0 else None + nowtime = analyzed_df.iloc[-1]["date"] if len(analyzed_df) > 0 else None # get_free_open_trades is checked before create_trade is called # but it is still used here to prevent opening too many trades within one iteration @@ -592,50 +658,49 @@ class FreqtradeBot(LoggingMixin): # running get_signal on historical data fetched (signal, enter_tag) = self.strategy.get_entry_signal( - pair, - self.strategy.timeframe, - analyzed_df + pair, self.strategy.timeframe, analyzed_df ) if signal: if self.strategy.is_pair_locked(pair, candle_date=nowtime, side=signal): lock = PairLocks.get_pair_longest_lock(pair, nowtime, signal) if lock: - self.log_once(f"Pair {pair} {lock.side} is locked until " - f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)} " - f"due to {lock.reason}.", - logger.info) + self.log_once( + f"Pair {pair} {lock.side} is locked until " + f"{lock.lock_end_time.strftime(constants.DATETIME_PRINT_FORMAT)} " + f"due to {lock.reason}.", + logger.info, + ) else: self.log_once(f"Pair {pair} is currently locked.", logger.info) return False stake_amount = self.wallets.get_trade_stake_amount( - pair, self.config['max_open_trades'], self.edge) + pair, self.config["max_open_trades"], self.edge + ) - bid_check_dom = self.config.get('entry_pricing', {}).get('check_depth_of_market', {}) - if ((bid_check_dom.get('enabled', False)) and - (bid_check_dom.get('bids_to_ask_delta', 0) > 0)): + bid_check_dom = self.config.get("entry_pricing", {}).get("check_depth_of_market", {}) + if (bid_check_dom.get("enabled", False)) and ( + bid_check_dom.get("bids_to_ask_delta", 0) > 0 + ): if self._check_depth_of_market(pair, bid_check_dom, side=signal): return self.execute_entry( pair, stake_amount, enter_tag=enter_tag, - is_short=(signal == SignalDirection.SHORT) + is_short=(signal == SignalDirection.SHORT), ) else: return False return self.execute_entry( - pair, - stake_amount, - enter_tag=enter_tag, - is_short=(signal == SignalDirection.SHORT) + pair, stake_amount, enter_tag=enter_tag, is_short=(signal == SignalDirection.SHORT) ) else: return False -# -# BUY / increase positions / DCA logic and methods -# + # + # Modify positions / DCA logic and methods + # def process_open_trade_positions(self): """ Tries to execute additional buy or sell orders for open trades (positions) @@ -651,7 +716,8 @@ class FreqtradeBot(LoggingMixin): self.check_and_call_adjust_trade_position(trade) except DependencyException as exception: logger.warning( - f"Unable to adjust position of trade for {trade.pair}: {exception}") + f"Unable to adjust position of trade for {trade.pair}: {exception}" + ) def check_and_call_adjust_trade_position(self, trade: Trade): """ @@ -660,27 +726,32 @@ class FreqtradeBot(LoggingMixin): Once that completes, the existing trade is modified to match new data. 
""" current_entry_rate, current_exit_rate = self.exchange.get_rates( - trade.pair, True, trade.is_short) + trade.pair, True, trade.is_short + ) current_entry_profit = trade.calc_profit_ratio(current_entry_rate) current_exit_profit = trade.calc_profit_ratio(current_exit_rate) - min_entry_stake = self.exchange.get_min_pair_stake_amount(trade.pair, - current_entry_rate, - 0.0) - min_exit_stake = self.exchange.get_min_pair_stake_amount(trade.pair, - current_exit_rate, - self.strategy.stoploss) + min_entry_stake = self.exchange.get_min_pair_stake_amount( + trade.pair, current_entry_rate, 0.0 + ) + min_exit_stake = self.exchange.get_min_pair_stake_amount( + trade.pair, current_exit_rate, self.strategy.stoploss + ) max_entry_stake = self.exchange.get_max_pair_stake_amount(trade.pair, current_entry_rate) stake_available = self.wallets.get_available_stake_amount() logger.debug(f"Calling adjust_trade_position for pair {trade.pair}") stake_amount, order_tag = self.strategy._adjust_trade_position_internal( trade=trade, - current_time=datetime.now(timezone.utc), current_rate=current_entry_rate, - current_profit=current_entry_profit, min_stake=min_entry_stake, + current_time=datetime.now(timezone.utc), + current_rate=current_entry_rate, + current_profit=current_entry_profit, + min_stake=min_entry_stake, max_stake=min(max_entry_stake, stake_available), - current_entry_rate=current_entry_rate, current_exit_rate=current_exit_rate, - current_entry_profit=current_entry_profit, current_exit_profit=current_exit_profit + current_entry_rate=current_entry_rate, + current_exit_rate=current_exit_rate, + current_entry_profit=current_entry_profit, + current_exit_profit=current_exit_profit, ) if stake_amount is not None and stake_amount > 0.0: @@ -692,15 +763,21 @@ class FreqtradeBot(LoggingMixin): return else: logger.debug("Max adjustment entries is set to unlimited.") - self.execute_entry(trade.pair, stake_amount, price=current_entry_rate, - trade=trade, is_short=trade.is_short, mode='pos_adjust', - enter_tag=order_tag) + self.execute_entry( + trade.pair, + stake_amount, + price=current_entry_rate, + trade=trade, + is_short=trade.is_short, + mode="pos_adjust", + enter_tag=order_tag, + ) if stake_amount is not None and stake_amount < 0.0: # We should decrease our position amount = self.exchange.amount_to_contract_precision( - trade.pair, - abs(float(stake_amount * trade.amount / trade.stake_amount))) + trade.pair, abs(float(stake_amount * trade.amount / trade.stake_amount)) + ) if amount == 0.0: logger.info("Amount to exit is 0.0 due to exchange limits - not exiting.") @@ -708,23 +785,30 @@ class FreqtradeBot(LoggingMixin): remaining = (trade.amount - amount) * current_exit_rate if min_exit_stake and remaining != 0 and remaining < min_exit_stake: - logger.info(f"Remaining amount of {remaining} would be smaller " - f"than the minimum of {min_exit_stake}.") + logger.info( + f"Remaining amount of {remaining} would be smaller " + f"than the minimum of {min_exit_stake}." 
+ ) return - self.execute_trade_exit(trade, current_exit_rate, exit_check=ExitCheckTuple( - exit_type=ExitType.PARTIAL_EXIT), sub_trade_amt=amount, exit_tag=order_tag) + self.execute_trade_exit( + trade, + current_exit_rate, + exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), + sub_trade_amt=amount, + exit_tag=order_tag, + ) def _check_depth_of_market(self, pair: str, conf: Dict, side: SignalDirection) -> bool: """ - Checks depth of market before executing a buy + Checks depth of market before executing an entry """ - conf_bids_to_ask_delta = conf.get('bids_to_ask_delta', 0) + conf_bids_to_ask_delta = conf.get("bids_to_ask_delta", 0) logger.info(f"Checking depth of market for {pair} ...") order_book = self.exchange.fetch_l2_order_book(pair, 1000) - order_book_data_frame = order_book_to_dataframe(order_book['bids'], order_book['asks']) - order_book_bids = order_book_data_frame['b_size'].sum() - order_book_asks = order_book_data_frame['a_size'].sum() + order_book_data_frame = order_book_to_dataframe(order_book["bids"], order_book["asks"]) + order_book_bids = order_book_data_frame["b_size"].sum() + order_book_asks = order_book_data_frame["a_size"].sum() entry_side = order_book_bids if side == SignalDirection.LONG else order_book_asks exit_side = order_book_asks if side == SignalDirection.LONG else order_book_bids @@ -757,47 +841,58 @@ ordertype: Optional[str] = None, enter_tag: Optional[str] = None, trade: Optional[Trade] = None, - mode: EntryExecuteMode = 'initial', + mode: EntryExecuteMode = "initial", leverage_: Optional[float] = None, ) -> bool: """ - Executes a limit buy for the given pair - :param pair: pair for which we want to create a LIMIT_BUY + Executes an entry for the given pair + :param pair: pair for which we want to create a LIMIT order :param stake_amount: amount of stake-currency for the pair - :return: True if a buy order is created, false if it fails. + :return: True if an entry order is created, False if it fails. :raise: DependencyException or it's subclasses like ExchangeError. """ - time_in_force = self.strategy.order_time_in_force['entry'] + time_in_force = self.strategy.order_time_in_force["entry"] - side: BuySell = 'sell' if is_short else 'buy' - name = 'Short' if is_short else 'Long' - trade_side: LongShort = 'short' if is_short else 'long' + side: BuySell = "sell" if is_short else "buy" + name = "Short" if is_short else "Long" + trade_side: LongShort = "short" if is_short else "long" pos_adjust = trade is not None enter_limit_requested, stake_amount, leverage = self.get_valid_enter_price_and_stake( - pair, price, stake_amount, trade_side, enter_tag, trade, mode, leverage_) + pair, price, stake_amount, trade_side, enter_tag, trade, mode, leverage_ + ) if not stake_amount: return False - msg = (f"Position adjust: about to create a new order for {pair} with stake_amount: " - f"{stake_amount} for {trade}" if mode == 'pos_adjust' - else - (f"Replacing {side} order: about create a new order for {pair} with stake_amount: " + msg = ( + f"Position adjust: about to create a new order for {pair} with stake_amount: " + f"{stake_amount} for {trade}" + if mode == "pos_adjust" + else ( + f"Replacing {side} order: about to create a new order for {pair} with stake_amount: " f"{stake_amount} ..." - if mode == 'replace' else - f"{name} signal found: about create a new trade for {pair} with stake_amount: " + if mode == "replace" + else f"{name} signal found: about to create a new trade for {pair} with stake_amount: " f"{stake_amount} ..."
- )) + ) + ) logger.info(msg) amount = (stake_amount / enter_limit_requested) * leverage - order_type = ordertype or self.strategy.order_types['entry'] + order_type = ordertype or self.strategy.order_types["entry"] - if mode == 'initial' and not strategy_safe_wrapper( - self.strategy.confirm_trade_entry, default_retval=True)( - pair=pair, order_type=order_type, amount=amount, rate=enter_limit_requested, - time_in_force=time_in_force, current_time=datetime.now(timezone.utc), - entry_tag=enter_tag, side=trade_side): + if mode == "initial" and not strategy_safe_wrapper( + self.strategy.confirm_trade_entry, default_retval=True + )( + pair=pair, + order_type=order_type, + amount=amount, + rate=enter_limit_requested, + time_in_force=time_in_force, + current_time=datetime.now(timezone.utc), + entry_tag=enter_tag, + side=trade_side, + ): logger.info(f"User denied entry for {pair}.") return False order = self.exchange.create_order( @@ -808,48 +903,58 @@ pair=pair, ordertype=order_type, side=side, amount=amount, rate=enter_limit_requested, reduceOnly=False, time_in_force=time_in_force, - leverage=leverage + leverage=leverage, ) order_obj = Order.parse_from_ccxt_object(order, pair, side, amount, enter_limit_requested) order_obj.ft_order_tag = enter_tag - order_id = order['id'] - order_status = order.get('status') + order_id = order["id"] + order_status = order.get("status") logger.info(f"Order {order_id} was created for {pair} and status is {order_status}.") # we assume the order is executed at the price requested enter_limit_filled_price = enter_limit_requested amount_requested = amount - if order_status == 'expired' or order_status == 'rejected': - + if order_status == "expired" or order_status == "rejected": # return false if the order is not filled - if float(order['filled']) == 0: - logger.warning(f'{name} {time_in_force} order with time in force {order_type} ' - f'for {pair} is {order_status} by {self.exchange.name}.' - ' zero amount is fulfilled.') + if float(order["filled"]) == 0: + logger.warning( + f"{name} {order_type} order with time in force {time_in_force} " + f"for {pair} is {order_status} by {self.exchange.name}." + " Zero amount is fulfilled." + ) return False else: # the order is partially fulfilled # in case of IOC orders we can check immediately # if the order is fulfilled fully or partially - logger.warning('%s %s order with time in force %s for %s is %s by %s.' - ' %s amount fulfilled out of %s (%s remaining which is canceled).', - name, time_in_force, order_type, pair, order_status, - self.exchange.name, order['filled'], order['amount'], - order['remaining'] - ) - amount = safe_value_fallback(order, 'filled', 'amount', amount) + logger.warning( + "%s %s order with time in force %s for %s is %s by %s."
+ " %s amount fulfilled out of %s (%s remaining which is canceled).", + name, + time_in_force, + order_type, + pair, + order_status, + self.exchange.name, + order["filled"], + order["amount"], + order["remaining"], + ) + amount = safe_value_fallback(order, "filled", "amount", amount) enter_limit_filled_price = safe_value_fallback( - order, 'average', 'price', enter_limit_filled_price) + order, "average", "price", enter_limit_filled_price + ) # in case of FOK the order may be filled immediately and fully - elif order_status == 'closed': - amount = safe_value_fallback(order, 'filled', 'amount', amount) + elif order_status == "closed": + amount = safe_value_fallback(order, "filled", "amount", amount) enter_limit_filled_price = safe_value_fallback( - order, 'average', 'price', enter_limit_requested) + order, "average", "price", enter_limit_requested + ) # Fee is applied twice because we make a LIMIT_BUY and LIMIT_SELL - fee = self.exchange.get_fee(symbol=pair, taker_or_maker='maker') + fee = self.exchange.get_fee(symbol=pair, taker_or_maker="maker") base_currency = self.exchange.get_pair_base_currency(pair) open_date = datetime.now(timezone.utc) @@ -857,16 +962,15 @@ class FreqtradeBot(LoggingMixin): pair=pair, amount=amount + trade.amount if trade else amount, is_short=is_short, - open_date=trade.date_last_filled_utc if trade else open_date + open_date=trade.date_last_filled_utc if trade else open_date, ) # This is a new trade if trade is None: - trade = Trade( pair=pair, base_currency=base_currency, - stake_currency=self.config['stake_currency'], + stake_currency=self.config["stake_currency"], stake_amount=stake_amount, amount=amount, is_open=True, @@ -879,7 +983,7 @@ class FreqtradeBot(LoggingMixin): exchange=self.exchange.id, strategy=self.strategy.get_strategy_name(), enter_tag=enter_tag, - timeframe=timeframe_to_minutes(self.config['timeframe']), + timeframe=timeframe_to_minutes(self.config["timeframe"]), leverage=leverage, is_short=is_short, trading_mode=self.trading_mode, @@ -893,7 +997,7 @@ class FreqtradeBot(LoggingMixin): trade.adjust_stop_loss(trade.open_rate, stoploss, initial=True) else: - # This is additional buy, we reset fee_open_currency so timeout checking can work + # This is additional entry, we reset fee_open_currency so timeout checking can work trade.is_open = True trade.fee_open_currency = None trade.open_rate_requested = enter_limit_requested @@ -910,7 +1014,7 @@ class FreqtradeBot(LoggingMixin): self._notify_enter(trade, order_obj, order_type, sub_trade=pos_adjust) if pos_adjust: - if order_status == 'closed': + if order_status == "closed": logger.info(f"DCA order closed, trade should be up to date: {trade}") trade = self.cancel_stoploss_on_exchange(trade) else: @@ -918,7 +1022,13 @@ class FreqtradeBot(LoggingMixin): # Update fees if order is non-opened if order_status in constants.NON_OPEN_EXCHANGE_STATES: - self.update_trade_state(trade, order_id, order) + fully_canceled = self.update_trade_state(trade, order_id, order) + if fully_canceled and mode != "replace": + # Fully canceled orders, may happen with some time in force setups (IOC). + # Should be handled immediately. + self.handle_cancel_enter( + trade, order, order_obj, constants.CANCEL_REASON["TIMEOUT"] + ) return True @@ -926,18 +1036,22 @@ class FreqtradeBot(LoggingMixin): # First cancelling stoploss on exchange ... 
for oslo in trade.open_sl_orders: try: - logger.info(f"Cancelling stoploss on exchange for {trade} " - f"order: {oslo.order_id}") + logger.info(f"Cancelling stoploss on exchange for {trade} order: {oslo.order_id}") co = self.exchange.cancel_stoploss_order_with_result( - oslo.order_id, trade.pair, trade.amount) + oslo.order_id, trade.pair, trade.amount + ) self.update_trade_state(trade, oslo.order_id, co, stoploss_order=True) except InvalidOrderException: - logger.exception(f"Could not cancel stoploss order {oslo.order_id} " - f"for pair {trade.pair}") + logger.exception( + f"Could not cancel stoploss order {oslo.order_id} for pair {trade.pair}" + ) return trade def get_valid_enter_price_and_stake( - self, pair: str, price: Optional[float], stake_amount: float, + self, + pair: str, + price: Optional[float], + stake_amount: float, trade_side: LongShort, entry_tag: Optional[str], trade: Optional[Trade], @@ -954,21 +1068,25 @@ class FreqtradeBot(LoggingMixin): else: # Calculate price enter_limit_requested = self.exchange.get_rate( - pair, side='entry', is_short=(trade_side == 'short'), refresh=True) - if mode != 'replace': + pair, side="entry", is_short=(trade_side == "short"), refresh=True + ) + if mode != "replace": # Don't call custom_entry_price in order-adjust scenario - custom_entry_price = strategy_safe_wrapper(self.strategy.custom_entry_price, - default_retval=enter_limit_requested)( - pair=pair, trade=trade, + custom_entry_price = strategy_safe_wrapper( + self.strategy.custom_entry_price, default_retval=enter_limit_requested + )( + pair=pair, + trade=trade, current_time=datetime.now(timezone.utc), - proposed_rate=enter_limit_requested, entry_tag=entry_tag, + proposed_rate=enter_limit_requested, + entry_tag=entry_tag, side=trade_side, ) enter_limit_requested = self.get_valid_price(custom_entry_price, enter_limit_requested) if not enter_limit_requested: - raise PricingError('Could not determine entry price.') + raise PricingError("Could not determine entry price.") if self.trading_mode != TradingMode.SPOT and trade is None: max_leverage = self.exchange.get_max_leverage(pair, stake_amount) @@ -981,7 +1099,8 @@ class FreqtradeBot(LoggingMixin): current_rate=enter_limit_requested, proposed_leverage=1.0, max_leverage=max_leverage, - side=trade_side, entry_tag=entry_tag, + side=trade_side, + entry_tag=entry_tag, ) # Cap leverage between 1.0 and max_leverage. leverage = min(max(leverage, 1.0), max_leverage) @@ -994,20 +1113,29 @@ class FreqtradeBot(LoggingMixin): # We do however also need min-stake to determine leverage, therefore this is ignored as # edge-case for now. 
min_stake_amount = self.exchange.get_min_pair_stake_amount( - pair, enter_limit_requested, - self.strategy.stoploss if not mode == 'pos_adjust' else 0.0, - leverage) + pair, + enter_limit_requested, + self.strategy.stoploss if not mode == "pos_adjust" else 0.0, + leverage, + ) max_stake_amount = self.exchange.get_max_pair_stake_amount( - pair, enter_limit_requested, leverage) + pair, enter_limit_requested, leverage + ) if not self.edge and trade is None: stake_available = self.wallets.get_available_stake_amount() - stake_amount = strategy_safe_wrapper(self.strategy.custom_stake_amount, - default_retval=stake_amount)( - pair=pair, current_time=datetime.now(timezone.utc), - current_rate=enter_limit_requested, proposed_stake=stake_amount, - min_stake=min_stake_amount, max_stake=min(max_stake_amount, stake_available), - leverage=leverage, entry_tag=entry_tag, side=trade_side + stake_amount = strategy_safe_wrapper( + self.strategy.custom_stake_amount, default_retval=stake_amount + )( + pair=pair, + current_time=datetime.now(timezone.utc), + current_rate=enter_limit_requested, + proposed_stake=stake_amount, + min_stake=min_stake_amount, + max_stake=min(max_stake_amount, stake_available), + leverage=leverage, + entry_tag=entry_tag, + side=trade_side, ) stake_amount = self.wallets.validate_stake_amount( @@ -1020,8 +1148,14 @@ class FreqtradeBot(LoggingMixin): return enter_limit_requested, stake_amount, leverage - def _notify_enter(self, trade: Trade, order: Order, order_type: Optional[str], - fill: bool = False, sub_trade: bool = False) -> None: + def _notify_enter( + self, + trade: Trade, + order: Order, + order_type: Optional[str], + fill: bool = False, + sub_trade: bool = False, + ) -> None: """ Sends rpc notification when a entry order occurred. """ @@ -1031,72 +1165,75 @@ class FreqtradeBot(LoggingMixin): open_rate = trade.open_rate current_rate = self.exchange.get_rate( - trade.pair, side='entry', is_short=trade.is_short, refresh=False) + trade.pair, side="entry", is_short=trade.is_short, refresh=False + ) msg: RPCEntryMsg = { - 'trade_id': trade.id, - 'type': RPCMessageType.ENTRY_FILL if fill else RPCMessageType.ENTRY, - 'buy_tag': trade.enter_tag, - 'enter_tag': trade.enter_tag, - 'exchange': trade.exchange.capitalize(), - 'pair': trade.pair, - 'leverage': trade.leverage if trade.leverage else None, - 'direction': 'Short' if trade.is_short else 'Long', - 'limit': open_rate, # Deprecated (?) - 'open_rate': open_rate, - 'order_type': order_type or 'unknown', - 'stake_amount': trade.stake_amount, - 'stake_currency': self.config['stake_currency'], - 'base_currency': self.exchange.get_pair_base_currency(trade.pair), - 'quote_currency': self.exchange.get_pair_quote_currency(trade.pair), - 'fiat_currency': self.config.get('fiat_display_currency', None), - 'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount), - 'open_date': trade.open_date_utc or datetime.now(timezone.utc), - 'current_rate': current_rate, - 'sub_trade': sub_trade, + "trade_id": trade.id, + "type": RPCMessageType.ENTRY_FILL if fill else RPCMessageType.ENTRY, + "buy_tag": trade.enter_tag, + "enter_tag": trade.enter_tag, + "exchange": trade.exchange.capitalize(), + "pair": trade.pair, + "leverage": trade.leverage if trade.leverage else None, + "direction": "Short" if trade.is_short else "Long", + "limit": open_rate, # Deprecated (?) 
+ "open_rate": open_rate, + "order_type": order_type or "unknown", + "stake_amount": trade.stake_amount, + "stake_currency": self.config["stake_currency"], + "base_currency": self.exchange.get_pair_base_currency(trade.pair), + "quote_currency": self.exchange.get_pair_quote_currency(trade.pair), + "fiat_currency": self.config.get("fiat_display_currency", None), + "amount": order.safe_amount_after_fee if fill else (order.amount or trade.amount), + "open_date": trade.open_date_utc or datetime.now(timezone.utc), + "current_rate": current_rate, + "sub_trade": sub_trade, } # Send the message self.rpc.send_msg(msg) - def _notify_enter_cancel(self, trade: Trade, order_type: str, reason: str, - sub_trade: bool = False) -> None: + def _notify_enter_cancel( + self, trade: Trade, order_type: str, reason: str, sub_trade: bool = False + ) -> None: """ Sends rpc notification when a entry order cancel occurred. """ current_rate = self.exchange.get_rate( - trade.pair, side='entry', is_short=trade.is_short, refresh=False) + trade.pair, side="entry", is_short=trade.is_short, refresh=False + ) msg: RPCCancelMsg = { - 'trade_id': trade.id, - 'type': RPCMessageType.ENTRY_CANCEL, - 'buy_tag': trade.enter_tag, - 'enter_tag': trade.enter_tag, - 'exchange': trade.exchange.capitalize(), - 'pair': trade.pair, - 'leverage': trade.leverage, - 'direction': 'Short' if trade.is_short else 'Long', - 'limit': trade.open_rate, - 'order_type': order_type, - 'stake_amount': trade.stake_amount, - 'open_rate': trade.open_rate, - 'stake_currency': self.config['stake_currency'], - 'base_currency': self.exchange.get_pair_base_currency(trade.pair), - 'quote_currency': self.exchange.get_pair_quote_currency(trade.pair), - 'fiat_currency': self.config.get('fiat_display_currency', None), - 'amount': trade.amount, - 'open_date': trade.open_date, - 'current_rate': current_rate, - 'reason': reason, - 'sub_trade': sub_trade, + "trade_id": trade.id, + "type": RPCMessageType.ENTRY_CANCEL, + "buy_tag": trade.enter_tag, + "enter_tag": trade.enter_tag, + "exchange": trade.exchange.capitalize(), + "pair": trade.pair, + "leverage": trade.leverage, + "direction": "Short" if trade.is_short else "Long", + "limit": trade.open_rate, + "order_type": order_type, + "stake_amount": trade.stake_amount, + "open_rate": trade.open_rate, + "stake_currency": self.config["stake_currency"], + "base_currency": self.exchange.get_pair_base_currency(trade.pair), + "quote_currency": self.exchange.get_pair_quote_currency(trade.pair), + "fiat_currency": self.config.get("fiat_display_currency", None), + "amount": trade.amount, + "open_date": trade.open_date, + "current_rate": current_rate, + "reason": reason, + "sub_trade": sub_trade, } # Send the message self.rpc.send_msg(msg) -# -# SELL / exit positions / close trades logic and methods -# + # + # SELL / exit positions / close trades logic and methods + # def exit_positions(self, trades: List[Trade]) -> int: """ @@ -1104,34 +1241,38 @@ class FreqtradeBot(LoggingMixin): """ trades_closed = 0 for trade in trades: - if ( not trade.has_open_orders and not trade.has_open_sl_orders and not self.wallets.check_exit_amount(trade) ): logger.warning( - f'Not enough {trade.safe_base_currency} in wallet to exit {trade}. ' - 'Trying to recover.') - self.handle_onexchange_order(trade) + f"Not enough {trade.safe_base_currency} in wallet to exit {trade}. " + "Trying to recover." + ) + if self.handle_onexchange_order(trade): + # Trade was deleted. Don't continue. 
+ continue try: try: - if (self.strategy.order_types.get('stoploss_on_exchange') and - self.handle_stoploss_on_exchange(trade)): + if self.strategy.order_types.get( + "stoploss_on_exchange" + ) and self.handle_stoploss_on_exchange(trade): trades_closed += 1 Trade.commit() continue except InvalidOrderException as exception: logger.warning( - f'Unable to handle stoploss on exchange for {trade.pair}: {exception}') + f"Unable to handle stoploss on exchange for {trade.pair}: {exception}" + ) # Check if we can sell our current pair if not trade.has_open_orders and trade.is_open and self.handle_trade(trade): trades_closed += 1 except DependencyException as exception: - logger.warning(f'Unable to exit trade {trade.pair}: {exception}') + logger.warning(f"Unable to exit trade {trade.pair}: {exception}") # Updating wallets if any trade occurred if trades_closed: @@ -1145,37 +1286,38 @@ class FreqtradeBot(LoggingMixin): :return: True if trade has been sold/exited_short, False otherwise """ if not trade.is_open: - raise DependencyException(f'Attempt to handle closed trade: {trade}') + raise DependencyException(f"Attempt to handle closed trade: {trade}") - logger.debug('Handling %s ...', trade) + logger.debug("Handling %s ...", trade) (enter, exit_) = (False, False) exit_tag = None exit_signal_type = "exit_short" if trade.is_short else "exit_long" - if (self.config.get('use_exit_signal', True) or - self.config.get('ignore_roi_if_entry_signal', False)): - analyzed_df, _ = self.dataprovider.get_analyzed_dataframe(trade.pair, - self.strategy.timeframe) - - (enter, exit_, exit_tag) = self.strategy.get_exit_signal( - trade.pair, - self.strategy.timeframe, - analyzed_df, - is_short=trade.is_short + if self.config.get("use_exit_signal", True) or self.config.get( + "ignore_roi_if_entry_signal", False + ): + analyzed_df, _ = self.dataprovider.get_analyzed_dataframe( + trade.pair, self.strategy.timeframe ) - logger.debug('checking exit') + (enter, exit_, exit_tag) = self.strategy.get_exit_signal( + trade.pair, self.strategy.timeframe, analyzed_df, is_short=trade.is_short + ) + + logger.debug("checking exit") exit_rate = self.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=True) + trade.pair, side="exit", is_short=trade.is_short, refresh=True + ) if self._check_and_execute_exit(trade, exit_rate, enter, exit_, exit_tag): return True - logger.debug(f'Found no {exit_signal_type} signal for %s.', trade) + logger.debug(f"Found no {exit_signal_type} signal for %s.", trade) return False - def _check_and_execute_exit(self, trade: Trade, exit_rate: float, - enter: bool, exit_: bool, exit_tag: Optional[str]) -> bool: + def _check_and_execute_exit( + self, trade: Trade, exit_rate: float, enter: bool, exit_: bool, exit_tag: Optional[str] + ) -> bool: """ Check and execute trade exit """ @@ -1185,13 +1327,15 @@ class FreqtradeBot(LoggingMixin): datetime.now(timezone.utc), enter=enter, exit_=exit_, - force_stoploss=self.edge.get_stoploss(trade.pair) if self.edge else 0 + force_stoploss=self.edge.get_stoploss(trade.pair) if self.edge else 0, ) for should_exit in exits: if should_exit.exit_flag: exit_tag1 = exit_tag if should_exit.exit_type == ExitType.EXIT_SIGNAL else None - logger.info(f'Exit for {trade.pair} detected. Reason: {should_exit.exit_type}' - f'{f" Tag: {exit_tag1}" if exit_tag1 is not None else ""}') + logger.info( + f"Exit for {trade.pair} detected. 
Reason: {should_exit.exit_type}" + f"{f' Tag: {exit_tag1}' if exit_tag1 is not None else ''}" + ) exited = self.execute_trade_exit(trade, exit_rate, should_exit, exit_tag=exit_tag1) if exited: return True @@ -1211,11 +1355,12 @@ class FreqtradeBot(LoggingMixin): stop_price=stop_price, order_types=self.strategy.order_types, side=trade.exit_side, - leverage=trade.leverage + leverage=trade.leverage, ) - order_obj = Order.parse_from_ccxt_object(stoploss_order, trade.pair, 'stoploss', - trade.amount, stop_price) + order_obj = Order.parse_from_ccxt_object( + stoploss_order, trade.pair, "stoploss", trade.amount, stop_price + ) trade.orders.append(order_obj) return True except InsufficientFundsError as e: @@ -1224,12 +1369,12 @@ class FreqtradeBot(LoggingMixin): self.handle_insufficient_funds(trade) except InvalidOrderException as e: - logger.error(f'Unable to place a stoploss order on exchange. {e}') - logger.warning('Exiting the trade forcefully') + logger.error(f"Unable to place a stoploss order on exchange. {e}") + logger.warning("Exiting the trade forcefully") self.emergency_exit(trade, stop_price) except ExchangeError: - logger.exception('Unable to place a stoploss order on exchange.') + logger.exception("Unable to place a stoploss order on exchange.") return False def handle_stoploss_on_exchange(self, trade: Trade) -> bool: @@ -1241,32 +1386,34 @@ class FreqtradeBot(LoggingMixin): # Therefore fetching account liquidations for open pairs may make sense. """ - logger.debug('Handling stoploss on exchange %s ...', trade) + logger.debug("Handling stoploss on exchange %s ...", trade) stoploss_orders = [] for slo in trade.open_sl_orders: stoploss_order = None try: # First we check if there is already a stoploss on exchange - stoploss_order = self.exchange.fetch_stoploss_order( - slo.order_id, trade.pair) if slo.order_id else None + stoploss_order = ( + self.exchange.fetch_stoploss_order(slo.order_id, trade.pair) + if slo.order_id + else None + ) except InvalidOrderException as exception: - logger.warning('Unable to fetch stoploss order: %s', exception) + logger.warning("Unable to fetch stoploss order: %s", exception) if stoploss_order: stoploss_orders.append(stoploss_order) - self.update_trade_state(trade, slo.order_id, stoploss_order, - stoploss_order=True) + self.update_trade_state(trade, slo.order_id, stoploss_order, stoploss_order=True) # We check if stoploss order is fulfilled - if stoploss_order and stoploss_order['status'] in ('closed', 'triggered'): + if stoploss_order and stoploss_order["status"] in ("closed", "triggered"): trade.exit_reason = ExitType.STOPLOSS_ON_EXCHANGE.value self._notify_exit(trade, "stoploss", True) self.handle_protections(trade.pair, trade.trade_direction) return True if trade.has_open_orders or not trade.is_open: - # Trade has an open Buy or Sell order, Stoploss-handling can't happen in this case + # Trade has an open order, Stoploss-handling can't happen in this case # as the Amount on the exchange is tied up in another trade. 
# The trade can be closed already (sell-order fill confirmation came in this iteration) return False @@ -1277,7 +1424,8 @@ class FreqtradeBot(LoggingMixin): if self.edge: stoploss = self.edge.get_stoploss(pair=trade.pair) stop_price = ( - trade.open_rate * (1 - stoploss) if trade.is_short + trade.open_rate * (1 - stoploss) + if trade.is_short else trade.open_rate * (1 + stoploss) ) @@ -1299,28 +1447,34 @@ class FreqtradeBot(LoggingMixin): :return: None """ stoploss_norm = self.exchange.price_to_precision( - trade.pair, trade.stoploss_or_liquidation, - rounding_mode=ROUND_DOWN if trade.is_short else ROUND_UP) + trade.pair, + trade.stoploss_or_liquidation, + rounding_mode=ROUND_DOWN if trade.is_short else ROUND_UP, + ) if self.exchange.stoploss_adjust(stoploss_norm, order, side=trade.exit_side): # we check if the update is necessary - update_beat = self.strategy.order_types.get('stoploss_on_exchange_interval', 60) + update_beat = self.strategy.order_types.get("stoploss_on_exchange_interval", 60) upd_req = datetime.now(timezone.utc) - timedelta(seconds=update_beat) if trade.stoploss_last_update_utc and upd_req >= trade.stoploss_last_update_utc: # cancelling the current stoploss on exchange first - logger.info(f"Cancelling current stoploss on exchange for pair {trade.pair} " - f"(orderid:{order['id']}) in order to add another one ...") + logger.info( + f"Cancelling current stoploss on exchange for pair {trade.pair} " + f"(orderid:{order['id']}) in order to add another one ..." + ) self.cancel_stoploss_on_exchange(trade) if not trade.is_open: logger.warning( - f"Trade {trade} is closed, not creating trailing stoploss order.") + f"Trade {trade} is closed, not creating trailing stoploss order." + ) return # Create new stoploss order if not self.create_stoploss_order(trade=trade, stop_price=stoploss_norm): - logger.warning(f"Could not create trailing stoploss order " - f"for pair {trade.pair}.") + logger.warning( + f"Could not create trailing stoploss order for pair {trade.pair}." + ) def manage_trade_stoploss_orders(self, trade: Trade, stoploss_orders: List[Dict]): """ @@ -1330,27 +1484,32 @@ class FreqtradeBot(LoggingMixin): :return: None """ # If all stoploss ordered are canceled for some reason we add it again - canceled_sl_orders = [o for o in stoploss_orders - if o['status'] in ('canceled', 'cancelled')] + canceled_sl_orders = [ + o for o in stoploss_orders if o["status"] in ("canceled", "cancelled") + ] if ( - trade.is_open and - len(stoploss_orders) > 0 and - len(stoploss_orders) == len(canceled_sl_orders) + trade.is_open + and len(stoploss_orders) > 0 + and len(stoploss_orders) == len(canceled_sl_orders) ): if self.create_stoploss_order(trade=trade, stop_price=trade.stoploss_or_liquidation): return False else: - logger.warning('All Stoploss orders are cancelled, but unable to recreate one.') + logger.warning("All Stoploss orders are cancelled, but unable to recreate one.") active_sl_orders = [o for o in stoploss_orders if o not in canceled_sl_orders] if len(active_sl_orders) > 0: last_active_sl_order = active_sl_orders[-1] # Finally we check if stoploss on exchange should be moved up because of trailing. 
# Triggered Orders are now real orders - so don't replace stoploss anymore - if (trade.is_open and - last_active_sl_order.get('status_stop') != 'triggered' and - (self.config.get('trailing_stop', False) or - self.config.get('use_custom_stoploss', False))): + if ( + trade.is_open + and last_active_sl_order.get("status_stop") != "triggered" + and ( + self.config.get("trailing_stop", False) + or self.config.get("use_custom_stoploss", False) + ) + ): # if trailing stoploss is enabled we check if stoploss value has changed # in which case we cancel stoploss order and put another one with new # value immediately @@ -1371,25 +1530,24 @@ class FreqtradeBot(LoggingMixin): try: order = self.exchange.fetch_order(open_order.order_id, trade.pair) - except (ExchangeError): + except ExchangeError: logger.info( - 'Cannot query order for %s due to %s', trade, traceback.format_exc() + "Cannot query order for %s due to %s", trade, traceback.format_exc() ) continue fully_cancelled = self.update_trade_state(trade, open_order.order_id, order) - not_closed = order['status'] == 'open' or fully_cancelled + not_closed = order["status"] == "open" or fully_cancelled if not_closed: - if ( - fully_cancelled or ( - open_order and self.strategy.ft_check_timed_out( - trade, open_order, datetime.now(timezone.utc) - ) + if fully_cancelled or ( + open_order + and self.strategy.ft_check_timed_out( + trade, open_order, datetime.now(timezone.utc) ) ): self.handle_cancel_order( - order, open_order, trade, constants.CANCEL_REASON['TIMEOUT'] + order, open_order, trade, constants.CANCEL_REASON["TIMEOUT"] ) else: self.replace_order(order, open_order, trade) @@ -1402,28 +1560,31 @@ class FreqtradeBot(LoggingMixin): :param trade: Trade object. :return: None """ - if order['side'] == trade.entry_side: + if order["side"] == trade.entry_side: self.handle_cancel_enter(trade, order, order_obj, reason) else: canceled = self.handle_cancel_exit(trade, order, order_obj, reason) canceled_count = trade.get_canceled_exit_order_count() - max_timeouts = self.config.get('unfilledtimeout', {}).get('exit_timeout_count', 0) - if (canceled and max_timeouts > 0 and canceled_count >= max_timeouts): - logger.warning(f"Emergency exiting trade {trade}, as the exit order " - f"timed out {max_timeouts} times. force selling {order['amount']}.") - self.emergency_exit(trade, order['price'], order['amount']) + max_timeouts = self.config.get("unfilledtimeout", {}).get("exit_timeout_count", 0) + if canceled and max_timeouts > 0 and canceled_count >= max_timeouts: + logger.warning( + f"Emergency exiting trade {trade}, as the exit order " + f"timed out {max_timeouts} times. force selling {order['amount']}." 
+ ) + self.emergency_exit(trade, order["price"], order["amount"]) def emergency_exit( - self, trade: Trade, price: float, sub_trade_amt: Optional[float] = None) -> None: + self, trade: Trade, price: float, sub_trade_amt: Optional[float] = None + ) -> None: try: self.execute_trade_exit( - trade, price, + trade, + price, exit_check=ExitCheckTuple(exit_type=ExitType.EMERGENCY_EXIT), - sub_trade_amt=sub_trade_amt - ) + sub_trade_amt=sub_trade_amt, + ) except DependencyException as exception: - logger.warning( - f'Unable to emergency exit trade {trade.pair}: {exception}') + logger.warning(f"Unable to emergency exit trade {trade.pair}: {exception}") def replace_order_failed(self, trade: Trade, msg: str) -> None: """ @@ -1437,8 +1598,10 @@ class FreqtradeBot(LoggingMixin): # this is the first entry and we didn't get filled yet, delete trade logger.warning(f"Removing {trade} from database.") self._notify_enter_cancel( - trade, order_type=self.strategy.order_types['entry'], - reason=constants.CANCEL_REASON['REPLACE_FAILED']) + trade, + order_type=self.strategy.order_types["entry"], + reason=constants.CANCEL_REASON["REPLACE_FAILED"], + ) trade.delete() def replace_order(self, order: Dict, order_obj: Optional[Order], trade: Trade) -> None: @@ -1452,38 +1615,50 @@ class FreqtradeBot(LoggingMixin): :param trade: Trade object. :return: None """ - analyzed_df, _ = self.dataprovider.get_analyzed_dataframe(trade.pair, - self.strategy.timeframe) - latest_candle_open_date = analyzed_df.iloc[-1]['date'] if len(analyzed_df) > 0 else None - latest_candle_close_date = timeframe_to_next_date(self.strategy.timeframe, - latest_candle_open_date) + analyzed_df, _ = self.dataprovider.get_analyzed_dataframe( + trade.pair, self.strategy.timeframe + ) + latest_candle_open_date = analyzed_df.iloc[-1]["date"] if len(analyzed_df) > 0 else None + latest_candle_close_date = timeframe_to_next_date( + self.strategy.timeframe, latest_candle_open_date + ) # Check if new candle if ( - order_obj and order_obj.side == trade.entry_side + order_obj + and order_obj.side == trade.entry_side and latest_candle_close_date > order_obj.order_date_utc ): # New candle proposed_rate = self.exchange.get_rate( - trade.pair, side='entry', is_short=trade.is_short, refresh=True) + trade.pair, side="entry", is_short=trade.is_short, refresh=True + ) adjusted_entry_price = strategy_safe_wrapper( - self.strategy.adjust_entry_price, default_retval=order_obj.safe_placement_price)( - trade=trade, order=order_obj, pair=trade.pair, - current_time=datetime.now(timezone.utc), proposed_rate=proposed_rate, - current_order_rate=order_obj.safe_placement_price, entry_tag=trade.enter_tag, - side=trade.trade_direction) + self.strategy.adjust_entry_price, default_retval=order_obj.safe_placement_price + )( + trade=trade, + order=order_obj, + pair=trade.pair, + current_time=datetime.now(timezone.utc), + proposed_rate=proposed_rate, + current_order_rate=order_obj.safe_placement_price, + entry_tag=trade.enter_tag, + side=trade.trade_direction, + ) replacing = True - cancel_reason = constants.CANCEL_REASON['REPLACE'] + cancel_reason = constants.CANCEL_REASON["REPLACE"] if not adjusted_entry_price: replacing = False - cancel_reason = constants.CANCEL_REASON['USER_CANCEL'] + cancel_reason = constants.CANCEL_REASON["USER_CANCEL"] if order_obj.safe_placement_price != adjusted_entry_price: # cancel existing order if new price is supplied or None - res = self.handle_cancel_enter(trade, order, order_obj, cancel_reason, - replacing=replacing) + res = self.handle_cancel_enter( + 
trade, order, order_obj, cancel_reason, replacing=replacing + ) if not res: self.replace_order_failed( - trade, f"Could not cancel order for {trade}, therefore not replacing.") + trade, f"Could not cancel order for {trade}, therefore not replacing." + ) return if adjusted_entry_price: # place new order only if new price is supplied @@ -1491,17 +1666,18 @@ class FreqtradeBot(LoggingMixin): if not self.execute_entry( pair=trade.pair, stake_amount=( - order_obj.safe_remaining * order_obj.safe_price / trade.leverage), + order_obj.safe_remaining * order_obj.safe_price / trade.leverage + ), price=adjusted_entry_price, trade=trade, is_short=trade.is_short, - mode='replace', + mode="replace", ): self.replace_order_failed( - trade, f"Could not replace order for {trade}.") + trade, f"Could not replace order for {trade}." + ) except DependencyException as exception: - logger.warning( - f'Unable to replace order for {trade.pair}: {exception}') + logger.warning(f"Unable to replace order for {trade.pair}: {exception}") self.replace_order_failed(trade, f"Could not replace order for {trade}.") def cancel_all_open_orders(self) -> None: @@ -1514,24 +1690,28 @@ class FreqtradeBot(LoggingMixin): for open_order in trade.open_orders: try: order = self.exchange.fetch_order(open_order.order_id, trade.pair) - except (ExchangeError): + except ExchangeError: logger.info("Can't query order for %s due to %s", trade, traceback.format_exc()) continue - if order['side'] == trade.entry_side: + if order["side"] == trade.entry_side: self.handle_cancel_enter( - trade, order, open_order, constants.CANCEL_REASON['ALL_CANCELLED'] + trade, order, open_order, constants.CANCEL_REASON["ALL_CANCELLED"] ) - elif order['side'] == trade.exit_side: + elif order["side"] == trade.exit_side: self.handle_cancel_exit( - trade, order, open_order, constants.CANCEL_REASON['ALL_CANCELLED'] + trade, order, open_order, constants.CANCEL_REASON["ALL_CANCELLED"] ) Trade.commit() def handle_cancel_enter( - self, trade: Trade, order: Dict, order_obj: Order, - reason: str, replacing: Optional[bool] = False + self, + trade: Trade, + order: Dict, + order_obj: Order, + reason: str, + replacing: Optional[bool] = False, ) -> bool: """ entry cancel - cancel order @@ -1543,16 +1723,18 @@ class FreqtradeBot(LoggingMixin): order_id = order_obj.order_id side = trade.entry_side.capitalize() - if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES: - filled_val: float = order.get('filled', 0.0) or 0.0 + if order["status"] not in constants.NON_OPEN_EXCHANGE_STATES: + filled_val: float = order.get("filled", 0.0) or 0.0 filled_stake = filled_val * trade.open_rate minstake = self.exchange.get_min_pair_stake_amount( - trade.pair, trade.open_rate, self.strategy.stoploss) + trade.pair, trade.open_rate, self.strategy.stoploss + ) if filled_val > 0 and minstake and filled_stake < minstake: logger.warning( f"Order {order_id} for {trade.pair} not cancelled, " - f"as the filled amount of {filled_val} would result in an unexitable trade.") + f"as the filled amount of {filled_val} would result in an unexitable trade." 
+ ) return False corder = self.exchange.cancel_order_with_result(order_id, trade.pair, trade.amount) order_obj.ft_cancel_reason = reason @@ -1560,7 +1742,7 @@ class FreqtradeBot(LoggingMixin): if replacing: retry_count = 0 while ( - corder.get('status') not in constants.NON_OPEN_EXCHANGE_STATES + corder.get("status") not in constants.NON_OPEN_EXCHANGE_STATES and retry_count < 3 ): sleep(0.5) @@ -1570,48 +1752,47 @@ class FreqtradeBot(LoggingMixin): # Avoid race condition where the order could not be cancelled coz its already filled. # Simply bailing here is the only safe way - as this order will then be # handled in the next iteration. - if corder.get('status') not in constants.NON_OPEN_EXCHANGE_STATES: + if corder.get("status") not in constants.NON_OPEN_EXCHANGE_STATES: logger.warning(f"Order {order_id} for {trade.pair} not cancelled.") return False else: # Order was cancelled already, so we can reuse the existing dict corder = order if order_obj.ft_cancel_reason is None: - order_obj.ft_cancel_reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE'] + order_obj.ft_cancel_reason = constants.CANCEL_REASON["CANCELLED_ON_EXCHANGE"] - logger.info(f'{side} order {order_obj.ft_cancel_reason} for {trade}.') + logger.info(f"{side} order {order_obj.ft_cancel_reason} for {trade}.") # Using filled to determine the filled amount - filled_amount = safe_value_fallback2(corder, order, 'filled', 'filled') + filled_amount = safe_value_fallback2(corder, order, "filled", "filled") if isclose(filled_amount, 0.0, abs_tol=constants.MATH_CLOSE_PREC): was_trade_fully_canceled = True # if trade is not partially completed and it's the only order, just delete the trade - open_order_count = len([ - order for order in trade.orders if order.ft_is_open and order.order_id != order_id - ]) + open_order_count = len( + [order for order in trade.orders if order.ft_is_open and order.order_id != order_id] + ) if open_order_count < 1 and trade.nr_of_successful_entries == 0 and not replacing: - logger.info(f'{side} order fully cancelled. Removing {trade} from database.') + logger.info(f"{side} order fully cancelled. 
Removing {trade} from database.") trade.delete() order_obj.ft_cancel_reason += f", {constants.CANCEL_REASON['FULLY_CANCELLED']}" else: self.update_trade_state(trade, order_id, corder) - logger.info(f'{side} Order timeout for {trade}.') + logger.info(f"{side} Order timeout for {trade}.") else: # update_trade_state (and subsequently recalc_trade_from_orders) will handle updates # to the trade object self.update_trade_state(trade, order_id, corder) - logger.info(f'Partial {trade.entry_side} order timeout for {trade}.') + logger.info(f"Partial {trade.entry_side} order timeout for {trade}.") order_obj.ft_cancel_reason += f", {constants.CANCEL_REASON['PARTIALLY_FILLED']}" self.wallets.update() - self._notify_enter_cancel(trade, order_type=self.strategy.order_types['entry'], - reason=order_obj.ft_cancel_reason) + self._notify_enter_cancel( + trade, order_type=self.strategy.order_types["entry"], reason=order_obj.ft_cancel_reason + ) return was_trade_fully_canceled - def handle_cancel_exit( - self, trade: Trade, order: Dict, order_obj: Order, reason: str - ) -> bool: + def handle_cancel_exit(self, trade: Trade, order: Dict, order_obj: Order, reason: str) -> bool: """ exit order cancel - cancel order and update trade :return: True if exit order was cancelled, false otherwise @@ -1619,63 +1800,68 @@ class FreqtradeBot(LoggingMixin): order_id = order_obj.order_id cancelled = False # Cancelled orders may have the status of 'canceled' or 'closed' - if order['status'] not in constants.NON_OPEN_EXCHANGE_STATES: - filled_amt: float = order.get('filled', 0.0) or 0.0 + if order["status"] not in constants.NON_OPEN_EXCHANGE_STATES: + filled_amt: float = order.get("filled", 0.0) or 0.0 # Filled val is in quote currency (after leverage) filled_rem_stake = trade.stake_amount - (filled_amt * trade.open_rate / trade.leverage) minstake = self.exchange.get_min_pair_stake_amount( - trade.pair, trade.open_rate, self.strategy.stoploss) + trade.pair, trade.open_rate, self.strategy.stoploss + ) # Double-check remaining amount if filled_amt > 0: - reason = constants.CANCEL_REASON['PARTIALLY_FILLED'] + reason = constants.CANCEL_REASON["PARTIALLY_FILLED"] if minstake and filled_rem_stake < minstake: logger.warning( f"Order {order_id} for {trade.pair} not cancelled, as " - f"the filled amount of {filled_amt} would result in an unexitable trade.") - reason = constants.CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'] + f"the filled amount of {filled_amt} would result in an unexitable trade." + ) + reason = constants.CANCEL_REASON["PARTIALLY_FILLED_KEEP_OPEN"] self._notify_exit_cancel( trade, - order_type=self.strategy.order_types['exit'], - reason=reason, order_id=order['id'], - sub_trade=trade.amount != order['amount'] + order_type=self.strategy.order_types["exit"], + reason=reason, + order_id=order["id"], + sub_trade=trade.amount != order["amount"], ) return False order_obj.ft_cancel_reason = reason try: order = self.exchange.cancel_order_with_result( - order['id'], trade.pair, trade.amount) + order["id"], trade.pair, trade.amount + ) except InvalidOrderException: - logger.exception( - f"Could not cancel {trade.exit_side} order {order_id}") + logger.exception(f"Could not cancel {trade.exit_side} order {order_id}") return False # Set exit_reason for fill message exit_reason_prev = trade.exit_reason trade.exit_reason = trade.exit_reason + f", {reason}" if trade.exit_reason else reason # Order might be filled above in odd timing issues. 
- if order.get('status') in ('canceled', 'cancelled'): + if order.get("status") in ("canceled", "cancelled"): trade.exit_reason = None else: trade.exit_reason = exit_reason_prev cancelled = True else: if order_obj.ft_cancel_reason is None: - order_obj.ft_cancel_reason = constants.CANCEL_REASON['CANCELLED_ON_EXCHANGE'] + order_obj.ft_cancel_reason = constants.CANCEL_REASON["CANCELLED_ON_EXCHANGE"] trade.exit_reason = None - self.update_trade_state(trade, order['id'], order) + self.update_trade_state(trade, order["id"], order) logger.info( - f'{trade.exit_side.capitalize()} order {order_obj.ft_cancel_reason} for {trade}.') + f"{trade.exit_side.capitalize()} order {order_obj.ft_cancel_reason} for {trade}." + ) trade.close_rate = None trade.close_rate_requested = None self._notify_exit_cancel( trade, - order_type=self.strategy.order_types['exit'], - reason=order_obj.ft_cancel_reason, order_id=order['id'], - sub_trade=trade.amount != order['amount'] + order_type=self.strategy.order_types["exit"], + reason=order_obj.ft_cancel_reason, + order_id=order["id"], + sub_trade=trade.amount != order["amount"], ) return cancelled @@ -1708,17 +1894,18 @@ class FreqtradeBot(LoggingMixin): return wallet_amount else: raise DependencyException( - f"Not enough amount to exit trade. Trade-amount: {amount}, Wallet: {wallet_amount}") + f"Not enough amount to exit trade. Trade-amount: {amount}, Wallet: {wallet_amount}" + ) def execute_trade_exit( - self, - trade: Trade, - limit: float, - exit_check: ExitCheckTuple, - *, - exit_tag: Optional[str] = None, - ordertype: Optional[str] = None, - sub_trade_amt: Optional[float] = None, + self, + trade: Trade, + limit: float, + exit_check: ExitCheckTuple, + *, + exit_tag: Optional[str] = None, + ordertype: Optional[str] = None, + sub_trade_amt: Optional[float] = None, ) -> bool: """ Executes a trade exit for the given trade and limit @@ -1732,24 +1919,32 @@ class FreqtradeBot(LoggingMixin): pair=trade.pair, amount=trade.amount, is_short=trade.is_short, - open_date=trade.date_last_filled_utc) + open_date=trade.date_last_filled_utc, + ) ) - exit_type = 'exit' + exit_type = "exit" exit_reason = exit_tag or exit_check.exit_reason if exit_check.exit_type in ( - ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS, ExitType.LIQUIDATION): - exit_type = 'stoploss' + ExitType.STOP_LOSS, + ExitType.TRAILING_STOP_LOSS, + ExitType.LIQUIDATION, + ): + exit_type = "stoploss" # set custom_exit_price if available proposed_limit_rate = limit current_profit = trade.calc_profit_ratio(limit) - custom_exit_price = strategy_safe_wrapper(self.strategy.custom_exit_price, - default_retval=proposed_limit_rate)( - pair=trade.pair, trade=trade, + custom_exit_price = strategy_safe_wrapper( + self.strategy.custom_exit_price, default_retval=proposed_limit_rate + )( + pair=trade.pair, + trade=trade, current_time=datetime.now(timezone.utc), - proposed_rate=proposed_limit_rate, current_profit=current_profit, - exit_tag=exit_reason) + proposed_rate=proposed_limit_rate, + current_profit=current_profit, + exit_tag=exit_reason, + ) limit = self.get_valid_price(custom_exit_price, proposed_limit_rate) @@ -1762,16 +1957,23 @@ class FreqtradeBot(LoggingMixin): order_type = self.strategy.order_types.get("emergency_exit", "market") amount = self._safe_exit_amount(trade, trade.pair, sub_trade_amt or trade.amount) - time_in_force = self.strategy.order_time_in_force['exit'] + time_in_force = self.strategy.order_time_in_force["exit"] - if (exit_check.exit_type != ExitType.LIQUIDATION - and not sub_trade_amt - and not 
strategy_safe_wrapper( - self.strategy.confirm_trade_exit, default_retval=True)( - pair=trade.pair, trade=trade, order_type=order_type, amount=amount, rate=limit, - time_in_force=time_in_force, exit_reason=exit_reason, - sell_reason=exit_reason, # sellreason -> compatibility - current_time=datetime.now(timezone.utc))): + if ( + exit_check.exit_type != ExitType.LIQUIDATION + and not sub_trade_amt + and not strategy_safe_wrapper(self.strategy.confirm_trade_exit, default_retval=True)( + pair=trade.pair, + trade=trade, + order_type=order_type, + amount=amount, + rate=limit, + time_in_force=time_in_force, + exit_reason=exit_reason, + sell_reason=exit_reason, # sellreason -> compatibility + current_time=datetime.now(timezone.utc), + ) + ): logger.info(f"User denied exit for {trade.pair}.") return False @@ -1785,7 +1987,7 @@ class FreqtradeBot(LoggingMixin): rate=limit, leverage=trade.leverage, reduceOnly=self.trading_mode == TradingMode.FUTURES, - time_in_force=time_in_force + time_in_force=time_in_force, ) except InsufficientFundsError as e: logger.warning(f"Unable to place order {e}.") @@ -1797,26 +1999,35 @@ class FreqtradeBot(LoggingMixin): order_obj.ft_order_tag = exit_reason trade.orders.append(order_obj) - trade.exit_order_status = '' + trade.exit_order_status = "" trade.close_rate_requested = limit trade.exit_reason = exit_reason self._notify_exit(trade, order_type, sub_trade=bool(sub_trade_amt), order=order_obj) # In case of market sell orders the order can be closed immediately - if order.get('status', 'unknown') in ('closed', 'expired'): + if order.get("status", "unknown") in ("closed", "expired"): self.update_trade_state(trade, order_obj.order_id, order) Trade.commit() return True - def _notify_exit(self, trade: Trade, order_type: Optional[str], fill: bool = False, - sub_trade: bool = False, order: Optional[Order] = None) -> None: + def _notify_exit( + self, + trade: Trade, + order_type: Optional[str], + fill: bool = False, + sub_trade: bool = False, + order: Optional[Order] = None, + ) -> None: """ Sends rpc notification when a sell occurred. """ # Use cached rates here - it was updated seconds ago. 
- current_rate = self.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=False) if not fill else None + current_rate = ( + self.exchange.get_rate(trade.pair, side="exit", is_short=trade.is_short, refresh=False) + if not fill + else None + ) # second condition is for mypy only; order will always be passed during sub trade if sub_trade and order is not None: @@ -1831,44 +2042,44 @@ class FreqtradeBot(LoggingMixin): gain: ProfitLossStr = "profit" if profit.profit_ratio > 0 else "loss" msg: RPCExitMsg = { - 'type': (RPCMessageType.EXIT_FILL if fill - else RPCMessageType.EXIT), - 'trade_id': trade.id, - 'exchange': trade.exchange.capitalize(), - 'pair': trade.pair, - 'leverage': trade.leverage, - 'direction': 'Short' if trade.is_short else 'Long', - 'gain': gain, - 'limit': order_rate, # Deprecated - 'order_rate': order_rate, - 'order_type': order_type or 'unknown', - 'amount': amount, - 'open_rate': trade.open_rate, - 'close_rate': order_rate, - 'current_rate': current_rate, - 'profit_amount': profit.profit_abs, - 'profit_ratio': profit.profit_ratio, - 'buy_tag': trade.enter_tag, - 'enter_tag': trade.enter_tag, - 'exit_reason': trade.exit_reason, - 'open_date': trade.open_date_utc, - 'close_date': trade.close_date_utc or datetime.now(timezone.utc), - 'stake_amount': trade.stake_amount, - 'stake_currency': self.config['stake_currency'], - 'base_currency': self.exchange.get_pair_base_currency(trade.pair), - 'quote_currency': self.exchange.get_pair_quote_currency(trade.pair), - 'fiat_currency': self.config.get('fiat_display_currency'), - 'sub_trade': sub_trade, - 'cumulative_profit': trade.realized_profit, - 'final_profit_ratio': trade.close_profit if not trade.is_open else None, - 'is_final_exit': trade.is_open is False, + "type": (RPCMessageType.EXIT_FILL if fill else RPCMessageType.EXIT), + "trade_id": trade.id, + "exchange": trade.exchange.capitalize(), + "pair": trade.pair, + "leverage": trade.leverage, + "direction": "Short" if trade.is_short else "Long", + "gain": gain, + "limit": order_rate, # Deprecated + "order_rate": order_rate, + "order_type": order_type or "unknown", + "amount": amount, + "open_rate": trade.open_rate, + "close_rate": order_rate, + "current_rate": current_rate, + "profit_amount": profit.profit_abs, + "profit_ratio": profit.profit_ratio, + "buy_tag": trade.enter_tag, + "enter_tag": trade.enter_tag, + "exit_reason": trade.exit_reason, + "open_date": trade.open_date_utc, + "close_date": trade.close_date_utc or datetime.now(timezone.utc), + "stake_amount": trade.stake_amount, + "stake_currency": self.config["stake_currency"], + "base_currency": self.exchange.get_pair_base_currency(trade.pair), + "quote_currency": self.exchange.get_pair_quote_currency(trade.pair), + "fiat_currency": self.config.get("fiat_display_currency"), + "sub_trade": sub_trade, + "cumulative_profit": trade.realized_profit, + "final_profit_ratio": trade.close_profit if not trade.is_open else None, + "is_final_exit": trade.is_open is False, } # Send the message self.rpc.send_msg(msg) - def _notify_exit_cancel(self, trade: Trade, order_type: str, reason: str, - order_id: str, sub_trade: bool = False) -> None: + def _notify_exit_cancel( + self, trade: Trade, order_type: str, reason: str, order_id: str, sub_trade: bool = False + ) -> None: """ Sends rpc notification when a sell cancel occurred. 
""" @@ -1883,36 +2094,37 @@ class FreqtradeBot(LoggingMixin): profit_rate: float = trade.safe_close_rate profit = trade.calculate_profit(rate=profit_rate) current_rate = self.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=False) + trade.pair, side="exit", is_short=trade.is_short, refresh=False + ) gain: ProfitLossStr = "profit" if profit.profit_ratio > 0 else "loss" msg: RPCExitCancelMsg = { - 'type': RPCMessageType.EXIT_CANCEL, - 'trade_id': trade.id, - 'exchange': trade.exchange.capitalize(), - 'pair': trade.pair, - 'leverage': trade.leverage, - 'direction': 'Short' if trade.is_short else 'Long', - 'gain': gain, - 'limit': profit_rate or 0, - 'order_type': order_type, - 'amount': order.safe_amount_after_fee, - 'open_rate': trade.open_rate, - 'current_rate': current_rate, - 'profit_amount': profit.profit_abs, - 'profit_ratio': profit.profit_ratio, - 'buy_tag': trade.enter_tag, - 'enter_tag': trade.enter_tag, - 'exit_reason': trade.exit_reason, - 'open_date': trade.open_date, - 'close_date': trade.close_date or datetime.now(timezone.utc), - 'stake_currency': self.config['stake_currency'], - 'base_currency': self.exchange.get_pair_base_currency(trade.pair), - 'quote_currency': self.exchange.get_pair_quote_currency(trade.pair), - 'fiat_currency': self.config.get('fiat_display_currency', None), - 'reason': reason, - 'sub_trade': sub_trade, - 'stake_amount': trade.stake_amount, + "type": RPCMessageType.EXIT_CANCEL, + "trade_id": trade.id, + "exchange": trade.exchange.capitalize(), + "pair": trade.pair, + "leverage": trade.leverage, + "direction": "Short" if trade.is_short else "Long", + "gain": gain, + "limit": profit_rate or 0, + "order_type": order_type, + "amount": order.safe_amount_after_fee, + "open_rate": trade.open_rate, + "current_rate": current_rate, + "profit_amount": profit.profit_abs, + "profit_ratio": profit.profit_ratio, + "buy_tag": trade.enter_tag, + "enter_tag": trade.enter_tag, + "exit_reason": trade.exit_reason, + "open_date": trade.open_date, + "close_date": trade.close_date or datetime.now(timezone.utc), + "stake_currency": self.config["stake_currency"], + "base_currency": self.exchange.get_pair_base_currency(trade.pair), + "quote_currency": self.exchange.get_pair_quote_currency(trade.pair), + "fiat_currency": self.config.get("fiat_display_currency", None), + "reason": reason, + "sub_trade": sub_trade, + "stake_amount": trade.stake_amount, } # Send the message @@ -1921,17 +2133,23 @@ class FreqtradeBot(LoggingMixin): def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order: if not order_obj: raise DependencyException( - f"Order_obj not found for {order_id}. This should not have happened.") + f"Order_obj not found for {order_id}. This should not have happened." + ) return order_obj -# -# Common update trade state methods -# + # + # Common update trade state methods + # def update_trade_state( - self, trade: Trade, order_id: Optional[str], - action_order: Optional[Dict[str, Any]] = None, *, - stoploss_order: bool = False, send_msg: bool = True) -> bool: + self, + trade: Trade, + order_id: Optional[str], + action_order: Optional[Dict[str, Any]] = None, + *, + stoploss_order: bool = False, + send_msg: bool = True, + ) -> bool: """ Checks trades with open orders and updates the amount if necessary Handles closing both buy and sell orders. 
@@ -1942,17 +2160,18 @@ class FreqtradeBot(LoggingMixin): :return: True if order has been cancelled without being filled partially, False otherwise """ if not order_id: - logger.warning(f'Orderid for trade {trade} is empty.') + logger.warning(f"Orderid for trade {trade} is empty.") return False # Update trade with order values if not stoploss_order: - logger.info(f'Found open order for {trade}') + logger.info(f"Found open order for {trade}") try: order = action_order or self.exchange.fetch_order_or_stoploss_order( - order_id, trade.pair, stoploss_order) + order_id, trade.pair, stoploss_order + ) except InvalidOrderException as exception: - logger.warning('Unable to fetch order %s: %s', order_id, exception) + logger.warning("Unable to fetch order %s: %s", order_id, exception) return False trade.update_order(order) @@ -1978,9 +2197,9 @@ class FreqtradeBot(LoggingMixin): def _update_trade_after_fill(self, trade: Trade, order: Order, send_msg: bool) -> Trade: if order.status in constants.NON_OPEN_EXCHANGE_STATES: - strategy_safe_wrapper( - self.strategy.order_filled, default_retval=None)( - pair=trade.pair, trade=trade, order=order, current_time=datetime.now(timezone.utc)) + strategy_safe_wrapper(self.strategy.order_filled, default_retval=None)( + pair=trade.pair, trade=trade, order=order, current_time=datetime.now(timezone.utc) + ) # If a entry order was closed, force update on stoploss on exchange if order.ft_order_side == trade.entry_side: if send_msg: @@ -1995,68 +2214,78 @@ class FreqtradeBot(LoggingMixin): # TODO: Margin will need to use interest_rate as well. # interest_rate = self.exchange.get_interest_rate() try: - trade.set_liquidation_price(self.exchange.get_liquidation_price( - pair=trade.pair, - open_rate=trade.open_rate, - is_short=trade.is_short, - amount=trade.amount, - stake_amount=trade.stake_amount, - leverage=trade.leverage, - wallet_balance=trade.stake_amount, - )) + trade.set_liquidation_price( + self.exchange.get_liquidation_price( + pair=trade.pair, + open_rate=trade.open_rate, + is_short=trade.is_short, + amount=trade.amount, + stake_amount=trade.stake_amount, + leverage=trade.leverage, + wallet_balance=trade.stake_amount, + ) + ) except DependencyException: - logger.warning('Unable to calculate liquidation price') + logger.warning("Unable to calculate liquidation price") if self.strategy.use_custom_stoploss: current_rate = self.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=True) + trade.pair, side="exit", is_short=trade.is_short, refresh=True + ) profit = trade.calc_profit_ratio(current_rate) - self.strategy.ft_stoploss_adjust(current_rate, trade, - datetime.now(timezone.utc), profit, 0, - after_fill=True) + self.strategy.ft_stoploss_adjust( + current_rate, trade, datetime.now(timezone.utc), profit, 0, after_fill=True + ) # Updating wallets when order is closed self.wallets.update() return trade - def order_close_notify( - self, trade: Trade, order: Order, stoploss_order: bool, send_msg: bool): + def order_close_notify(self, trade: Trade, order: Order, stoploss_order: bool, send_msg: bool): """send "fill" notifications""" if order.ft_order_side == trade.exit_side: # Exit notification if send_msg and not stoploss_order and order.order_id not in trade.open_orders_ids: - self._notify_exit(trade, order.order_type, fill=True, - sub_trade=trade.is_open, order=order) + self._notify_exit( + trade, order.order_type, fill=True, sub_trade=trade.is_open, order=order + ) if not trade.is_open: self.handle_protections(trade.pair, 
trade.trade_direction) elif send_msg and order.order_id not in trade.open_orders_ids and not stoploss_order: - sub_trade = not isclose(order.safe_amount_after_fee, - trade.amount, abs_tol=constants.MATH_CLOSE_PREC) + sub_trade = not isclose( + order.safe_amount_after_fee, trade.amount, abs_tol=constants.MATH_CLOSE_PREC + ) # Enter fill self._notify_enter(trade, order, order.order_type, fill=True, sub_trade=sub_trade) def handle_protections(self, pair: str, side: LongShort) -> None: - # Lock pair for one candle to prevent immediate rebuys - self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason='Auto lock') + # Lock pair for one candle to prevent immediate re-entries + self.strategy.lock_pair(pair, datetime.now(timezone.utc), reason="Auto lock") prot_trig = self.protections.stop_per_pair(pair, side=side) if prot_trig: msg: RPCProtectionMsg = { - 'type': RPCMessageType.PROTECTION_TRIGGER, - 'base_currency': self.exchange.get_pair_base_currency(prot_trig.pair), - **prot_trig.to_json() # type: ignore + "type": RPCMessageType.PROTECTION_TRIGGER, + "base_currency": self.exchange.get_pair_base_currency(prot_trig.pair), + **prot_trig.to_json(), # type: ignore } self.rpc.send_msg(msg) prot_trig_glb = self.protections.global_stop(side=side) if prot_trig_glb: msg = { - 'type': RPCMessageType.PROTECTION_TRIGGER_GLOBAL, - 'base_currency': self.exchange.get_pair_base_currency(prot_trig_glb.pair), - **prot_trig_glb.to_json() # type: ignore + "type": RPCMessageType.PROTECTION_TRIGGER_GLOBAL, + "base_currency": self.exchange.get_pair_base_currency(prot_trig_glb.pair), + **prot_trig_glb.to_json(), # type: ignore } self.rpc.send_msg(msg) - def apply_fee_conditional(self, trade: Trade, trade_base_currency: str, - amount: float, fee_abs: float, order_obj: Order) -> Optional[float]: + def apply_fee_conditional( + self, + trade: Trade, + trade_base_currency: str, + amount: float, + fee_abs: float, + order_obj: Order, + ) -> Optional[float]: """ Applies the fee to amount (either from Order or from Trades). Can eat into dust if more than the required asset is available. @@ -2066,18 +2295,19 @@ class FreqtradeBot(LoggingMixin): """ self.wallets.update() amount_ = trade.amount - if order_obj.ft_order_side == trade.exit_side or order_obj.ft_order_side == 'stoploss': + if order_obj.ft_order_side == trade.exit_side or order_obj.ft_order_side == "stoploss": # check against remaining amount! amount_ = trade.amount - amount if trade.nr_of_successful_entries >= 1 and order_obj.ft_order_side == trade.entry_side: - # In case of rebuy's, trade.amount doesn't contain the amount of the last entry. + # In case of re-entry's, trade.amount doesn't contain the amount of the last entry. amount_ = trade.amount + amount if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount_: # Eat into dust if we own more than base currency - logger.info(f"Fee amount for {trade} was in base currency - " - f"Eating Fee {fee_abs} into dust.") + logger.info( + f"Fee amount for {trade} was in base currency - Eating Fee {fee_abs} into dust." 
+ ) elif fee_abs != 0: logger.info(f"Applying fee on amount for {trade}, fee={fee_abs}.") return fee_abs @@ -2101,11 +2331,11 @@ class FreqtradeBot(LoggingMixin): :return: Absolute fee to apply for this order or None """ # Init variables - order_amount = safe_value_fallback(order, 'filled', 'amount') + order_amount = safe_value_fallback(order, "filled", "amount") # Only run for closed orders if ( - trade.fee_updated(order.get('side', '')) - or order['status'] == 'open' + trade.fee_updated(order.get("side", "")) + or order["status"] == "open" or order_obj.ft_fee_base ): return None @@ -2114,32 +2344,42 @@ class FreqtradeBot(LoggingMixin): # use fee from order-dict if possible if self.exchange.order_has_fee(order): fee_cost, fee_currency, fee_rate = self.exchange.extract_cost_curr_rate( - order['fee'], order['symbol'], order['cost'], order_obj.safe_filled) - logger.info(f"Fee for Trade {trade} [{order_obj.ft_order_side}]: " - f"{fee_cost:.8g} {fee_currency} - rate: {fee_rate}") + order["fee"], order["symbol"], order["cost"], order_obj.safe_filled + ) + logger.info( + f"Fee for Trade {trade} [{order_obj.ft_order_side}]: " + f"{fee_cost:.8g} {fee_currency} - rate: {fee_rate}" + ) if fee_rate is None or fee_rate < 0.02: # Reject all fees that report as > 2%. # These are most likely caused by a parsing bug in ccxt # due to multiple trades (https://github.com/ccxt/ccxt/issues/8025) - trade.update_fee(fee_cost, fee_currency, fee_rate, order.get('side', '')) + trade.update_fee(fee_cost, fee_currency, fee_rate, order.get("side", "")) if trade_base_currency == fee_currency: # Apply fee to amount - return self.apply_fee_conditional(trade, trade_base_currency, - amount=order_amount, fee_abs=fee_cost, - order_obj=order_obj) + return self.apply_fee_conditional( + trade, + trade_base_currency, + amount=order_amount, + fee_abs=fee_cost, + order_obj=order_obj, + ) return None return self.fee_detection_from_trades( - trade, order, order_obj, order_amount, order.get('trades', [])) + trade, order, order_obj, order_amount, order.get("trades", []) + ) - def fee_detection_from_trades(self, trade: Trade, order: Dict, order_obj: Order, - order_amount: float, trades: List) -> Optional[float]: + def fee_detection_from_trades( + self, trade: Trade, order: Dict, order_obj: Order, order_amount: float, trades: List + ) -> Optional[float]: """ fee-detection fallback to Trades. Either uses provided trades list or the result of fetch_my_trades to get correct fee. 
""" if not trades: trades = self.exchange.get_trades_for_order( - self.exchange.get_order_id_conditional(order), trade.pair, order_obj.order_date) + self.exchange.get_order_id_conditional(order), trade.pair, order_obj.order_date + ) if len(trades) == 0: logger.info("Applying fee on amount for %s failed: myTrade-Dict empty found", trade) @@ -2151,16 +2391,15 @@ class FreqtradeBot(LoggingMixin): trade_base_currency = self.exchange.get_pair_base_currency(trade.pair) fee_rate_array: List[float] = [] for exectrade in trades: - amount += exectrade['amount'] + amount += exectrade["amount"] if self.exchange.order_has_fee(exectrade): # Prefer singular fee - fees = [exectrade['fee']] + fees = [exectrade["fee"]] else: - fees = exectrade.get('fees', []) + fees = exectrade.get("fees", []) for fee in fees: - fee_cost_, fee_currency, fee_rate_ = self.exchange.extract_cost_curr_rate( - fee, exectrade['symbol'], exectrade['cost'], exectrade['amount'] + fee, exectrade["symbol"], exectrade["cost"], exectrade["amount"] ) fee_cost += fee_cost_ if fee_rate_ is not None: @@ -2174,10 +2413,11 @@ class FreqtradeBot(LoggingMixin): fee_rate = sum(fee_rate_array) / float(len(fee_rate_array)) if fee_rate_array else None if fee_rate is not None and fee_rate < 0.02: # Only update if fee-rate is < 2% - trade.update_fee(fee_cost, fee_currency, fee_rate, order.get('side', '')) + trade.update_fee(fee_cost, fee_currency, fee_rate, order.get("side", "")) else: logger.warning( - f"Not updating {order.get('side', '')}-fee - rate: {fee_rate}, {fee_currency}.") + f"Not updating {order.get('side', '')}-fee - rate: {fee_rate}, {fee_currency}." + ) if not isclose(amount, order_amount, abs_tol=constants.MATH_CLOSE_PREC): # * Leverage could be a cause for this warning @@ -2186,7 +2426,8 @@ class FreqtradeBot(LoggingMixin): if fee_abs != 0: return self.apply_fee_conditional( - trade, trade_base_currency, amount=amount, fee_abs=fee_abs, order_obj=order_obj) + trade, trade_base_currency, amount=amount, fee_abs=fee_abs, order_obj=order_obj + ) return None def get_valid_price(self, custom_price: float, proposed_price: float) -> float: @@ -2203,11 +2444,9 @@ class FreqtradeBot(LoggingMixin): else: valid_custom_price = proposed_price - cust_p_max_dist_r = self.config.get('custom_price_max_distance_ratio', 0.02) + cust_p_max_dist_r = self.config.get("custom_price_max_distance_ratio", 0.02) min_custom_price_allowed = proposed_price - (proposed_price * cust_p_max_dist_r) max_custom_price_allowed = proposed_price + (proposed_price * cust_p_max_dist_r) # Bracket between min_custom_price_allowed and max_custom_price_allowed - return max( - min(valid_custom_price, max_custom_price_allowed), - min_custom_price_allowed) + return max(min(valid_custom_price, max_custom_price_allowed), min_custom_price_allowed) diff --git a/freqtrade/leverage/interest.py b/freqtrade/leverage/interest.py index d18cc458f..f409f2b94 100644 --- a/freqtrade/leverage/interest.py +++ b/freqtrade/leverage/interest.py @@ -10,10 +10,7 @@ twenty_four = FtPrecise(24.0) def interest( - exchange_name: str, - borrowed: FtPrecise, - rate: FtPrecise, - hours: FtPrecise + exchange_name: str, borrowed: FtPrecise, rate: FtPrecise, hours: FtPrecise ) -> FtPrecise: """ Equation to calculate interest on margin trades diff --git a/freqtrade/loggers/__init__.py b/freqtrade/loggers/__init__.py index 390f210c0..1cc0590a1 100644 --- a/freqtrade/loggers/__init__.py +++ b/freqtrade/loggers/__init__.py @@ -10,7 +10,7 @@ from freqtrade.loggers.std_err_stream_handler import FTStdErrStreamHandler logger 
= logging.getLogger(__name__) -LOGFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' +LOGFORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" # Initialize bufferhandler - will be used for /log endpoints bufferHandler = FTBufferingHandler(1000) @@ -33,9 +33,7 @@ def setup_logging_pre() -> None: ones the user desires beforehand. """ logging.basicConfig( - level=logging.INFO, - format=LOGFORMAT, - handlers=[FTStdErrStreamHandler(), bufferHandler] + level=logging.INFO, format=LOGFORMAT, handlers=[FTStdErrStreamHandler(), bufferHandler] ) @@ -44,20 +42,20 @@ def setup_logging(config: Config) -> None: Process -v/--verbose, --logfile options """ # Log level - verbosity = config['verbosity'] + verbosity = config["verbosity"] logging.root.addHandler(bufferHandler) - logfile = config.get('logfile') + logfile = config.get("logfile") if logfile: - s = logfile.split(':') - if s[0] == 'syslog': + s = logfile.split(":") + if s[0] == "syslog": # Address can be either a string (socket filename) for Unix domain socket or # a tuple (hostname, port) for UDP socket. # Address can be omitted (i.e. simple 'syslog' used as the value of # config['logfilename']), which defaults to '/dev/log', applicable for most # of the systems. - address = (s[1], int(s[2])) if len(s) > 2 else s[1] if len(s) > 1 else '/dev/log' + address = (s[1], int(s[2])) if len(s) > 2 else s[1] if len(s) > 1 else "/dev/log" handler_sl = get_existing_handlers(SysLogHandler) if handler_sl: logging.root.removeHandler(handler_sl) @@ -65,14 +63,16 @@ def setup_logging(config: Config) -> None: # No datetime field for logging into syslog, to allow syslog # to perform reduction of repeating messages if this is set in the # syslog config. The messages should be equal for this. - handler_sl.setFormatter(Formatter('%(name)s - %(levelname)s - %(message)s')) + handler_sl.setFormatter(Formatter("%(name)s - %(levelname)s - %(message)s")) logging.root.addHandler(handler_sl) - elif s[0] == 'journald': # pragma: no cover + elif s[0] == "journald": # pragma: no cover try: from cysystemd.journal import JournaldLogHandler except ImportError: - raise OperationalException("You need the cysystemd python package be installed in " - "order to use logging to journald.") + raise OperationalException( + "You need the cysystemd python package be installed in " + "order to use logging to journald." + ) handler_jd = get_existing_handlers(JournaldLogHandler) if handler_jd: logging.root.removeHandler(handler_jd) @@ -80,19 +80,21 @@ def setup_logging(config: Config) -> None: # No datetime field for logging into journald, to allow syslog # to perform reduction of repeating messages if this is set in the # syslog config. The messages should be equal for this. 
- handler_jd.setFormatter(Formatter('%(name)s - %(levelname)s - %(message)s')) + handler_jd.setFormatter(Formatter("%(name)s - %(levelname)s - %(message)s")) logging.root.addHandler(handler_jd) else: handler_rf = get_existing_handlers(RotatingFileHandler) if handler_rf: logging.root.removeHandler(handler_rf) - handler_rf = RotatingFileHandler(logfile, - maxBytes=1024 * 1024 * 10, # 10Mb - backupCount=10) + handler_rf = RotatingFileHandler( + logfile, + maxBytes=1024 * 1024 * 10, # 10Mb + backupCount=10, + ) handler_rf.setFormatter(Formatter(LOGFORMAT)) logging.root.addHandler(handler_rf) logging.root.setLevel(logging.INFO if verbosity < 1 else logging.DEBUG) - set_loggers(verbosity, config.get('api_server', {}).get('verbosity', 'info')) + set_loggers(verbosity, config.get("api_server", {}).get("verbosity", "info")) - logger.info('Verbosity set to %s', verbosity) + logger.info("Verbosity set to %s", verbosity) diff --git a/freqtrade/loggers/buffering_handler.py b/freqtrade/loggers/buffering_handler.py index e4621fa79..02409708a 100644 --- a/freqtrade/loggers/buffering_handler.py +++ b/freqtrade/loggers/buffering_handler.py @@ -10,6 +10,7 @@ class FTBufferingHandler(BufferingHandler): self.acquire() try: # Keep half of the records in buffer. - self.buffer = self.buffer[-int(self.capacity / 2):] + records_to_keep = -int(self.capacity / 2) + self.buffer = self.buffer[records_to_keep:] finally: self.release() diff --git a/freqtrade/loggers/set_log_levels.py b/freqtrade/loggers/set_log_levels.py index abaee1523..24f26ffd6 100644 --- a/freqtrade/loggers/set_log_levels.py +++ b/freqtrade/loggers/set_log_levels.py @@ -1,35 +1,32 @@ - import logging logger = logging.getLogger(__name__) -def set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None: +def set_loggers(verbosity: int = 0, api_verbosity: str = "info") -> None: """ Set the logging level for third party libraries :param verbosity: Verbosity level. amount of `-v` passed to the command line :return: None """ - for logger_name in ('requests', 'urllib3', 'httpcore'): - logging.getLogger(logger_name).setLevel( - logging.INFO if verbosity <= 1 else logging.DEBUG - ) - logging.getLogger('ccxt.base.exchange').setLevel( + for logger_name in ("requests", "urllib3", "httpcore"): + logging.getLogger(logger_name).setLevel(logging.INFO if verbosity <= 1 else logging.DEBUG) + logging.getLogger("ccxt.base.exchange").setLevel( logging.INFO if verbosity <= 2 else logging.DEBUG ) - logging.getLogger('telegram').setLevel(logging.INFO) - logging.getLogger('httpx').setLevel(logging.WARNING) + logging.getLogger("telegram").setLevel(logging.INFO) + logging.getLogger("httpx").setLevel(logging.WARNING) - logging.getLogger('werkzeug').setLevel( - logging.ERROR if api_verbosity == 'error' else logging.INFO + logging.getLogger("werkzeug").setLevel( + logging.ERROR if api_verbosity == "error" else logging.INFO ) __BIAS_TESTER_LOGGERS = [ - 'freqtrade.resolvers', - 'freqtrade.strategy.hyper', - 'freqtrade.configuration.config_validation', + "freqtrade.resolvers", + "freqtrade.strategy.hyper", + "freqtrade.configuration.config_validation", ] diff --git a/freqtrade/loggers/std_err_stream_handler.py b/freqtrade/loggers/std_err_stream_handler.py index 487a7c100..d7f7e4052 100644 --- a/freqtrade/loggers/std_err_stream_handler.py +++ b/freqtrade/loggers/std_err_stream_handler.py @@ -18,7 +18,7 @@ class FTStdErrStreamHandler(Handler): try: msg = self.format(record) # Don't keep a reference to stderr - this can be problematic with progressbars. 
- sys.stderr.write(msg + '\n') + sys.stderr.write(msg + "\n") self.flush() except RecursionError: raise diff --git a/freqtrade/main.py b/freqtrade/main.py index 5eabe398f..8161e20a6 100755 --- a/freqtrade/main.py +++ b/freqtrade/main.py @@ -3,6 +3,7 @@ Main Freqtrade bot script. Read the documentation to know what cli arguments you need. """ + import logging import sys from typing import Any, List, Optional @@ -20,7 +21,7 @@ from freqtrade.loggers import setup_logging_pre from freqtrade.util.gc_setup import gc_set_threshold -logger = logging.getLogger('freqtrade') +logger = logging.getLogger("freqtrade") def main(sysargv: Optional[List[str]] = None) -> None: @@ -36,10 +37,10 @@ def main(sysargv: Optional[List[str]] = None) -> None: args = arguments.get_parsed_arg() # Call subcommand. - if 'func' in args: - logger.info(f'freqtrade {__version__}') + if "func" in args: + logger.info(f"freqtrade {__version__}") gc_set_threshold() - return_code = args['func'](args) + return_code = args["func"](args) else: # No subcommand was issued. raise OperationalException( @@ -54,19 +55,21 @@ def main(sysargv: Optional[List[str]] = None) -> None: except SystemExit as e: # pragma: no cover return_code = e except KeyboardInterrupt: - logger.info('SIGINT received, aborting ...') + logger.info("SIGINT received, aborting ...") return_code = 0 except ConfigurationError as e: - logger.error(f"Configuration error: {e}\n" - f"Please make sure to review the documentation at {DOCS_LINK}.") + logger.error( + f"Configuration error: {e}\n" + f"Please make sure to review the documentation at {DOCS_LINK}." + ) except FreqtradeException as e: logger.error(str(e)) return_code = 2 except Exception: - logger.exception('Fatal exception!') + logger.exception("Fatal exception!") finally: sys.exit(return_code) -if __name__ == '__main__': # pragma: no cover +if __name__ == "__main__": # pragma: no cover main() diff --git a/freqtrade/misc.py b/freqtrade/misc.py index a6c6f15fd..9a33fe430 100644 --- a/freqtrade/misc.py +++ b/freqtrade/misc.py @@ -1,6 +1,7 @@ """ Various tool function for Freqtrade and scripts """ + import gzip import logging from io import StringIO @@ -27,17 +28,17 @@ def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = """ if is_zip: - if filename.suffix != '.gz': - filename = filename.with_suffix('.gz') + if filename.suffix != ".gz": + filename = filename.with_suffix(".gz") if log: logger.info(f'dumping json to "{filename}"') - with gzip.open(filename, 'w') as fpz: + with gzip.open(filename, "w") as fpz: rapidjson.dump(data, fpz, default=str, number_mode=rapidjson.NM_NATIVE) else: if log: logger.info(f'dumping json to "{filename}"') - with filename.open('w') as fp: + with filename.open("w") as fp: rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE) logger.debug(f'done json to "{filename}"') @@ -54,7 +55,7 @@ def file_dump_joblib(filename: Path, data: Any, log: bool = True) -> None: if log: logger.info(f'dumping joblib to "{filename}"') - with filename.open('wb') as fp: + with filename.open("wb") as fp: joblib.dump(data, fp) logger.debug(f'done joblib dump to "{filename}"') @@ -69,9 +70,8 @@ def json_load(datafile: Union[gzip.GzipFile, TextIO]) -> Any: def file_load_json(file: Path): - if file.suffix != ".gz": - gzipfile = file.with_suffix(file.suffix + '.gz') + gzipfile = file.with_suffix(file.suffix + ".gz") else: gzipfile = file # Try gzip file first, otherwise regular json file. 
@@ -96,8 +96,8 @@ def is_file_in_dir(file: Path, directory: Path) -> bool: def pair_to_filename(pair: str) -> str: - for ch in ['/', ' ', '.', '@', '$', '+', ':']: - pair = pair.replace(ch, '_') + for ch in ["/", " ", ".", "@", "$", "+", ":"]: + pair = pair.replace(ch, "_") return pair @@ -161,7 +161,7 @@ def safe_value_fallback2(dict1: dictMap, dict2: dictMap, key1: str, key2: str, d def plural(num: float, singular: str, plural: Optional[str] = None) -> str: - return singular if (num == 1 or num == -1) else plural or singular + 's' + return singular if (num == 1 or num == -1) else plural or singular + "s" def chunks(lst: List[Any], n: int) -> Iterator[List[Any]]: @@ -172,7 +172,7 @@ def chunks(lst: List[Any], n: int) -> Iterator[List[Any]]: :return: None """ for chunk in range(0, len(lst), n): - yield (lst[chunk:chunk + n]) + yield (lst[chunk : chunk + n]) def parse_db_uri_for_logging(uri: str): @@ -184,8 +184,8 @@ def parse_db_uri_for_logging(uri: str): parsed_db_uri = urlparse(uri) if not parsed_db_uri.netloc: # No need for censoring as no password was provided return uri - pwd = parsed_db_uri.netloc.split(':')[1].split('@')[0] - return parsed_db_uri.geturl().replace(f':{pwd}@', ':*****@') + pwd = parsed_db_uri.netloc.split(":")[1].split("@")[0] + return parsed_db_uri.geturl().replace(f":{pwd}@", ":*****@") def dataframe_to_json(dataframe: pd.DataFrame) -> str: @@ -194,7 +194,7 @@ def dataframe_to_json(dataframe: pd.DataFrame) -> str: :param dataframe: A pandas DataFrame :returns: A JSON string of the pandas DataFrame """ - return dataframe.to_json(orient='split') + return dataframe.to_json(orient="split") def json_to_dataframe(data: str) -> pd.DataFrame: @@ -203,9 +203,9 @@ def json_to_dataframe(data: str) -> pd.DataFrame: :param data: A JSON string :returns: A pandas DataFrame from the JSON string """ - dataframe = pd.read_json(StringIO(data), orient='split') - if 'date' in dataframe.columns: - dataframe['date'] = pd.to_datetime(dataframe['date'], unit='ms', utc=True) + dataframe = pd.read_json(StringIO(data), orient="split") + if "date" in dataframe.columns: + dataframe["date"] = pd.to_datetime(dataframe["date"], unit="ms", utc=True) return dataframe @@ -234,7 +234,7 @@ def append_candles_to_dataframe(left: pd.DataFrame, right: pd.DataFrame) -> pd.D :param right: The new dataframe containing the data you want appended :returns: The dataframe with the right data in it """ - if left.iloc[-1]['date'] != right.iloc[-1]['date']: + if left.iloc[-1]["date"] != right.iloc[-1]["date"]: left = pd.concat([left, right]) # Only keep the last 1500 candles in memory diff --git a/freqtrade/mixins/logging_mixin.py b/freqtrade/mixins/logging_mixin.py index 31b49ba55..44fb4f63a 100644 --- a/freqtrade/mixins/logging_mixin.py +++ b/freqtrade/mixins/logging_mixin.py @@ -8,6 +8,7 @@ class LoggingMixin: Logging Mixin Shows similar messages only once every `refresh_period`. """ + # Disable output completely show_output = True @@ -27,6 +28,7 @@ class LoggingMixin: :param logmethod: Function that'll be called. Most likely `logger.info`. :return: None. 
""" + @cached(cache=self._log_cache) def _log_once(message: str): logmethod(message) diff --git a/freqtrade/optimize/analysis/lookahead.py b/freqtrade/optimize/analysis/lookahead.py index 9fa4235e7..a8eb0258e 100755 --- a/freqtrade/optimize/analysis/lookahead.py +++ b/freqtrade/optimize/analysis/lookahead.py @@ -9,8 +9,10 @@ from pandas import DataFrame from freqtrade.data.history import get_timerange from freqtrade.exchange import timeframe_to_minutes -from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester, - restore_verbosity_for_bias_tester) +from freqtrade.loggers.set_log_levels import ( + reduce_verbosity_for_bias_tester, + restore_verbosity_for_bias_tester, +) from freqtrade.optimize.backtesting import Backtesting from freqtrade.optimize.base_analysis import BaseAnalysis, VarHolder @@ -28,38 +30,33 @@ class Analysis: class LookaheadAnalysis(BaseAnalysis): - def __init__(self, config: Dict[str, Any], strategy_obj: Dict): - super().__init__(config, strategy_obj) self.entry_varHolders: List[VarHolder] = [] self.exit_varHolders: List[VarHolder] = [] self.current_analysis = Analysis() - self.minimum_trade_amount = config['minimum_trade_amount'] - self.targeted_trade_amount = config['targeted_trade_amount'] + self.minimum_trade_amount = config["minimum_trade_amount"] + self.targeted_trade_amount = config["targeted_trade_amount"] @staticmethod def get_result(backtesting: Backtesting, processed: DataFrame): min_date, max_date = get_timerange(processed) result = backtesting.backtest( - processed=deepcopy(processed), - start_date=min_date, - end_date=max_date + processed=deepcopy(processed), start_date=min_date, end_date=max_date ) return result @staticmethod def report_signal(result: dict, column_name: str, checked_timestamp: datetime): - df = result['results'] + df = result["results"] row_count = df[column_name].shape[0] if row_count == 0: return False else: - df_cut = df[(df[column_name] == checked_timestamp)] if df_cut[column_name].shape[0] == 0: return False @@ -74,16 +71,11 @@ class LookaheadAnalysis(BaseAnalysis): full_df: DataFrame = full_vars.indicators[current_pair] # cut longer dataframe to length of the shorter - full_df_cut = full_df[ - (full_df.date == cut_vars.compared_dt) - ].reset_index(drop=True) - cut_df_cut = cut_df[ - (cut_df.date == cut_vars.compared_dt) - ].reset_index(drop=True) + full_df_cut = full_df[(full_df.date == cut_vars.compared_dt)].reset_index(drop=True) + cut_df_cut = cut_df[(cut_df.date == cut_vars.compared_dt)].reset_index(drop=True) # check if dataframes are not empty if full_df_cut.shape[0] != 0 and cut_df_cut.shape[0] != 0: - # compare dataframes compare_df = full_df_cut.compare(cut_df_cut) @@ -92,40 +84,44 @@ class LookaheadAnalysis(BaseAnalysis): col_idx = compare_df.columns.get_loc(col_name) compare_df_row = compare_df.iloc[0] # compare_df now comprises tuples with [1] having either 'self' or 'other' - if 'other' in col_name[1]: + if "other" in col_name[1]: continue self_value = compare_df_row.iloc[col_idx] other_value = compare_df_row.iloc[col_idx + 1] # output differences if self_value != other_value: - if not self.current_analysis.false_indicators.__contains__(col_name[0]): self.current_analysis.false_indicators.append(col_name[0]) - logger.info(f"=> found look ahead bias in indicator " - f"{col_name[0]}. " - f"{str(self_value)} != {str(other_value)}") + logger.info( + f"=> found look ahead bias in indicator " + f"{col_name[0]}. 
" + f"{str(self_value)} != {str(other_value)}" + ) def prepare_data(self, varholder: VarHolder, pairs_to_load: List[DataFrame]): - - if 'freqai' in self.local_config and 'identifier' in self.local_config['freqai']: + if "freqai" in self.local_config and "identifier" in self.local_config["freqai"]: # purge previous data if the freqai model is defined # (to be sure nothing is carried over from older backtests) - path_to_current_identifier = ( - Path(f"{self.local_config['user_data_dir']}/models/" - f"{self.local_config['freqai']['identifier']}").resolve()) + path_to_current_identifier = Path( + f"{self.local_config['user_data_dir']}/models/" + f"{self.local_config['freqai']['identifier']}" + ).resolve() # remove folder and its contents if Path.exists(path_to_current_identifier): shutil.rmtree(path_to_current_identifier) prepare_data_config = deepcopy(self.local_config) - prepare_data_config['timerange'] = (str(self.dt_to_timestamp(varholder.from_dt)) + "-" + - str(self.dt_to_timestamp(varholder.to_dt))) - prepare_data_config['exchange']['pair_whitelist'] = pairs_to_load + prepare_data_config["timerange"] = ( + str(self.dt_to_timestamp(varholder.from_dt)) + + "-" + + str(self.dt_to_timestamp(varholder.to_dt)) + ) + prepare_data_config["exchange"]["pair_whitelist"] = pairs_to_load if self._fee is not None: # Don't re-calculate fee per pair, as fee might differ per pair. - prepare_data_config['fee'] = self._fee + prepare_data_config["fee"] = self._fee backtesting = Backtesting(prepare_data_config, self.exchange) self.exchange = backtesting.exchange @@ -144,23 +140,23 @@ class LookaheadAnalysis(BaseAnalysis): entry_varHolder = VarHolder() self.entry_varHolders.append(entry_varHolder) entry_varHolder.from_dt = self.full_varHolder.from_dt - entry_varHolder.compared_dt = result_row['open_date'] + entry_varHolder.compared_dt = result_row["open_date"] # to_dt needs +1 candle since it won't buy on the last candle - entry_varHolder.to_dt = ( - result_row['open_date'] + - timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe))) - self.prepare_data(entry_varHolder, [result_row['pair']]) + entry_varHolder.to_dt = result_row["open_date"] + timedelta( + minutes=timeframe_to_minutes(self.full_varHolder.timeframe) + ) + self.prepare_data(entry_varHolder, [result_row["pair"]]) # exit_varHolder exit_varHolder = VarHolder() self.exit_varHolders.append(exit_varHolder) # to_dt needs +1 candle since it will always exit/force-exit trades on the last candle exit_varHolder.from_dt = self.full_varHolder.from_dt - exit_varHolder.to_dt = ( - result_row['close_date'] + - timedelta(minutes=timeframe_to_minutes(self.full_varHolder.timeframe))) - exit_varHolder.compared_dt = result_row['close_date'] - self.prepare_data(exit_varHolder, [result_row['pair']]) + exit_varHolder.to_dt = result_row["close_date"] + timedelta( + minutes=timeframe_to_minutes(self.full_varHolder.timeframe) + ) + exit_varHolder.compared_dt = result_row["close_date"] + self.prepare_data(exit_varHolder, [result_row["pair"]]) # now we analyze a full trade of full_varholder and look for analyze its bias def analyze_row(self, idx: int, result_row): @@ -179,65 +175,72 @@ class LookaheadAnalysis(BaseAnalysis): # register if buy signal is broken if not self.report_signal( - self.entry_varHolders[idx].result, - "open_date", - self.entry_varHolders[idx].compared_dt): + self.entry_varHolders[idx].result, "open_date", self.entry_varHolders[idx].compared_dt + ): self.current_analysis.false_entry_signals += 1 buy_or_sell_biased = True # register if buy 
or sell signal is broken if not self.report_signal( - self.exit_varHolders[idx].result, - "close_date", - self.exit_varHolders[idx].compared_dt): + self.exit_varHolders[idx].result, "close_date", self.exit_varHolders[idx].compared_dt + ): self.current_analysis.false_exit_signals += 1 buy_or_sell_biased = True if buy_or_sell_biased: - logger.info(f"found lookahead-bias in trade " - f"pair: {result_row['pair']}, " - f"timerange:{result_row['open_date']} - {result_row['close_date']}, " - f"idx: {idx}") + logger.info( + f"found lookahead-bias in trade " + f"pair: {result_row['pair']}, " + f"timerange:{result_row['open_date']} - {result_row['close_date']}, " + f"idx: {idx}" + ) # check if the indicators themselves contain biased data - self.analyze_indicators(self.full_varHolder, self.entry_varHolders[idx], result_row['pair']) - self.analyze_indicators(self.full_varHolder, self.exit_varHolders[idx], result_row['pair']) + self.analyze_indicators(self.full_varHolder, self.entry_varHolders[idx], result_row["pair"]) + self.analyze_indicators(self.full_varHolder, self.exit_varHolders[idx], result_row["pair"]) def start(self) -> None: - super().start() reduce_verbosity_for_bias_tester() # check if requirements have been met of full_varholder - found_signals: int = self.full_varHolder.result['results'].shape[0] + 1 + found_signals: int = self.full_varHolder.result["results"].shape[0] + 1 if found_signals >= self.targeted_trade_amount: - logger.info(f"Found {found_signals} trades, " - f"calculating {self.targeted_trade_amount} trades.") + logger.info( + f"Found {found_signals} trades, " + f"calculating {self.targeted_trade_amount} trades." + ) elif self.targeted_trade_amount >= found_signals >= self.minimum_trade_amount: logger.info(f"Only found {found_signals} trades. Calculating all available trades.") else: - logger.info(f"found {found_signals} trades " - f"which is less than minimum_trade_amount {self.minimum_trade_amount}. " - f"Cancelling this backtest lookahead bias test.") + logger.info( + f"found {found_signals} trades " + f"which is less than minimum_trade_amount {self.minimum_trade_amount}. " + f"Cancelling this backtest lookahead bias test." + ) return # now we loop through all signals # starting from the same datetime to avoid miss-reports of bias - for idx, result_row in self.full_varHolder.result['results'].iterrows(): + for idx, result_row in self.full_varHolder.result["results"].iterrows(): if self.current_analysis.total_signals == self.targeted_trade_amount: logger.info(f"Found targeted trade amount = {self.targeted_trade_amount} signals.") break if found_signals < self.minimum_trade_amount: - logger.info(f"only found {found_signals} " - f"which is smaller than " - f"minimum trade amount = {self.minimum_trade_amount}. " - f"Exiting this lookahead-analysis") + logger.info( + f"only found {found_signals} " + f"which is smaller than " + f"minimum trade amount = {self.minimum_trade_amount}. " + f"Exiting this lookahead-analysis" + ) return None - if "force_exit" in result_row['exit_reason']: - logger.info("found force-exit in pair: {result_row['pair']}, " - f"timerange:{result_row['open_date']}-{result_row['close_date']}, " - f"idx: {idx}, skipping this one to avoid a false-positive.") + if "force_exit" in result_row["exit_reason"]: + logger.info( + f"found force-exit in pair: {result_row['pair']}, " + f"timerange:{result_row['open_date']}-{result_row['close_date']}, " + f"idx: {idx}, skipping this one to avoid a false-positive."
+ ) # just to keep the IDs of both full, entry and exit varholders the same # to achieve a better debugging experience @@ -248,27 +251,33 @@ class LookaheadAnalysis(BaseAnalysis): self.analyze_row(idx, result_row) if len(self.entry_varHolders) < self.minimum_trade_amount: - logger.info(f"only found {found_signals} after skipping forced exits " - f"which is smaller than " - f"minimum trade amount = {self.minimum_trade_amount}. " - f"Exiting this lookahead-analysis") + logger.info( + f"only found {found_signals} after skipping forced exits " + f"which is smaller than " + f"minimum trade amount = {self.minimum_trade_amount}. " + f"Exiting this lookahead-analysis" + ) # Restore verbosity, so it's not too quiet for the next strategy restore_verbosity_for_bias_tester() # check and report signals - if self.current_analysis.total_signals < self.local_config['minimum_trade_amount']: - logger.info(f" -> {self.local_config['strategy']} : too few trades. " - f"We only found {self.current_analysis.total_signals} trades. " - f"Hint: Extend the timerange " - f"to get at least {self.local_config['minimum_trade_amount']} " - f"or lower the value of minimum_trade_amount.") + if self.current_analysis.total_signals < self.local_config["minimum_trade_amount"]: + logger.info( + f" -> {self.local_config['strategy']} : too few trades. " + f"We only found {self.current_analysis.total_signals} trades. " + f"Hint: Extend the timerange " + f"to get at least {self.local_config['minimum_trade_amount']} " + f"or lower the value of minimum_trade_amount." + ) self.failed_bias_check = True - elif (self.current_analysis.false_entry_signals > 0 or - self.current_analysis.false_exit_signals > 0 or - len(self.current_analysis.false_indicators) > 0): + elif ( + self.current_analysis.false_entry_signals > 0 + or self.current_analysis.false_exit_signals > 0 + or len(self.current_analysis.false_indicators) > 0 + ): logger.info(f" => {self.local_config['strategy']} : bias detected!") self.current_analysis.has_bias = True self.failed_bias_check = False else: - logger.info(self.local_config['strategy'] + ": no bias detected") + logger.info(self.local_config["strategy"] + ": no bias detected") self.failed_bias_check = False diff --git a/freqtrade/optimize/analysis/lookahead_helpers.py b/freqtrade/optimize/analysis/lookahead_helpers.py index d2cc541f2..c0e6fa1ba 100644 --- a/freqtrade/optimize/analysis/lookahead_helpers.py +++ b/freqtrade/optimize/analysis/lookahead_helpers.py @@ -15,46 +15,53 @@ logger = logging.getLogger(__name__) class LookaheadAnalysisSubFunctions: - @staticmethod def text_table_lookahead_analysis_instances( - config: Dict[str, Any], - lookahead_instances: List[LookaheadAnalysis]): - headers = ['filename', 'strategy', 'has_bias', 'total_signals', - 'biased_entry_signals', 'biased_exit_signals', 'biased_indicators'] + config: Dict[str, Any], lookahead_instances: List[LookaheadAnalysis] + ): + headers = [ + "filename", + "strategy", + "has_bias", + "total_signals", + "biased_entry_signals", + "biased_exit_signals", + "biased_indicators", + ] data = [] for inst in lookahead_instances: - if config['minimum_trade_amount'] > inst.current_analysis.total_signals: + if config["minimum_trade_amount"] > inst.current_analysis.total_signals: data.append( [ - inst.strategy_obj['location'].parts[-1], - inst.strategy_obj['name'], + inst.strategy_obj["location"].parts[-1], + inst.strategy_obj["name"], "too few trades caught " f"({inst.current_analysis.total_signals}/{config['minimum_trade_amount']})." - f"Test failed." 
+ f"Test failed.", ] ) elif inst.failed_bias_check: data.append( [ - inst.strategy_obj['location'].parts[-1], - inst.strategy_obj['name'], - 'error while checking' + inst.strategy_obj["location"].parts[-1], + inst.strategy_obj["name"], + "error while checking", ] ) else: data.append( [ - inst.strategy_obj['location'].parts[-1], - inst.strategy_obj['name'], + inst.strategy_obj["location"].parts[-1], + inst.strategy_obj["name"], inst.current_analysis.has_bias, inst.current_analysis.total_signals, inst.current_analysis.false_entry_signals, inst.current_analysis.false_exit_signals, - ", ".join(inst.current_analysis.false_indicators) + ", ".join(inst.current_analysis.false_indicators), ] ) from tabulate import tabulate + table = tabulate(data, headers=headers, tablefmt="orgtbl") print(table) return table, headers, data @@ -63,89 +70,101 @@ class LookaheadAnalysisSubFunctions: def export_to_csv(config: Dict[str, Any], lookahead_analysis: List[LookaheadAnalysis]): def add_or_update_row(df, row_data): if ( - (df['filename'] == row_data['filename']) & - (df['strategy'] == row_data['strategy']) + (df["filename"] == row_data["filename"]) & (df["strategy"] == row_data["strategy"]) ).any(): # Update existing row pd_series = pd.DataFrame([row_data]) df.loc[ - (df['filename'] == row_data['filename']) & - (df['strategy'] == row_data['strategy']) - ] = pd_series + (df["filename"] == row_data["filename"]) + & (df["strategy"] == row_data["strategy"]) + ] = pd_series else: # Add new row df = pd.concat([df, pd.DataFrame([row_data], columns=df.columns)]) return df - if Path(config['lookahead_analysis_exportfilename']).exists(): + if Path(config["lookahead_analysis_exportfilename"]).exists(): # Read CSV file into a pandas dataframe - csv_df = pd.read_csv(config['lookahead_analysis_exportfilename']) + csv_df = pd.read_csv(config["lookahead_analysis_exportfilename"]) else: # Create a new empty DataFrame with the desired column names and set the index - csv_df = pd.DataFrame(columns=[ - 'filename', 'strategy', 'has_bias', 'total_signals', - 'biased_entry_signals', 'biased_exit_signals', 'biased_indicators' - ], - index=None) + csv_df = pd.DataFrame( + columns=[ + "filename", + "strategy", + "has_bias", + "total_signals", + "biased_entry_signals", + "biased_exit_signals", + "biased_indicators", + ], + index=None, + ) for inst in lookahead_analysis: # only update if - if (inst.current_analysis.total_signals > config['minimum_trade_amount'] - and inst.failed_bias_check is not True): - new_row_data = {'filename': inst.strategy_obj['location'].parts[-1], - 'strategy': inst.strategy_obj['name'], - 'has_bias': inst.current_analysis.has_bias, - 'total_signals': - int(inst.current_analysis.total_signals), - 'biased_entry_signals': - int(inst.current_analysis.false_entry_signals), - 'biased_exit_signals': - int(inst.current_analysis.false_exit_signals), - 'biased_indicators': - ",".join(inst.current_analysis.false_indicators)} + if ( + inst.current_analysis.total_signals > config["minimum_trade_amount"] + and inst.failed_bias_check is not True + ): + new_row_data = { + "filename": inst.strategy_obj["location"].parts[-1], + "strategy": inst.strategy_obj["name"], + "has_bias": inst.current_analysis.has_bias, + "total_signals": int(inst.current_analysis.total_signals), + "biased_entry_signals": int(inst.current_analysis.false_entry_signals), + "biased_exit_signals": int(inst.current_analysis.false_exit_signals), + "biased_indicators": ",".join(inst.current_analysis.false_indicators), + } csv_df = add_or_update_row(csv_df, 
new_row_data) # Fill NaN values with a default value (e.g., 0) - csv_df['total_signals'] = csv_df['total_signals'].astype(int).fillna(0) - csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].astype(int).fillna(0) - csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].astype(int).fillna(0) + csv_df["total_signals"] = csv_df["total_signals"].astype(int).fillna(0) + csv_df["biased_entry_signals"] = csv_df["biased_entry_signals"].astype(int).fillna(0) + csv_df["biased_exit_signals"] = csv_df["biased_exit_signals"].astype(int).fillna(0) # Convert columns to integers - csv_df['total_signals'] = csv_df['total_signals'].astype(int) - csv_df['biased_entry_signals'] = csv_df['biased_entry_signals'].astype(int) - csv_df['biased_exit_signals'] = csv_df['biased_exit_signals'].astype(int) + csv_df["total_signals"] = csv_df["total_signals"].astype(int) + csv_df["biased_entry_signals"] = csv_df["biased_entry_signals"].astype(int) + csv_df["biased_exit_signals"] = csv_df["biased_exit_signals"].astype(int) logger.info(f"saving {config['lookahead_analysis_exportfilename']}") - csv_df.to_csv(config['lookahead_analysis_exportfilename'], index=False) + csv_df.to_csv(config["lookahead_analysis_exportfilename"], index=False) @staticmethod def calculate_config_overrides(config: Config): - if config.get('enable_protections', False): + if config.get("enable_protections", False): # if protections are used globally, they can produce false positives. - config['enable_protections'] = False - logger.info('Protections were enabled. ' - 'Disabling protections now ' - 'since they could otherwise produce false positives.') - if config['targeted_trade_amount'] < config['minimum_trade_amount']: + config["enable_protections"] = False + logger.info( + "Protections were enabled. " + "Disabling protections now " + "since they could otherwise produce false positives." + ) + if config["targeted_trade_amount"] < config["minimum_trade_amount"]: # this combo doesn't make any sense. raise OperationalException( "Targeted trade amount can't be smaller than minimum trade amount." ) - if len(config['pairs']) > config.get('max_open_trades', 0): - logger.info('Max_open_trades were less than amount of pairs ' - 'or defined in the strategy. ' - 'Set max_open_trades to amount of pairs ' - 'just to avoid false positives.') - config['max_open_trades'] = len(config['pairs']) + if len(config["pairs"]) > config.get("max_open_trades", 0): + logger.info( + "Max_open_trades were less than amount of pairs " + "or defined in the strategy. " + "Set max_open_trades to amount of pairs " + "just to avoid false positives." + ) + config["max_open_trades"] = len(config["pairs"]) min_dry_run_wallet = 1000000000 - if config['dry_run_wallet'] < min_dry_run_wallet: - logger.info('Dry run wallet was not set to 1 billion, pushing it up there ' - 'just to avoid false positives') - config['dry_run_wallet'] = min_dry_run_wallet + if config["dry_run_wallet"] < min_dry_run_wallet: + logger.info( + "Dry run wallet was not set to 1 billion, pushing it up there " + "just to avoid false positives" + ) + config["dry_run_wallet"] = min_dry_run_wallet - if 'timerange' not in config: + if "timerange" not in config: # setting a timerange is enforced here raise OperationalException( "Please set a timerange. 
" @@ -155,32 +174,35 @@ class LookaheadAnalysisSubFunctions: # in a combination with a wallet size of 1 billion it should always be able to trade # no matter if they use custom_stake_amount as a small percentage of wallet size # or fixate custom_stake_amount to a certain value. - logger.info('fixing stake_amount to 10k') - config['stake_amount'] = 10000 + logger.info("fixing stake_amount to 10k") + config["stake_amount"] = 10000 # enforce cache to be 'none', shift it to 'none' if not already # (since the default value is 'day') - if config.get('backtest_cache') is None: - config['backtest_cache'] = 'none' - elif config['backtest_cache'] != 'none': - logger.info(f"backtest_cache = " - f"{config['backtest_cache']} detected. " - f"Inside lookahead-analysis it is enforced to be 'none'. " - f"Changed it to 'none'") - config['backtest_cache'] = 'none' + if config.get("backtest_cache") is None: + config["backtest_cache"] = "none" + elif config["backtest_cache"] != "none": + logger.info( + f"backtest_cache = " + f"{config['backtest_cache']} detected. " + f"Inside lookahead-analysis it is enforced to be 'none'. " + f"Changed it to 'none'" + ) + config["backtest_cache"] = "none" return config @staticmethod def initialize_single_lookahead_analysis(config: Config, strategy_obj: Dict[str, Any]): - logger.info(f"Bias test of {Path(strategy_obj['location']).name} started.") start = time.perf_counter() current_instance = LookaheadAnalysis(config, strategy_obj) current_instance.start() elapsed = time.perf_counter() - start - logger.info(f"Checking look ahead bias via backtests " - f"of {Path(strategy_obj['location']).name} " - f"took {elapsed:.0f} seconds.") + logger.info( + f"Checking look ahead bias via backtests " + f"of {Path(strategy_obj['location']).name} " + f"took {elapsed:.0f} seconds." + ) return current_instance @staticmethod @@ -188,36 +210,42 @@ class LookaheadAnalysisSubFunctions: config = LookaheadAnalysisSubFunctions.calculate_config_overrides(config) strategy_objs = StrategyResolver.search_all_objects( - config, enum_failed=False, recursive=config.get('recursive_strategy_search', False)) + config, enum_failed=False, recursive=config.get("recursive_strategy_search", False) + ) lookaheadAnalysis_instances = [] # unify --strategy and --strategy-list to one list - if not (strategy_list := config.get('strategy_list', [])): - if config.get('strategy') is None: + if not (strategy_list := config.get("strategy_list", [])): + if config.get("strategy") is None: raise OperationalException( "No Strategy specified. Please specify a strategy via --strategy or " "--strategy-list" ) - strategy_list = [config['strategy']] + strategy_list = [config["strategy"]] # check if strategies can be properly loaded, only check them if they can be. 
for strat in strategy_list: for strategy_obj in strategy_objs: - if strategy_obj['name'] == strat and strategy_obj not in strategy_list: + if strategy_obj["name"] == strat and strategy_obj not in strategy_list: lookaheadAnalysis_instances.append( LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis( - config, strategy_obj)) + config, strategy_obj + ) + ) break # report the results if lookaheadAnalysis_instances: LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( - config, lookaheadAnalysis_instances) - if config.get('lookahead_analysis_exportfilename') is not None: + config, lookaheadAnalysis_instances + ) + if config.get("lookahead_analysis_exportfilename") is not None: LookaheadAnalysisSubFunctions.export_to_csv(config, lookaheadAnalysis_instances) else: - logger.error("There were no strategies specified neither through " - "--strategy nor through " - "--strategy-list " - "or timeframe was not specified.") + logger.error( + "There were no strategies specified neither through " + "--strategy nor through " + "--strategy-list " + "or timeframe was not specified." + ) diff --git a/freqtrade/optimize/analysis/recursive.py b/freqtrade/optimize/analysis/recursive.py index 5a41f8795..f6e4fa3a9 100644 --- a/freqtrade/optimize/analysis/recursive.py +++ b/freqtrade/optimize/analysis/recursive.py @@ -8,8 +8,10 @@ from typing import Any, Dict, List from pandas import DataFrame from freqtrade.exchange import timeframe_to_minutes -from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester, - restore_verbosity_for_bias_tester) +from freqtrade.loggers.set_log_levels import ( + reduce_verbosity_for_bias_tester, + restore_verbosity_for_bias_tester, +) from freqtrade.optimize.backtesting import Backtesting from freqtrade.optimize.base_analysis import BaseAnalysis, VarHolder @@ -18,10 +20,8 @@ logger = logging.getLogger(__name__) class RecursiveAnalysis(BaseAnalysis): - def __init__(self, config: Dict[str, Any], strategy_obj: Dict): - - self._startup_candle = config.get('startup_candle', [199, 399, 499, 999, 1999]) + self._startup_candle = config.get("startup_candle", [199, 399, 499, 999, 1999]) super().__init__(config, strategy_obj) @@ -33,8 +33,7 @@ class RecursiveAnalysis(BaseAnalysis): # For recursive bias check # analyzes two data frames with processed indicators and shows differences between them. def analyze_indicators(self): - - pair_to_check = self.local_config['pairs'][0] + pair_to_check = self.local_config["pairs"][0] logger.info("Start checking for recursive bias") # check and report signals @@ -48,17 +47,17 @@ class RecursiveAnalysis(BaseAnalysis): # print(compare_df) for col_name, values in compare_df.items(): # print(col_name) - if 'other' == col_name: + if "other" == col_name: continue indicators = values.index for indicator in indicators: - if (indicator not in self.dict_recursive): + if indicator not in self.dict_recursive: self.dict_recursive[indicator] = {} values_diff = compare_df.loc[indicator] - values_diff_self = values_diff.loc['self'] - values_diff_other = values_diff.loc['other'] + values_diff_self = values_diff.loc["self"] + values_diff_other = values_diff.loc["other"] diff = (values_diff_other - values_diff_self) / values_diff_self * 100 self.dict_recursive[indicator][part.startup_candle] = f"{diff:.3f}%" @@ -70,17 +69,16 @@ class RecursiveAnalysis(BaseAnalysis): # For lookahead bias check # analyzes two data frames with processed indicators and shows differences between them. 
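Both bias checkers lean on pandas' compare(), which reports differing values under 'self' and 'other' labels; the recursive check then turns each difference into a percentage, and the indicator-only lookahead check defined next reuses the same structure. A tiny sketch with invented indicator values (the real code compares rows taken from the full and truncated backtest dataframes):

import pandas as pd

base = pd.Series({"rsi": 31.5, "ema_21": 101.0, "atr": 2.4})
partial = pd.Series({"rsi": 31.5, "ema_21": 99.5, "atr": 2.4})

diff = partial.compare(base)  # only differing labels remain; columns are 'self' and 'other'
print(diff)
#         self  other
# ema_21  99.5  101.0

for indicator in diff.index:
    values_diff_self = diff.loc[indicator, "self"]
    values_diff_other = diff.loc[indicator, "other"]
    pct = (values_diff_other - values_diff_self) / values_diff_self * 100
    print(f"{indicator}: {pct:.3f}%")  # ema_21: 1.508%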
def analyze_indicators_lookahead(self): - - pair_to_check = self.local_config['pairs'][0] + pair_to_check = self.local_config["pairs"][0] logger.info("Start checking for lookahead bias on indicators only") part = self.partial_varHolder_lookahead_array[0] part_last_row = part.indicators[pair_to_check].iloc[-1] - date_to_check = part_last_row['date'] - index_to_get = (self.full_varHolder.indicators[pair_to_check]['date'] == date_to_check) + date_to_check = part_last_row["date"] + index_to_get = self.full_varHolder.indicators[pair_to_check]["date"] == date_to_check base_row_check = self.full_varHolder.indicators[pair_to_check].loc[index_to_get].iloc[-1] - check_time = part.to_dt.strftime('%Y-%m-%dT%H:%M:%S') + check_time = part.to_dt.strftime("%Y-%m-%dT%H:%M:%S") logger.info(f"Check indicators at {check_time}") # logger.info(f"vs {part_timerange} with {part.startup_candle} startup candle") @@ -90,7 +88,7 @@ class RecursiveAnalysis(BaseAnalysis): # print(compare_df) for col_name, values in compare_df.items(): # print(col_name) - if 'other' == col_name: + if "other" == col_name: continue indicators = values.index @@ -103,21 +101,24 @@ class RecursiveAnalysis(BaseAnalysis): logger.info("No lookahead bias on indicators found.") def prepare_data(self, varholder: VarHolder, pairs_to_load: List[DataFrame]): - - if 'freqai' in self.local_config and 'identifier' in self.local_config['freqai']: + if "freqai" in self.local_config and "identifier" in self.local_config["freqai"]: # purge previous data if the freqai model is defined # (to be sure nothing is carried over from older backtests) - path_to_current_identifier = ( - Path(f"{self.local_config['user_data_dir']}/models/" - f"{self.local_config['freqai']['identifier']}").resolve()) + path_to_current_identifier = Path( + f"{self.local_config['user_data_dir']}/models/" + f"{self.local_config['freqai']['identifier']}" + ).resolve() # remove folder and its contents if Path.exists(path_to_current_identifier): shutil.rmtree(path_to_current_identifier) prepare_data_config = deepcopy(self.local_config) - prepare_data_config['timerange'] = (str(self.dt_to_timestamp(varholder.from_dt)) + "-" + - str(self.dt_to_timestamp(varholder.to_dt))) - prepare_data_config['exchange']['pair_whitelist'] = pairs_to_load + prepare_data_config["timerange"] = ( + str(self.dt_to_timestamp(varholder.from_dt)) + + "-" + + str(self.dt_to_timestamp(varholder.to_dt)) + ) + prepare_data_config["exchange"]["pair_whitelist"] = pairs_to_load backtesting = Backtesting(prepare_data_config, self.exchange) self.exchange = backtesting.exchange @@ -137,9 +138,9 @@ class RecursiveAnalysis(BaseAnalysis): partial_varHolder.to_dt = self.full_varHolder.to_dt partial_varHolder.startup_candle = startup_candle - self.local_config['startup_candle_count'] = startup_candle + self.local_config["startup_candle_count"] = startup_candle - self.prepare_data(partial_varHolder, self.local_config['pairs']) + self.prepare_data(partial_varHolder, self.local_config["pairs"]) self.partial_varHolder_array.append(partial_varHolder) @@ -151,12 +152,11 @@ class RecursiveAnalysis(BaseAnalysis): partial_varHolder.from_dt = self.full_varHolder.from_dt partial_varHolder.to_dt = end_date - self.prepare_data(partial_varHolder, self.local_config['pairs']) + self.prepare_data(partial_varHolder, self.local_config["pairs"]) self.partial_varHolder_lookahead_array.append(partial_varHolder) def start(self) -> None: - super().start() reduce_verbosity_for_bias_tester() diff --git a/freqtrade/optimize/analysis/recursive_helpers.py 
b/freqtrade/optimize/analysis/recursive_helpers.py index 32dbce149..cde1a214e 100644 --- a/freqtrade/optimize/analysis/recursive_helpers.py +++ b/freqtrade/optimize/analysis/recursive_helpers.py @@ -13,12 +13,10 @@ logger = logging.getLogger(__name__) class RecursiveAnalysisSubFunctions: - @staticmethod - def text_table_recursive_analysis_instances( - recursive_instances: List[RecursiveAnalysis]): + def text_table_recursive_analysis_instances(recursive_instances: List[RecursiveAnalysis]): startups = recursive_instances[0]._startup_candle - headers = ['indicators'] + headers = ["indicators"] for candle in startups: headers.append(candle) @@ -28,11 +26,12 @@ class RecursiveAnalysisSubFunctions: for indicator, values in inst.dict_recursive.items(): temp_data = [indicator] for candle in startups: - temp_data.append(values.get(int(candle), '-')) + temp_data.append(values.get(int(candle), "-")) data.append(temp_data) if len(data) > 0: from tabulate import tabulate + table = tabulate(data, headers=headers, tablefmt="orgtbl") print(table) return table, headers, data @@ -41,34 +40,37 @@ class RecursiveAnalysisSubFunctions: @staticmethod def calculate_config_overrides(config: Config): - if 'timerange' not in config: + if "timerange" not in config: # setting a timerange is enforced here raise OperationalException( "Please set a timerange. " "A timerange of 5000 candles are enough for recursive analysis." ) - if config.get('backtest_cache') is None: - config['backtest_cache'] = 'none' - elif config['backtest_cache'] != 'none': - logger.info(f"backtest_cache = " - f"{config['backtest_cache']} detected. " - f"Inside recursive-analysis it is enforced to be 'none'. " - f"Changed it to 'none'") - config['backtest_cache'] = 'none' + if config.get("backtest_cache") is None: + config["backtest_cache"] = "none" + elif config["backtest_cache"] != "none": + logger.info( + f"backtest_cache = " + f"{config['backtest_cache']} detected. " + f"Inside recursive-analysis it is enforced to be 'none'. " + f"Changed it to 'none'" + ) + config["backtest_cache"] = "none" return config @staticmethod def initialize_single_recursive_analysis(config: Config, strategy_obj: Dict[str, Any]): - logger.info(f"Recursive test of {Path(strategy_obj['location']).name} started.") start = time.perf_counter() current_instance = RecursiveAnalysis(config, strategy_obj) current_instance.start() elapsed = time.perf_counter() - start - logger.info(f"Checking recursive and indicator-only lookahead bias of indicators " - f"of {Path(strategy_obj['location']).name} " - f"took {elapsed:.0f} seconds.") + logger.info( + f"Checking recursive and indicator-only lookahead bias of indicators " + f"of {Path(strategy_obj['location']).name} " + f"took {elapsed:.0f} seconds." + ) return current_instance @staticmethod @@ -76,31 +78,37 @@ class RecursiveAnalysisSubFunctions: config = RecursiveAnalysisSubFunctions.calculate_config_overrides(config) strategy_objs = StrategyResolver.search_all_objects( - config, enum_failed=False, recursive=config.get('recursive_strategy_search', False)) + config, enum_failed=False, recursive=config.get("recursive_strategy_search", False) + ) RecursiveAnalysis_instances = [] # unify --strategy and --strategy-list to one list - if not (strategy_list := config.get('strategy_list', [])): - if config.get('strategy') is None: + if not (strategy_list := config.get("strategy_list", [])): + if config.get("strategy") is None: raise OperationalException( "No Strategy specified. 
Please specify a strategy via --strategy" ) - strategy_list = [config['strategy']] + strategy_list = [config["strategy"]] # check if strategies can be properly loaded, only check them if they can be. for strat in strategy_list: for strategy_obj in strategy_objs: - if strategy_obj['name'] == strat and strategy_obj not in strategy_list: + if strategy_obj["name"] == strat and strategy_obj not in strategy_list: RecursiveAnalysis_instances.append( RecursiveAnalysisSubFunctions.initialize_single_recursive_analysis( - config, strategy_obj)) + config, strategy_obj + ) + ) break # report the results if RecursiveAnalysis_instances: RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances( - RecursiveAnalysis_instances) + RecursiveAnalysis_instances + ) else: - logger.error("There was no strategy specified through --strategy " - "or timeframe was not specified.") + logger.error( + "There was no strategy specified through --strategy " + "or timeframe was not specified." + ) diff --git a/freqtrade/optimize/backtest_caching.py b/freqtrade/optimize/backtest_caching.py index f34bbffef..2f9c151ad 100644 --- a/freqtrade/optimize/backtest_caching.py +++ b/freqtrade/optimize/backtest_caching.py @@ -17,19 +17,23 @@ def get_strategy_run_id(strategy) -> str: config = deepcopy(strategy.config) # Options that have no impact on results of individual backtest. - not_important_keys = ('strategy_list', 'original_config', 'telegram', 'api_server') + not_important_keys = ("strategy_list", "original_config", "telegram", "api_server") for k in not_important_keys: if k in config: del config[k] # Explicitly allow NaN values (e.g. max_open_trades). # as it does not matter for getting the hash. - digest.update(rapidjson.dumps(config, default=str, - number_mode=rapidjson.NM_NAN).encode('utf-8')) + digest.update( + rapidjson.dumps(config, default=str, number_mode=rapidjson.NM_NAN).encode("utf-8") + ) # Include _ft_params_from_file - so changing parameter files cause cache eviction - digest.update(rapidjson.dumps( - strategy._ft_params_from_file, default=str, number_mode=rapidjson.NM_NAN).encode('utf-8')) - with Path(strategy.__file__).open('rb') as fp: + digest.update( + rapidjson.dumps( + strategy._ft_params_from_file, default=str, number_mode=rapidjson.NM_NAN + ).encode("utf-8") + ) + with Path(strategy.__file__).open("rb") as fp: digest.update(fp.read()) return digest.hexdigest().lower() @@ -37,4 +41,4 @@ def get_strategy_run_id(strategy) -> str: def get_backtest_metadata_filename(filename: Union[Path, str]) -> Path: """Return metadata filename for specified backtest results file.""" filename = Path(filename) - return filename.parent / Path(f'{filename.stem}.meta{filename.suffix}') + return filename.parent / Path(f"{filename.stem}.meta{filename.suffix}") diff --git a/freqtrade/optimize/backtesting.py b/freqtrade/optimize/backtesting.py index 1952c4fe7..03b026744 100644 --- a/freqtrade/optimize/backtesting.py +++ b/freqtrade/optimize/backtesting.py @@ -3,6 +3,7 @@ """ This module contains the backtesting logic """ + import logging from collections import defaultdict from copy import deepcopy @@ -20,28 +21,48 @@ from freqtrade.data.btanalysis import find_existing_backtest_stats, trade_list_t from freqtrade.data.converter import trim_dataframe, trim_dataframes from freqtrade.data.dataprovider import DataProvider from freqtrade.data.metrics import combined_dataframes_with_rel_mean -from freqtrade.enums import (BacktestState, CandleType, ExitCheckTuple, ExitType, RunMode, - TradingMode) +from freqtrade.enums import ( 
+ BacktestState, + CandleType, + ExitCheckTuple, + ExitType, + RunMode, + TradingMode, +) from freqtrade.exceptions import DependencyException, OperationalException -from freqtrade.exchange import (amount_to_contract_precision, price_to_precision, - timeframe_to_seconds) +from freqtrade.exchange import ( + amount_to_contract_precision, + price_to_precision, + timeframe_to_seconds, +) from freqtrade.exchange.exchange import Exchange from freqtrade.mixins import LoggingMixin from freqtrade.optimize.backtest_caching import get_strategy_run_id from freqtrade.optimize.bt_progress import BTProgress -from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_rejected_signals, - generate_trade_signal_candles, - show_backtest_results, - store_backtest_analysis_results, - store_backtest_stats) -from freqtrade.persistence import (CustomDataWrapper, LocalTrade, Order, PairLocks, Trade, - disable_database_use, enable_database_use) +from freqtrade.optimize.optimize_reports import ( + generate_backtest_stats, + generate_rejected_signals, + generate_trade_signal_candles, + show_backtest_results, + store_backtest_analysis_results, + store_backtest_stats, +) +from freqtrade.persistence import ( + CustomDataWrapper, + LocalTrade, + Order, + PairLocks, + Trade, + disable_database_use, + enable_database_use, +) from freqtrade.plugins.pairlistmanager import PairListManager from freqtrade.plugins.protectionmanager import ProtectionManager from freqtrade.resolvers import ExchangeResolver, StrategyResolver from freqtrade.strategy.interface import IStrategy from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper from freqtrade.types import BacktestResultType, get_BacktestResultType_default +from freqtrade.util import FtPrecise from freqtrade.util.migrations import migrate_data from freqtrade.wallets import Wallets @@ -63,8 +84,19 @@ EXIT_TAG_IDX = 10 # Every change to this headers list must evaluate further usages of the resulting tuple # and eventually change the constants for indexes at the top -HEADERS = ['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', - 'enter_short', 'exit_short', 'enter_tag', 'exit_tag'] +HEADERS = [ + "date", + "open", + "high", + "low", + "close", + "enter_long", + "exit_long", + "enter_short", + "exit_short", + "enter_tag", + "exit_tag", +] class Backtesting: @@ -77,14 +109,13 @@ class Backtesting: """ def __init__(self, config: Config, exchange: Optional[Exchange] = None) -> None: - LoggingMixin.show_output = False self.config = config self.results: BacktestResultType = get_BacktestResultType_default() self.trade_id_counter: int = 0 self.order_id_counter: int = 0 - config['dry_run'] = True + config["dry_run"] = True self.run_ids: Dict[str, str] = {} self.strategylist: List[IStrategy] = [] self.all_results: Dict[str, Dict] = {} @@ -92,20 +123,22 @@ class Backtesting: self.rejected_dict: Dict[str, List] = {} self.rejected_df: Dict[str, Dict] = {} - self._exchange_name = self.config['exchange']['name'] + self._exchange_name = self.config["exchange"]["name"] if not exchange: exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True) self.exchange = exchange self.dataprovider = DataProvider(self.config, self.exchange) - if self.config.get('strategy_list'): - if self.config.get('freqai', {}).get('enabled', False): - logger.warning("Using --strategy-list with FreqAI REQUIRES all strategies " - "to have identical feature_engineering_* functions.") - for strat in list(self.config['strategy_list']): + if 
self.config.get("strategy_list"): + if self.config.get("freqai", {}).get("enabled", False): + logger.warning( + "Using --strategy-list with FreqAI REQUIRES all strategies " + "to have identical feature_engineering_* functions." + ) + for strat in list(self.config["strategy_list"]): stratconf = deepcopy(self.config) - stratconf['strategy'] = strat + stratconf["strategy"] = strat self.strategylist.append(StrategyResolver.load_strategy(stratconf)) validate_config_consistency(stratconf) @@ -115,9 +148,11 @@ class Backtesting: validate_config_consistency(self.config) if "timeframe" not in self.config: - raise OperationalException("Timeframe needs to be set in either " - "configuration or as cli argument `--timeframe 5m`") - self.timeframe = str(self.config.get('timeframe')) + raise OperationalException( + "Timeframe needs to be set in either " + "configuration or as cli argument `--timeframe 5m`" + ) + self.timeframe = str(self.config.get("timeframe")) self.timeframe_secs = timeframe_to_seconds(self.timeframe) self.timeframe_min = self.timeframe_secs // 60 self.timeframe_td = timedelta(seconds=self.timeframe_secs) @@ -132,48 +167,60 @@ class Backtesting: if len(self.pairlists.whitelist) == 0: raise OperationalException("No pair in whitelist.") - if config.get('fee', None) is not None: - self.fee = config['fee'] + if config.get("fee", None) is not None: + self.fee = config["fee"] + logger.info(f"Using fee {self.fee:.4%} from config.") else: - self.fee = self.exchange.get_fee(symbol=self.pairlists.whitelist[0]) + fees = [ + self.exchange.get_fee( + symbol=self.pairlists.whitelist[0], + taker_or_maker=mt, # type: ignore + ) + for mt in ("taker", "maker") + ] + self.fee = max(fee for fee in fees if fee is not None) + logger.info(f"Using fee {self.fee:.4%} - worst case fee from exchange (lowest tier).") self.precision_mode = self.exchange.precisionMode - if self.config.get('freqai_backtest_live_models', False): + if self.config.get("freqai_backtest_live_models", False): from freqtrade.freqai.utils import get_timerange_backtest_live_models - self.config['timerange'] = get_timerange_backtest_live_models(self.config) + + self.config["timerange"] = get_timerange_backtest_live_models(self.config) self.timerange = TimeRange.parse_timerange( - None if self.config.get('timerange') is None else str(self.config.get('timerange'))) + None if self.config.get("timerange") is None else str(self.config.get("timerange")) + ) # Get maximum required startup period self.required_startup = max([strat.startup_candle_count for strat in self.strategylist]) self.exchange.validate_required_startup_candles(self.required_startup, self.timeframe) # Add maximum startup candle count to configuration for informative pairs support - self.config['startup_candle_count'] = self.required_startup + self.config["startup_candle_count"] = self.required_startup - if self.config.get('freqai', {}).get('enabled', False): + if self.config.get("freqai", {}).get("enabled", False): # For FreqAI, increase the required_startup to includes the training data # This value should NOT be written to startup_candle_count self.required_startup = self.dataprovider.get_required_startup(self.timeframe) - self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) + self.trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT) # strategies which define "can_short=True" will fail to load in Spot mode. 
self._can_short = self.trading_mode != TradingMode.SPOT - self._position_stacking: bool = self.config.get('position_stacking', False) - self.enable_protections: bool = self.config.get('enable_protections', False) + self._position_stacking: bool = self.config.get("position_stacking", False) + self.enable_protections: bool = self.config.get("enable_protections", False) migrate_data(config, self.exchange) self.init_backtest() def _validate_pairlists_for_backtesting(self): - if 'VolumePairList' in self.pairlists.name_list: - raise OperationalException("VolumePairList not allowed for backtesting. " - "Please use StaticPairList instead.") - if 'PerformanceFilter' in self.pairlists.name_list: + if "VolumePairList" in self.pairlists.name_list: + raise OperationalException( + "VolumePairList not allowed for backtesting. Please use StaticPairList instead." + ) + if "PerformanceFilter" in self.pairlists.name_list: raise OperationalException("PerformanceFilter not allowed for backtesting.") - if len(self.strategylist) > 1 and 'PrecisionFilter' in self.pairlists.name_list: + if len(self.strategylist) > 1 and "PrecisionFilter" in self.pairlists.name_list: raise OperationalException( "PrecisionFilter not allowed for backtesting multiple strategies." ) @@ -185,13 +232,14 @@ class Backtesting: def init_backtest_detail(self) -> None: # Load detail timeframe if specified - self.timeframe_detail = str(self.config.get('timeframe_detail', '')) + self.timeframe_detail = str(self.config.get("timeframe_detail", "")) if self.timeframe_detail: timeframe_detail_secs = timeframe_to_seconds(self.timeframe_detail) self.timeframe_detail_td = timedelta(seconds=timeframe_detail_secs) if self.timeframe_secs <= timeframe_detail_secs: raise OperationalException( - "Detail timeframe must be smaller than strategy timeframe.") + "Detail timeframe must be smaller than strategy timeframe." 
+ ) else: self.timeframe_detail_td = timedelta(seconds=0) @@ -199,7 +247,6 @@ class Backtesting: self.futures_data: Dict[str, DataFrame] = {} def init_backtest(self): - self.prepare_backtest(False) self.wallets = Wallets(self.config, self.exchange, is_backtest=True) @@ -218,18 +265,18 @@ class Backtesting: # Set stoploss_on_exchange to false for backtesting, # since a "perfect" stoploss-exit is assumed anyway # And the regular "stoploss" function would not apply to that case - self.strategy.order_types['stoploss_on_exchange'] = False + self.strategy.order_types["stoploss_on_exchange"] = False # Update can_short flag self._can_short = self.trading_mode != TradingMode.SPOT and strategy.can_short self.strategy.ft_bot_start() def _load_protections(self, strategy: IStrategy): - if self.config.get('enable_protections', False): + if self.config.get("enable_protections", False): conf = self.config - if hasattr(strategy, 'protections'): + if hasattr(strategy, "protections"): conf = deepcopy(conf) - conf['protections'] = strategy.protections + conf["protections"] = strategy.protections self.protections = ProtectionManager(self.config, strategy.protections) def load_bt_data(self) -> Tuple[Dict[str, DataFrame], TimeRange]: @@ -240,25 +287,28 @@ class Backtesting: self.progress.init_step(BacktestState.DATALOAD, 1) data = history.load_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=self.pairlists.whitelist, timeframe=self.timeframe, timerange=self.timerange, startup_candles=self.required_startup, fail_without_data=True, - data_format=self.config['dataformat_ohlcv'], - candle_type=self.config.get('candle_type_def', CandleType.SPOT) + data_format=self.config["dataformat_ohlcv"], + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) min_date, max_date = history.get_timerange(data) - logger.info(f'Loading data from {min_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'({(max_date - min_date).days} days).') + logger.info( + f"Loading data from {min_date.strftime(DATETIME_PRINT_FORMAT)} " + f"up to {max_date.strftime(DATETIME_PRINT_FORMAT)} " + f"({(max_date - min_date).days} days)." 
+ ) # Adjust startts forward if not enough data is available - self.timerange.adjust_start_if_necessary(timeframe_to_seconds(self.timeframe), - self.required_startup, min_date) + self.timerange.adjust_start_if_necessary( + timeframe_to_seconds(self.timeframe), self.required_startup, min_date + ) self.progress.set_new_value(1) return data, self.timerange @@ -269,44 +319,44 @@ class Backtesting: """ if self.timeframe_detail: self.detail_data = history.load_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=self.pairlists.whitelist, timeframe=self.timeframe_detail, timerange=self.timerange, startup_candles=0, fail_without_data=True, - data_format=self.config['dataformat_ohlcv'], - candle_type=self.config.get('candle_type_def', CandleType.SPOT) + data_format=self.config["dataformat_ohlcv"], + candle_type=self.config.get("candle_type_def", CandleType.SPOT), ) else: self.detail_data = {} if self.trading_mode == TradingMode.FUTURES: - self.funding_fee_timeframe: str = self.exchange.get_option('funding_fee_timeframe') + self.funding_fee_timeframe: str = self.exchange.get_option("funding_fee_timeframe") self.funding_fee_timeframe_secs: int = timeframe_to_seconds(self.funding_fee_timeframe) - mark_timeframe: str = self.exchange.get_option('mark_ohlcv_timeframe') + mark_timeframe: str = self.exchange.get_option("mark_ohlcv_timeframe") # Load additional futures data. funding_rates_dict = history.load_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=self.pairlists.whitelist, timeframe=self.funding_fee_timeframe, timerange=self.timerange, startup_candles=0, fail_without_data=True, - data_format=self.config['dataformat_ohlcv'], - candle_type=CandleType.FUNDING_RATE + data_format=self.config["dataformat_ohlcv"], + candle_type=CandleType.FUNDING_RATE, ) # For simplicity, assign to CandleType.Mark (might contain index candles!) mark_rates_dict = history.load_data( - datadir=self.config['datadir'], + datadir=self.config["datadir"], pairs=self.pairlists.whitelist, timeframe=mark_timeframe, timerange=self.timerange, startup_candles=0, fail_without_data=True, - data_format=self.config['dataformat_ohlcv'], - candle_type=CandleType.from_string(self.exchange.get_option("mark_ohlcv_price")) + data_format=self.config["dataformat_ohlcv"], + candle_type=CandleType.from_string(self.exchange.get_option("mark_ohlcv_price")), ) # Combine data to avoid combining the data per trade. unavailable_pairs = [] @@ -318,13 +368,14 @@ class Backtesting: self.futures_data[pair] = self.exchange.combine_funding_and_mark( funding_rates=funding_rates_dict[pair], mark_rates=mark_rates_dict[pair], - futures_funding_rate=self.config.get('futures_funding_rate', None), + futures_funding_rate=self.config.get("futures_funding_rate", None), ) if unavailable_pairs: raise OperationalException( f"Pairs {', '.join(unavailable_pairs)} got no leverage tiers available. " - "It is therefore impossible to backtest with this pair at the moment.") + "It is therefore impossible to backtest with this pair at the moment." 
+ ) else: self.futures_data = {} @@ -379,15 +430,17 @@ class Backtesting: if not pair_data.empty: # Cleanup from prior runs - pair_data.drop(HEADERS[5:] + ['buy', 'sell'], axis=1, errors='ignore') - df_analyzed = self.strategy.ft_advise_signals(pair_data, {'pair': pair}) + pair_data.drop(HEADERS[5:] + ["buy", "sell"], axis=1, errors="ignore") + df_analyzed = self.strategy.ft_advise_signals(pair_data, {"pair": pair}) # Update dataprovider cache self.dataprovider._set_cached_df( - pair, self.timeframe, df_analyzed, self.config['candle_type_def']) + pair, self.timeframe, df_analyzed, self.config["candle_type_def"] + ) # Trim startup period from analyzed dataframe df_analyzed = processed[pair] = pair_data = trim_dataframe( - df_analyzed, self.timerange, startup_candles=self.required_startup) + df_analyzed, self.timerange, startup_candles=self.required_startup + ) # Create a copy of the dataframe before shifting, that way the entry signal/tag # remains on the correct candle for callbacks. @@ -396,10 +449,13 @@ class Backtesting: # To avoid using data from future, we use entry/exit signals shifted # from the previous candle for col in HEADERS[5:]: - tag_col = col in ('enter_tag', 'exit_tag') + tag_col = col in ("enter_tag", "exit_tag") if col in df_analyzed.columns: - df_analyzed[col] = df_analyzed.loc[:, col].replace( - [nan], [0 if not tag_col else None]).shift(1) + df_analyzed[col] = ( + df_analyzed.loc[:, col] + .replace([nan], [0 if not tag_col else None]) + .shift(1) + ) elif not df_analyzed.empty: df_analyzed[col] = 0 if not tag_col else None @@ -410,22 +466,27 @@ class Backtesting: data[pair] = df_analyzed[HEADERS].values.tolist() if not df_analyzed.empty else [] return data - def _get_close_rate(self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, - trade_dur: int) -> float: + def _get_close_rate( + self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, trade_dur: int + ) -> float: """ Get close rate for backtesting result """ # Special handling if high or low hit STOP_LOSS or ROI if exit.exit_type in ( - ExitType.STOP_LOSS, ExitType.TRAILING_STOP_LOSS, ExitType.LIQUIDATION): + ExitType.STOP_LOSS, + ExitType.TRAILING_STOP_LOSS, + ExitType.LIQUIDATION, + ): return self._get_close_rate_for_stoploss(row, trade, exit, trade_dur) elif exit.exit_type == (ExitType.ROI): return self._get_close_rate_for_roi(row, trade, exit, trade_dur) else: return row[OPEN_IDX] - def _get_close_rate_for_stoploss(self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, - trade_dur: int) -> float: + def _get_close_rate_for_stoploss( + self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, trade_dur: int + ) -> float: # our stoploss was already lower than candle high, # possibly due to a cancelled trade exit. # exit at open price. @@ -449,19 +510,23 @@ class Backtesting: # immediately going down to stop price. if exit.exit_type == ExitType.TRAILING_STOP_LOSS and trade_dur == 0: if ( - not self.strategy.use_custom_stoploss and self.strategy.trailing_stop + not self.strategy.use_custom_stoploss + and self.strategy.trailing_stop and self.strategy.trailing_only_offset_is_reached and self.strategy.trailing_stop_positive_offset is not None and self.strategy.trailing_stop_positive ): # Worst case: price reaches stop_positive_offset and dives down. 
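A worked instance of the worst-case rate computed in the lines that follow, using invented numbers; side_1 follows the +1-for-long / -1-for-short convention used in these close-rate helpers:

open_rate = 100.0
trailing_stop_positive_offset = 0.03  # trailing stop activates once 3% in profit
trailing_stop_positive = 0.01         # and then trails 1% behind the peak
leverage = 1.0
side_1 = 1                            # long trade

stop_rate = open_rate * (
    1
    + side_1 * abs(trailing_stop_positive_offset)
    - side_1 * abs(trailing_stop_positive / leverage)
)
print(round(stop_rate, 6))  # 102.0 -> price is assumed to rally to the offset, then fall back to the trail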
- stop_rate = (row[OPEN_IDX] * - (1 + side_1 * abs(self.strategy.trailing_stop_positive_offset) - - side_1 * abs(self.strategy.trailing_stop_positive / leverage))) + stop_rate = row[OPEN_IDX] * ( + 1 + + side_1 * abs(self.strategy.trailing_stop_positive_offset) + - side_1 * abs(self.strategy.trailing_stop_positive / leverage) + ) else: # Worst case: price ticks tiny bit above open and dives down. - stop_rate = row[OPEN_IDX] * (1 - side_1 * abs( - (trade.stop_loss_pct or 0.0) / leverage)) + stop_rate = row[OPEN_IDX] * ( + 1 - side_1 * abs((trade.stop_loss_pct or 0.0) / leverage) + ) # Limit lower-end to candle low to avoid exits below the low. # This still remains "worst case" - but "worst realistic case". @@ -473,8 +538,9 @@ class Backtesting: # Set close_rate to stoploss return stoploss_value - def _get_close_rate_for_roi(self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, - trade_dur: int) -> float: + def _get_close_rate_for_roi( + self, row: Tuple, trade: LocalTrade, exit: ExitCheckTuple, trade_dur: int + ) -> float: is_short = trade.is_short or False leverage = trade.leverage or 1.0 side_1 = -1 if is_short else 1 @@ -494,14 +560,17 @@ class Backtesting: is_new_roi = row[OPEN_IDX] < close_rate else: is_new_roi = row[OPEN_IDX] > close_rate - if (trade_dur > 0 and trade_dur == roi_entry - and roi_entry % self.timeframe_min == 0 - and is_new_roi): + if ( + trade_dur > 0 + and trade_dur == roi_entry + and roi_entry % self.timeframe_min == 0 + and is_new_roi + ): # new ROI entry came into effect. # use Open rate if open_rate > calculated exit rate return row[OPEN_IDX] - if (trade_dur == 0 and ( + if trade_dur == 0 and ( ( is_short # Red candle (for longs) @@ -509,15 +578,14 @@ class Backtesting: and trade.open_rate > row[OPEN_IDX] # trade-open above open_rate and close_rate < row[CLOSE_IDX] # closes below close ) - or - ( + or ( not is_short # green candle (for shorts) and row[OPEN_IDX] > row[CLOSE_IDX] # green candle and trade.open_rate < row[OPEN_IDX] # trade-open below open_rate and close_rate > row[CLOSE_IDX] # closes above close ) - )): + ): # ROI on opening candles with custom pricing can only # trigger if the entry was at Open or lower wick. 
# details: https: // github.com/freqtrade/freqtrade/issues/6261 @@ -534,7 +602,7 @@ class Backtesting: return row[OPEN_IDX] def _get_adjust_trade_entry_for_candle( - self, trade: LocalTrade, row: Tuple, current_time: datetime + self, trade: LocalTrade, row: Tuple, current_time: datetime ) -> LocalTrade: current_rate: float = row[OPEN_IDX] current_profit = trade.calc_profit_ratio(current_rate) @@ -543,11 +611,15 @@ class Backtesting: stake_available = self.wallets.get_available_stake_amount() stake_amount, order_tag = self.strategy._adjust_trade_position_internal( trade=trade, # type: ignore[arg-type] - current_time=current_time, current_rate=current_rate, - current_profit=current_profit, min_stake=min_stake, + current_time=current_time, + current_rate=current_rate, + current_profit=current_profit, + min_stake=min_stake, max_stake=min(max_stake, stake_available), - current_entry_rate=current_rate, current_exit_rate=current_rate, - current_entry_profit=current_profit, current_exit_profit=current_profit + current_entry_rate=current_rate, + current_exit_rate=current_rate, + current_entry_profit=current_profit, + current_exit_profit=current_profit, ) # Check if we should increase our position @@ -555,20 +627,33 @@ class Backtesting: check_adjust_entry = True if self.strategy.max_entry_position_adjustment > -1: entry_count = trade.nr_of_successful_entries - check_adjust_entry = (entry_count <= self.strategy.max_entry_position_adjustment) + check_adjust_entry = entry_count <= self.strategy.max_entry_position_adjustment if check_adjust_entry: pos_trade = self._enter_trade( - trade.pair, row, 'short' if trade.is_short else 'long', stake_amount, trade, - entry_tag1=order_tag) + trade.pair, + row, + "short" if trade.is_short else "long", + stake_amount, + trade, + entry_tag1=order_tag, + ) if pos_trade is not None: self.wallets.update() return pos_trade if stake_amount is not None and stake_amount < 0.0: amount = amount_to_contract_precision( - abs(stake_amount * trade.amount / trade.stake_amount), + abs( + float( + FtPrecise(stake_amount) + * FtPrecise(trade.amount) + / FtPrecise(trade.stake_amount) + ) + ), trade.amount_precision, - self.precision_mode, trade.contract_size) + self.precision_mode, + trade.contract_size, + ) if amount == 0.0: return trade remaining = (trade.amount - amount) * current_rate @@ -579,25 +664,30 @@ class Backtesting: pos_trade = self._get_exit_for_signal(trade, row, exit_, current_time, amount) if pos_trade is not None: order = pos_trade.orders[-1] - if self._try_close_open_order(order, trade, current_time, row): - trade.recalc_trade_from_orders() - self.wallets.update() + # If the order was filled and for the full trade amount, we need to close the trade. 
+ self._process_exit_order(order, pos_trade, current_time, row, trade.pair) return pos_trade return trade def _get_order_filled(self, rate: float, row: Tuple) -> bool: - """ Rate is within candle, therefore filled""" + """Rate is within candle, therefore filled""" return row[LOW_IDX] <= rate <= row[HIGH_IDX] def _call_adjust_stop(self, current_date: datetime, trade: LocalTrade, current_rate: float): profit = trade.calc_profit_ratio(current_rate) - self.strategy.ft_stoploss_adjust(current_rate, trade, # type: ignore - current_date, profit, 0, after_fill=True) + self.strategy.ft_stoploss_adjust( + current_rate, + trade, # type: ignore + current_date, + profit, + 0, + after_fill=True, + ) def _try_close_open_order( - self, order: Optional[Order], trade: LocalTrade, current_date: datetime, - row: Tuple) -> bool: + self, order: Optional[Order], trade: LocalTrade, current_date: datetime, row: Tuple + ) -> bool: """ Check if an order is open and if it should've filled. :return: True if the order filled. @@ -605,33 +695,58 @@ class Backtesting: if order and self._get_order_filled(order.ft_price, row): order.close_bt_order(current_date, trade) self._run_funding_fees(trade, current_date, force=True) - strategy_safe_wrapper( - self.strategy.order_filled, - default_retval=None)( - pair=trade.pair, trade=trade, # type: ignore[arg-type] - order=order, current_time=current_date) + strategy_safe_wrapper(self.strategy.order_filled, default_retval=None)( + pair=trade.pair, + trade=trade, # type: ignore[arg-type] + order=order, + current_time=current_date, + ) if not (order.ft_order_side == trade.exit_side and order.safe_amount == trade.amount): # trade is still open - trade.set_liquidation_price(self.exchange.get_liquidation_price( - pair=trade.pair, - open_rate=trade.open_rate, - is_short=trade.is_short, - amount=trade.amount, - stake_amount=trade.stake_amount, - leverage=trade.leverage, - wallet_balance=trade.stake_amount, - )) + trade.set_liquidation_price( + self.exchange.get_liquidation_price( + pair=trade.pair, + open_rate=trade.open_rate, + is_short=trade.is_short, + amount=trade.amount, + stake_amount=trade.stake_amount, + leverage=trade.leverage, + wallet_balance=trade.stake_amount, + ) + ) self._call_adjust_stop(current_date, trade, order.ft_price) # pass return True return False - def _get_exit_for_signal( - self, trade: LocalTrade, row: Tuple, exit_: ExitCheckTuple, - current_time: datetime, - amount: Optional[float] = None) -> Optional[LocalTrade]: + def _process_exit_order( + self, order: Order, trade: LocalTrade, current_time: datetime, row: Tuple, pair: str + ): + """ + Takes an exit order and processes it, potentially closing the trade. 
+ """ + if self._try_close_open_order(order, trade, current_time, row): + sub_trade = order.safe_amount_after_fee != trade.amount + if sub_trade: + trade.recalc_trade_from_orders() + else: + trade.close_date = current_time + trade.close(order.ft_price, show_msg=False) + # logger.debug(f"{pair} - Backtesting exit {trade}") + LocalTrade.close_bt_trade(trade) + self.wallets.update() + self.run_protections(pair, current_time, trade.trade_direction) + + def _get_exit_for_signal( + self, + trade: LocalTrade, + row: Tuple, + exit_: ExitCheckTuple, + current_time: datetime, + amount: Optional[float] = None, + ) -> Optional[LocalTrade]: if exit_.exit_flag: trade.close_date = current_time exit_reason = exit_.exit_reason @@ -643,9 +758,12 @@ class Backtesting: return None # call the custom exit price,with default value as previous close_rate current_profit = trade.calc_profit_ratio(close_rate) - order_type = self.strategy.order_types['exit'] - if exit_.exit_type in (ExitType.EXIT_SIGNAL, ExitType.CUSTOM_EXIT, - ExitType.PARTIAL_EXIT): + order_type = self.strategy.order_types["exit"] + if exit_.exit_type in ( + ExitType.EXIT_SIGNAL, + ExitType.CUSTOM_EXIT, + ExitType.PARTIAL_EXIT, + ): # Checks and adds an exit tag, after checking that the length of the # row has the length for an exit tag column if ( @@ -656,17 +774,21 @@ class Backtesting: ): exit_reason = row[EXIT_TAG_IDX] # Custom exit pricing only for exit-signals - if order_type == 'limit': - rate = strategy_safe_wrapper(self.strategy.custom_exit_price, - default_retval=close_rate)( + if order_type == "limit": + rate = strategy_safe_wrapper( + self.strategy.custom_exit_price, default_retval=close_rate + )( pair=trade.pair, trade=trade, # type: ignore[arg-type] current_time=current_time, - proposed_rate=close_rate, current_profit=current_profit, - exit_tag=exit_reason) + proposed_rate=close_rate, + current_profit=current_profit, + exit_tag=exit_reason, + ) if rate is not None and rate != close_rate: - close_rate = price_to_precision(rate, trade.price_precision, - self.precision_mode) + close_rate = price_to_precision( + rate, trade.price_precision, self.precision_mode + ) # We can't place orders lower than current low. 
# freqtrade does not support this in live, and the order would fill immediately if trade.is_short: @@ -674,20 +796,22 @@ class Backtesting: else: close_rate = max(close_rate, row[LOW_IDX]) # Confirm trade exit: - time_in_force = self.strategy.order_time_in_force['exit'] + time_in_force = self.strategy.order_time_in_force["exit"] - if (exit_.exit_type not in (ExitType.LIQUIDATION, ExitType.PARTIAL_EXIT) - and not strategy_safe_wrapper( - self.strategy.confirm_trade_exit, default_retval=True)( - pair=trade.pair, - trade=trade, # type: ignore[arg-type] - order_type=order_type, - amount=amount_, - rate=close_rate, - time_in_force=time_in_force, - sell_reason=exit_reason, # deprecated - exit_reason=exit_reason, - current_time=current_time)): + if exit_.exit_type not in ( + ExitType.LIQUIDATION, + ExitType.PARTIAL_EXIT, + ) and not strategy_safe_wrapper(self.strategy.confirm_trade_exit, default_retval=True)( + pair=trade.pair, + trade=trade, # type: ignore[arg-type] + order_type=order_type, + amount=amount_, + rate=close_rate, + time_in_force=time_in_force, + sell_reason=exit_reason, # deprecated + exit_reason=exit_reason, + current_time=current_time, + ): return None trade.exit_reason = exit_reason @@ -695,14 +819,21 @@ class Backtesting: return self._exit_trade(trade, row, close_rate, amount_, exit_reason) return None - def _exit_trade(self, trade: LocalTrade, sell_row: Tuple, close_rate: float, - amount: float, exit_reason: Optional[str]) -> Optional[LocalTrade]: + def _exit_trade( + self, + trade: LocalTrade, + sell_row: Tuple, + close_rate: float, + amount: float, + exit_reason: Optional[str], + ) -> Optional[LocalTrade]: self.order_id_counter += 1 exit_candle_time = sell_row[DATE_IDX].to_pydatetime() - order_type = self.strategy.order_types['exit'] + order_type = self.strategy.order_types["exit"] # amount = amount or trade.amount - amount = amount_to_contract_precision(amount or trade.amount, trade.amount_precision, - self.precision_mode, trade.contract_size) + amount = amount_to_contract_precision( + amount or trade.amount, trade.amount_precision, self.precision_mode, trade.contract_size + ) order = Order( id=self.order_id_counter, ft_trade_id=trade.id, @@ -730,26 +861,30 @@ class Backtesting: return trade def _check_trade_exit( - self, trade: LocalTrade, row: Tuple, current_time: datetime + self, trade: LocalTrade, row: Tuple, current_time: datetime ) -> Optional[LocalTrade]: - self._run_funding_fees(trade, current_time) # Check if we need to adjust our current positions if self.strategy.position_adjustment_enable: trade = self._get_adjust_trade_entry_for_candle(trade, row, current_time) - enter = row[SHORT_IDX] if trade.is_short else row[LONG_IDX] - exit_sig = row[ESHORT_IDX] if trade.is_short else row[ELONG_IDX] - exits = self.strategy.should_exit( - trade, row[OPEN_IDX], row[DATE_IDX].to_pydatetime(), # type: ignore - enter=enter, exit_=exit_sig, - low=row[LOW_IDX], high=row[HIGH_IDX] - ) - for exit_ in exits: - t = self._get_exit_for_signal(trade, row, exit_, current_time) - if t: - return t + if trade.is_open: + enter = row[SHORT_IDX] if trade.is_short else row[LONG_IDX] + exit_sig = row[ESHORT_IDX] if trade.is_short else row[ELONG_IDX] + exits = self.strategy.should_exit( + trade, # type: ignore + row[OPEN_IDX], + row[DATE_IDX].to_pydatetime(), + enter=enter, + exit_=exit_sig, + low=row[LOW_IDX], + high=row[HIGH_IDX], + ) + for exit_ in exits: + t = self._get_exit_for_signal(trade, row, exit_, current_time) + if t: + return t return None def _run_funding_fees(self, trade: 
LocalTrade, current_time: datetime, force: bool = False): @@ -757,11 +892,7 @@ class Backtesting: Calculate funding fees if necessary and add them to the trade. """ if self.trading_mode == TradingMode.FUTURES: - - if ( - force - or (current_time.timestamp() % self.funding_fee_timeframe_secs) == 0 - ): + if force or (current_time.timestamp() % self.funding_fee_timeframe_secs) == 0: # Funding fee interval. trade.set_funding_fees( self.exchange.calculate_funding_fees( @@ -769,30 +900,38 @@ class Backtesting: amount=trade.amount, is_short=trade.is_short, open_date=trade.date_last_filled_utc, - close_date=current_time + close_date=current_time, ) ) def get_valid_price_and_stake( - self, pair: str, row: Tuple, propose_rate: float, stake_amount: float, - direction: LongShort, current_time: datetime, entry_tag: Optional[str], - trade: Optional[LocalTrade], order_type: str, price_precision: Optional[float] + self, + pair: str, + row: Tuple, + propose_rate: float, + stake_amount: float, + direction: LongShort, + current_time: datetime, + entry_tag: Optional[str], + trade: Optional[LocalTrade], + order_type: str, + price_precision: Optional[float], ) -> Tuple[float, float, float, float]: - - if order_type == 'limit': - new_rate = strategy_safe_wrapper(self.strategy.custom_entry_price, - default_retval=propose_rate)( + if order_type == "limit": + new_rate = strategy_safe_wrapper( + self.strategy.custom_entry_price, default_retval=propose_rate + )( pair=pair, trade=trade, # type: ignore[arg-type] current_time=current_time, - proposed_rate=propose_rate, entry_tag=entry_tag, + proposed_rate=propose_rate, + entry_tag=entry_tag, side=direction, ) # default value is the open rate # We can't place orders higher than current high (otherwise it'd be a stop limit entry) # which freqtrade does not support in live. if new_rate is not None and new_rate != propose_rate: - propose_rate = price_to_precision(new_rate, price_precision, - self.precision_mode) + propose_rate = price_to_precision(new_rate, price_precision, self.precision_mode) if direction == "short": propose_rate = max(propose_rate, row[LOW_IDX]) else: @@ -803,53 +942,75 @@ class Backtesting: if not pos_adjust: try: stake_amount = self.wallets.get_trade_stake_amount( - pair, self.strategy.max_open_trades, update=False) + pair, self.strategy.max_open_trades, update=False + ) except DependencyException: return 0, 0, 0, 0 max_leverage = self.exchange.get_max_leverage(pair, stake_amount) - leverage = strategy_safe_wrapper(self.strategy.leverage, default_retval=1.0)( - pair=pair, - current_time=current_time, - current_rate=row[OPEN_IDX], - proposed_leverage=1.0, - max_leverage=max_leverage, - side=direction, entry_tag=entry_tag, - ) if self.trading_mode != TradingMode.SPOT else 1.0 + leverage = ( + strategy_safe_wrapper(self.strategy.leverage, default_retval=1.0)( + pair=pair, + current_time=current_time, + current_rate=row[OPEN_IDX], + proposed_leverage=1.0, + max_leverage=max_leverage, + side=direction, + entry_tag=entry_tag, + ) + if self.trading_mode != TradingMode.SPOT + else 1.0 + ) # Cap leverage between 1.0 and max_leverage. 
leverage = min(max(leverage, 1.0), max_leverage) - min_stake_amount = self.exchange.get_min_pair_stake_amount( - pair, propose_rate, -0.05 if not pos_adjust else 0.0, leverage=leverage) or 0 + min_stake_amount = ( + self.exchange.get_min_pair_stake_amount( + pair, propose_rate, -0.05 if not pos_adjust else 0.0, leverage=leverage + ) + or 0 + ) max_stake_amount = self.exchange.get_max_pair_stake_amount( - pair, propose_rate, leverage=leverage) + pair, propose_rate, leverage=leverage + ) stake_available = self.wallets.get_available_stake_amount() if not pos_adjust: - stake_amount = strategy_safe_wrapper(self.strategy.custom_stake_amount, - default_retval=stake_amount)( - pair=pair, current_time=current_time, current_rate=propose_rate, - proposed_stake=stake_amount, min_stake=min_stake_amount, + stake_amount = strategy_safe_wrapper( + self.strategy.custom_stake_amount, default_retval=stake_amount + )( + pair=pair, + current_time=current_time, + current_rate=propose_rate, + proposed_stake=stake_amount, + min_stake=min_stake_amount, max_stake=min(stake_available, max_stake_amount), - leverage=leverage, entry_tag=entry_tag, side=direction) + leverage=leverage, + entry_tag=entry_tag, + side=direction, + ) stake_amount_val = self.wallets.validate_stake_amount( pair=pair, stake_amount=stake_amount, min_stake_amount=min_stake_amount, max_stake_amount=max_stake_amount, - trade_amount=trade.stake_amount if trade else None + trade_amount=trade.stake_amount if trade else None, ) return propose_rate, stake_amount_val, leverage, min_stake_amount - def _enter_trade(self, pair: str, row: Tuple, direction: LongShort, - stake_amount: Optional[float] = None, - trade: Optional[LocalTrade] = None, - requested_rate: Optional[float] = None, - requested_stake: Optional[float] = None, - entry_tag1: Optional[str] = None - ) -> Optional[LocalTrade]: + def _enter_trade( + self, + pair: str, + row: Tuple, + direction: LongShort, + stake_amount: Optional[float] = None, + trade: Optional[LocalTrade] = None, + requested_rate: Optional[float] = None, + requested_stake: Optional[float] = None, + entry_tag1: Optional[str] = None, + ) -> Optional[LocalTrade]: """ :param trade: Trade to adjust - initial entry if None :param requested_rate: Adjusted entry rate @@ -859,15 +1020,23 @@ class Backtesting: current_time = row[DATE_IDX].to_pydatetime() entry_tag = entry_tag1 or (row[ENTER_TAG_IDX] if len(row) >= ENTER_TAG_IDX + 1 else None) # let's call the custom entry price, using the open price as default price - order_type = self.strategy.order_types['entry'] + order_type = self.strategy.order_types["entry"] pos_adjust = trade is not None and requested_rate is None stake_amount_ = stake_amount or (trade.stake_amount if trade else 0.0) precision_price = self.exchange.get_precision_price(pair) propose_rate, stake_amount, leverage, min_stake_amount = self.get_valid_price_and_stake( - pair, row, row[OPEN_IDX], stake_amount_, direction, current_time, entry_tag, trade, - order_type, precision_price, + pair, + row, + row[OPEN_IDX], + stake_amount_, + direction, + current_time, + entry_tag, + trade, + order_type, + precision_price, ) # replace proposed rate if another rate was requested @@ -878,7 +1047,7 @@ class Backtesting: # In case of pos adjust, still return the original trade # If not pos adjust, trade is None return trade - time_in_force = self.strategy.order_time_in_force['entry'] + time_in_force = self.strategy.order_time_in_force["entry"] if stake_amount and (not min_stake_amount or stake_amount >= min_stake_amount): 
self.order_id_counter += 1 @@ -887,8 +1056,9 @@ class Backtesting: contract_size = self.exchange.get_contract_size(pair) precision_amount = self.exchange.get_precision_amount(pair) - amount = amount_to_contract_precision(amount_p, precision_amount, self.precision_mode, - contract_size) + amount = amount_to_contract_precision( + amount_p, precision_amount, self.precision_mode, contract_size + ) if not amount: # No amount left after truncating to precision. return trade @@ -898,13 +1068,20 @@ class Backtesting: if not pos_adjust: # Confirm trade entry: if not strategy_safe_wrapper( - self.strategy.confirm_trade_entry, default_retval=True)( - pair=pair, order_type=order_type, amount=amount, rate=propose_rate, - time_in_force=time_in_force, current_time=current_time, - entry_tag=entry_tag, side=direction): + self.strategy.confirm_trade_entry, default_retval=True + )( + pair=pair, + order_type=order_type, + amount=amount, + rate=propose_rate, + time_in_force=time_in_force, + current_time=current_time, + entry_tag=entry_tag, + side=direction, + ): return trade - is_short = (direction == 'short') + is_short = direction == "short" # Necessary for Margin trading. Disabled until support is enabled. # interest_rate = self.exchange.get_interest_rate() @@ -915,7 +1092,7 @@ class Backtesting: id=self.trade_id_counter, pair=pair, base_currency=base_currency, - stake_currency=self.config['stake_currency'], + stake_currency=self.config["stake_currency"], open_rate=propose_rate, open_rate_requested=propose_rate, open_date=current_time, @@ -937,6 +1114,7 @@ class Backtesting: contract_size=contract_size, orders=[], ) + LocalTrade.add_bt_trade(trade) trade.adjust_stop_loss(trade.open_rate, self.strategy.stoploss, initial=True) @@ -970,8 +1148,9 @@ class Backtesting: return trade - def handle_left_open(self, open_trades: Dict[str, List[LocalTrade]], - data: Dict[str, List[Tuple]]) -> None: + def handle_left_open( + self, open_trades: Dict[str, List[LocalTrade]], data: Dict[str, List[Tuple]] + ) -> None: """ Handling of left open trades at the end of backtesting """ @@ -981,8 +1160,9 @@ class Backtesting: # Ignore trade if entry-order did not fill yet continue exit_row = data[pair][-1] - self._exit_trade(trade, exit_row, exit_row[OPEN_IDX], trade.amount, - ExitType.FORCE_EXIT.value) + self._exit_trade( + trade, exit_row, exit_row[OPEN_IDX], trade.amount, ExitType.FORCE_EXIT.value + ) trade.orders[-1].close_bt_order(exit_row[DATE_IDX].to_pydatetime(), trade) trade.close_date = exit_row[DATE_IDX].to_pydatetime() @@ -1007,10 +1187,10 @@ class Backtesting: if enter_long == 1 and not any([exit_long, enter_short]): # Long - return 'long' + return "long" if enter_short == 1 and not any([exit_short, enter_long]): # Short - return 'short' + return "short" return None def run_protections(self, pair: str, current_time: datetime, side: LongShort): @@ -1036,7 +1216,8 @@ class Backtesting: return False def check_order_cancel( - self, trade: LocalTrade, order: Order, current_time: datetime) -> Optional[bool]: + self, trade: LocalTrade, order: Order, current_time: datetime + ) -> Optional[bool]: """ Check if current analyzed order has to be canceled. 
Returns True if the trade should be Deleted (initial order was canceled), @@ -1045,7 +1226,9 @@ class Backtesting: """ timedout = self.strategy.ft_check_timed_out( trade, # type: ignore[arg-type] - order, current_time) + order, + current_time, + ) if timedout: if order.side == trade.entry_side: self.timedout_entry_orders += 1 @@ -1063,8 +1246,9 @@ class Backtesting: return False return None - def check_order_replace(self, trade: LocalTrade, order: Order, current_time, - row: Tuple) -> bool: + def check_order_replace( + self, trade: LocalTrade, order: Order, current_time, row: Tuple + ) -> bool: """ Check if current analyzed entry order has to be replaced and do so. If user requested cancellation and there are no filled orders in the trade will @@ -1073,12 +1257,17 @@ class Backtesting: """ # only check on new candles for open entry orders if order.side == trade.entry_side and current_time > order.order_date_utc: - requested_rate = strategy_safe_wrapper(self.strategy.adjust_entry_price, - default_retval=order.ft_price)( + requested_rate = strategy_safe_wrapper( + self.strategy.adjust_entry_price, default_retval=order.ft_price + )( trade=trade, # type: ignore[arg-type] - order=order, pair=trade.pair, current_time=current_time, - proposed_rate=row[OPEN_IDX], current_order_rate=order.ft_price, - entry_tag=trade.enter_tag, side=trade.trade_direction + order=order, + pair=trade.pair, + current_time=current_time, + proposed_rate=row[OPEN_IDX], + current_order_rate=order.ft_price, + entry_tag=trade.enter_tag, + side=trade.trade_direction, ) # default value is current order price # cancel existing order whenever a new rate is requested (or None) @@ -1091,22 +1280,26 @@ class Backtesting: # place new order if result was not None if requested_rate: - self._enter_trade(pair=trade.pair, row=row, trade=trade, - requested_rate=requested_rate, - requested_stake=( - order.safe_remaining * order.ft_price / trade.leverage), - direction='short' if trade.is_short else 'long') + self._enter_trade( + pair=trade.pair, + row=row, + trade=trade, + requested_rate=requested_rate, + requested_stake=(order.safe_remaining * order.ft_price / trade.leverage), + direction="short" if trade.is_short else "long", + ) # Delete trade if no successful entries happened (if placing the new order failed) if not trade.has_open_orders and trade.nr_of_successful_entries == 0: return True self.replaced_entry_orders += 1 else: # assumption: there can't be multiple open entry orders at any given time - return (trade.nr_of_successful_entries == 0) + return trade.nr_of_successful_entries == 0 return False def validate_row( - self, data: Dict, pair: str, row_index: int, current_time: datetime) -> Optional[Tuple]: + self, data: Dict, pair: str, row_index: int, current_time: datetime + ) -> Optional[Tuple]: try: # Row is treated as "current incomplete candle". # entry / exit signals are shifted by 1 to compensate for this. 
@@ -1127,16 +1320,24 @@ class Backtesting: """ # It could be fun to enable hyperopt mode to write # a loss function to reduce rejected signals - if (self.config.get('export', 'none') == 'signals' and - self.dataprovider.runmode == RunMode.BACKTEST): + if ( + self.config.get("export", "none") == "signals" + and self.dataprovider.runmode == RunMode.BACKTEST + ): if pair not in self.rejected_dict: self.rejected_dict[pair] = [] self.rejected_dict[pair].append([row[DATE_IDX], row[ENTER_TAG_IDX]]) def backtest_loop( - self, row: Tuple, pair: str, current_time: datetime, end_date: datetime, - open_trade_count_start: int, trade_dir: Optional[LongShort], - is_first: bool = True) -> int: + self, + row: Tuple, + pair: str, + current_time: datetime, + end_date: datetime, + open_trade_count_start: int, + trade_dir: Optional[LongShort], + is_first: bool = True, + ) -> int: """ NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized. @@ -1162,15 +1363,13 @@ class Backtesting: and trade_dir is not None and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir) ): - if (self.trade_slot_available(open_trade_count_start)): + if self.trade_slot_available(open_trade_count_start): trade = self._enter_trade(pair, row, trade_dir) if trade: # TODO: hacky workaround to avoid opening > max_open_trades # This emulates previous behavior - not sure if this is correct # Prevents entering if the trade-slot was freed in this candle open_trade_count_start += 1 - # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.") - LocalTrade.add_bt_trade(trade) self.wallets.update() else: self._collate_rejected(pair, row) @@ -1187,22 +1386,11 @@ class Backtesting: # 5. Process exit orders. order = trade.select_order(trade.exit_side, is_open=True) - if order and self._try_close_open_order(order, trade, current_time, row): - sub_trade = order.safe_amount_after_fee != trade.amount - if sub_trade: - trade.recalc_trade_from_orders() - else: - trade.close_date = current_time - trade.close(order.ft_price, show_msg=False) - - # logger.debug(f"{pair} - Backtesting exit {trade}") - LocalTrade.close_bt_trade(trade) - self.wallets.update() - self.run_protections(pair, current_time, trade.trade_direction) + if order: + self._process_exit_order(order, trade, current_time, row, pair) return open_trade_count_start - def backtest(self, processed: Dict, - start_date: datetime, end_date: datetime) -> Dict[str, Any]: + def backtest(self, processed: Dict, start_date: datetime, end_date: datetime) -> Dict[str, Any]: """ Implement backtesting functionality @@ -1227,14 +1415,16 @@ class Backtesting: indexes: Dict = defaultdict(int) current_time = start_date + self.timeframe_td - self.progress.init_step(BacktestState.BACKTEST, int( - (end_date - start_date) / self.timeframe_td)) + self.progress.init_step( + BacktestState.BACKTEST, int((end_date - start_date) / self.timeframe_td) + ) # Loop timerange and get candle for each pair at that point in time while current_time <= end_date: open_trade_count_start = LocalTrade.bt_open_open_trade_count self.check_abort() strategy_safe_wrapper(self.strategy.bot_loop_start, supress_error=True)( - current_time=current_time) + current_time=current_time + ) for i, pair in enumerate(data): row_index = indexes[pair] row = self.validate_row(data, pair, row_index, current_time) @@ -1250,7 +1440,8 @@ class Backtesting: if ( (trade_dir is not None or len(LocalTrade.bt_trades_open_pp[pair]) > 0) - and self.timeframe_detail and pair in self.detail_data + and self.timeframe_detail + and pair 
in self.detail_data ): # Spread out into detail timeframe. # Should only happen when we are either in a trade for this pair @@ -1259,35 +1450,41 @@ class Backtesting: detail_data = self.detail_data[pair] detail_data = detail_data.loc[ - (detail_data['date'] >= current_detail_time) & - (detail_data['date'] < exit_candle_end) + (detail_data["date"] >= current_detail_time) + & (detail_data["date"] < exit_candle_end) ].copy() if len(detail_data) == 0: # Fall back to "regular" data if no detail data was found for this candle open_trade_count_start = self.backtest_loop( - row, pair, current_time, end_date, - open_trade_count_start, trade_dir) + row, pair, current_time, end_date, open_trade_count_start, trade_dir + ) continue - detail_data.loc[:, 'enter_long'] = row[LONG_IDX] - detail_data.loc[:, 'exit_long'] = row[ELONG_IDX] - detail_data.loc[:, 'enter_short'] = row[SHORT_IDX] - detail_data.loc[:, 'exit_short'] = row[ESHORT_IDX] - detail_data.loc[:, 'enter_tag'] = row[ENTER_TAG_IDX] - detail_data.loc[:, 'exit_tag'] = row[EXIT_TAG_IDX] + detail_data.loc[:, "enter_long"] = row[LONG_IDX] + detail_data.loc[:, "exit_long"] = row[ELONG_IDX] + detail_data.loc[:, "enter_short"] = row[SHORT_IDX] + detail_data.loc[:, "exit_short"] = row[ESHORT_IDX] + detail_data.loc[:, "enter_tag"] = row[ENTER_TAG_IDX] + detail_data.loc[:, "exit_tag"] = row[EXIT_TAG_IDX] is_first = True current_time_det = current_time for det_row in detail_data[HEADERS].values.tolist(): self.dataprovider._set_dataframe_max_date(current_time_det) open_trade_count_start = self.backtest_loop( - det_row, pair, current_time_det, end_date, - open_trade_count_start, trade_dir, is_first) + det_row, + pair, + current_time_det, + end_date, + open_trade_count_start, + trade_dir, + is_first, + ) current_time_det += self.timeframe_detail_td is_first = False else: self.dataprovider._set_dataframe_max_date(current_time) open_trade_count_start = self.backtest_loop( - row, pair, current_time, end_date, - open_trade_count_start, trade_dir) + row, pair, current_time, end_date, open_trade_count_start, trade_dir + ) # Move time one configured time_interval ahead. 
self.progress.increment() @@ -1298,20 +1495,21 @@ class Backtesting: results = trade_list_to_dataframe(LocalTrade.trades) return { - 'results': results, - 'config': self.strategy.config, - 'locks': PairLocks.get_all_locks(), - 'rejected_signals': self.rejected_trades, - 'timedout_entry_orders': self.timedout_entry_orders, - 'timedout_exit_orders': self.timedout_exit_orders, - 'canceled_trade_entries': self.canceled_trade_entries, - 'canceled_entry_orders': self.canceled_entry_orders, - 'replaced_entry_orders': self.replaced_entry_orders, - 'final_balance': self.wallets.get_total(self.strategy.config['stake_currency']), + "results": results, + "config": self.strategy.config, + "locks": PairLocks.get_all_locks(), + "rejected_signals": self.rejected_trades, + "timedout_entry_orders": self.timedout_entry_orders, + "timedout_exit_orders": self.timedout_exit_orders, + "canceled_trade_entries": self.canceled_trade_entries, + "canceled_entry_orders": self.canceled_entry_orders, + "replaced_entry_orders": self.replaced_entry_orders, + "final_balance": self.wallets.get_total(self.strategy.config["stake_currency"]), } - def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame], - timerange: TimeRange): + def backtest_one_strategy( + self, strat: IStrategy, data: Dict[str, DataFrame], timerange: TimeRange + ): self.progress.init_step(BacktestState.ANALYZE, 0) strategy_name = strat.get_strategy_name() logger.info(f"Running backtesting for Strategy {strategy_name}") @@ -1319,11 +1517,10 @@ class Backtesting: self._set_strategy(strat) # Use max_open_trades in backtesting, except --disable-max-market-positions is set - if not self.config.get('use_max_market_positions', True): - logger.info( - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...') - self.strategy.max_open_trades = float('inf') - self.config.update({'max_open_trades': self.strategy.max_open_trades}) + if not self.config.get("use_max_market_positions", True): + logger.info("Ignoring max_open_trades (--disable-max-market-positions was used) ...") + self.strategy.max_open_trades = float("inf") + self.config.update({"max_open_trades": self.strategy.max_open_trades}) # need to reprocess data every time to populate signals preprocessed = self.strategy.advise_all_indicators(data) @@ -1333,15 +1530,16 @@ class Backtesting: preprocessed_tmp = trim_dataframes(preprocessed, timerange, self.required_startup) if not preprocessed_tmp: - raise OperationalException( - "No data left after adjusting for startup candles.") + raise OperationalException("No data left after adjusting for startup candles.") # Use preprocessed_tmp for date generation (the trimmed dataframe). # Backtesting will re-trim the dataframes after entry/exit signal generation. min_date, max_date = history.get_timerange(preprocessed_tmp) - logger.info(f'Backtesting with data from {min_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'up to {max_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'({(max_date - min_date).days} days).') + logger.info( + f"Backtesting with data from {min_date.strftime(DATETIME_PRINT_FORMAT)} " + f"up to {max_date.strftime(DATETIME_PRINT_FORMAT)} " + f"({(max_date - min_date).days} days)." 
+ ) # Execute backtest and store results results = self.backtest( processed=preprocessed, @@ -1349,32 +1547,38 @@ class Backtesting: end_date=max_date, ) backtest_end_time = datetime.now(timezone.utc) - results.update({ - 'run_id': self.run_ids.get(strategy_name, ''), - 'backtest_start_time': int(backtest_start_time.timestamp()), - 'backtest_end_time': int(backtest_end_time.timestamp()), - }) + results.update( + { + "run_id": self.run_ids.get(strategy_name, ""), + "backtest_start_time": int(backtest_start_time.timestamp()), + "backtest_end_time": int(backtest_end_time.timestamp()), + } + ) self.all_results[strategy_name] = results - if (self.config.get('export', 'none') == 'signals' and - self.dataprovider.runmode == RunMode.BACKTEST): + if ( + self.config.get("export", "none") == "signals" + and self.dataprovider.runmode == RunMode.BACKTEST + ): self.processed_dfs[strategy_name] = generate_trade_signal_candles( - preprocessed_tmp, results) + preprocessed_tmp, results + ) self.rejected_df[strategy_name] = generate_rejected_signals( - preprocessed_tmp, self.rejected_dict) + preprocessed_tmp, self.rejected_dict + ) return min_date, max_date def _get_min_cached_backtest_date(self): min_backtest_date = None - backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT) + backtest_cache_age = self.config.get("backtest_cache", constants.BACKTEST_CACHE_DEFAULT) if self.timerange.stopts == 0 or self.timerange.stopdt > datetime.now(tz=timezone.utc): - logger.warning('Backtest result caching disabled due to use of open-ended timerange.') - elif backtest_cache_age == 'day': + logger.warning("Backtest result caching disabled due to use of open-ended timerange.") + elif backtest_cache_age == "day": min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(days=1) - elif backtest_cache_age == 'week': + elif backtest_cache_age == "week": min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(weeks=1) - elif backtest_cache_age == 'month': + elif backtest_cache_age == "month": min_backtest_date = datetime.now(tz=timezone.utc) - timedelta(weeks=4) return min_backtest_date @@ -1389,7 +1593,8 @@ class Backtesting: min_backtest_date = self._get_min_cached_backtest_date() if min_backtest_date is not None: self.results = find_existing_backtest_stats( - self.config['user_data_dir'] / 'backtest_results', self.run_ids, min_backtest_date) + self.config["user_data_dir"] / "backtest_results", self.run_ids, min_backtest_date + ) def start(self) -> None: """ @@ -1404,42 +1609,53 @@ class Backtesting: self.load_prior_backtest() for strat in self.strategylist: - if self.results and strat.get_strategy_name() in self.results['strategy']: + if self.results and strat.get_strategy_name() in self.results["strategy"]: # When previous result hash matches - reuse that result and skip backtesting. - logger.info(f'Reusing result of previous backtest for {strat.get_strategy_name()}') + logger.info(f"Reusing result of previous backtest for {strat.get_strategy_name()}") continue min_date, max_date = self.backtest_one_strategy(strat, data, timerange) # Update old results with new ones. 
if len(self.all_results) > 0: results = generate_backtest_stats( - data, self.all_results, min_date=min_date, max_date=max_date) + data, self.all_results, min_date=min_date, max_date=max_date + ) if self.results: - self.results['metadata'].update(results['metadata']) - self.results['strategy'].update(results['strategy']) - self.results['strategy_comparison'].extend(results['strategy_comparison']) + self.results["metadata"].update(results["metadata"]) + self.results["strategy"].update(results["strategy"]) + self.results["strategy_comparison"].extend(results["strategy_comparison"]) else: self.results = results dt_appendix = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - if self.config.get('export', 'none') in ('trades', 'signals'): + if self.config.get("export", "none") in ("trades", "signals"): combined_res = combined_dataframes_with_rel_mean(data, min_date, max_date) - store_backtest_stats(self.config['exportfilename'], self.results, dt_appendix, - market_change_data=combined_res) + store_backtest_stats( + self.config["exportfilename"], + self.results, + dt_appendix, + market_change_data=combined_res, + ) - if (self.config.get('export', 'none') == 'signals' and - self.dataprovider.runmode == RunMode.BACKTEST): + if ( + self.config.get("export", "none") == "signals" + and self.dataprovider.runmode == RunMode.BACKTEST + ): store_backtest_analysis_results( - self.config['exportfilename'], self.processed_dfs, self.rejected_df, - dt_appendix) + self.config["exportfilename"], self.processed_dfs, self.rejected_df, dt_appendix + ) # Results may be mixed up now. Sort them so they follow --strategy-list order. - if 'strategy_list' in self.config and len(self.results) > 0: - self.results['strategy_comparison'] = sorted( - self.results['strategy_comparison'], - key=lambda c: self.config['strategy_list'].index(c['key'])) - self.results['strategy'] = dict( - sorted(self.results['strategy'].items(), - key=lambda kv: self.config['strategy_list'].index(kv[0]))) + if "strategy_list" in self.config and len(self.results) > 0: + self.results["strategy_comparison"] = sorted( + self.results["strategy_comparison"], + key=lambda c: self.config["strategy_list"].index(c["key"]), + ) + self.results["strategy"] = dict( + sorted( + self.results["strategy"].items(), + key=lambda kv: self.config["strategy_list"].index(kv[0]), + ) + ) if len(self.strategylist) > 0: # Show backtest results diff --git a/freqtrade/optimize/base_analysis.py b/freqtrade/optimize/base_analysis.py index eb0a5e002..2503ede72 100644 --- a/freqtrade/optimize/base_analysis.py +++ b/freqtrade/optimize/base_analysis.py @@ -25,7 +25,6 @@ class VarHolder: class BaseAnalysis: - def __init__(self, config: Dict[str, Any], strategy_obj: Dict): self.failed_bias_check = True self.full_varHolder = VarHolder() @@ -34,7 +33,7 @@ class BaseAnalysis: # pull variables the scope of the lookahead_analysis-instance self.local_config = deepcopy(config) - self.local_config['strategy'] = strategy_obj['name'] + self.local_config["strategy"] = strategy_obj["name"] self.strategy_obj = strategy_obj @staticmethod @@ -46,7 +45,7 @@ class BaseAnalysis: self.full_varHolder = VarHolder() # define datetime in human-readable format - parsed_timerange = TimeRange.parse_timerange(self.local_config['timerange']) + parsed_timerange = TimeRange.parse_timerange(self.local_config["timerange"]) if parsed_timerange.startdt is None: self.full_varHolder.from_dt = datetime.fromtimestamp(0, tz=timezone.utc) @@ -58,9 +57,8 @@ class BaseAnalysis: else: self.full_varHolder.to_dt = 
parsed_timerange.stopdt - self.prepare_data(self.full_varHolder, self.local_config['pairs']) + self.prepare_data(self.full_varHolder, self.local_config["pairs"]) def start(self) -> None: - # first make a single backtest self.fill_full_varholder() diff --git a/freqtrade/optimize/bt_progress.py b/freqtrade/optimize/bt_progress.py index c3b105915..a49fe0d86 100644 --- a/freqtrade/optimize/bt_progress.py +++ b/freqtrade/optimize/bt_progress.py @@ -25,8 +25,9 @@ class BTProgress: """ Get progress as ratio, capped to be between 0 and 1 (to avoid small calculation errors). """ - return max(min(round(self._progress / self._max_steps, 5) - if self._max_steps > 0 else 0, 1), 0) + return max( + min(round(self._progress / self._max_steps, 5) if self._max_steps > 0 else 0, 1), 0 + ) @property def action(self): diff --git a/freqtrade/optimize/edge_cli.py b/freqtrade/optimize/edge_cli.py index 07c54d720..9bd8ff1c9 100644 --- a/freqtrade/optimize/edge_cli.py +++ b/freqtrade/optimize/edge_cli.py @@ -3,6 +3,7 @@ """ This module contains the edge backtesting interface """ + import logging from freqtrade import constants @@ -30,8 +31,8 @@ class EdgeCli: self.config = config # Ensure using dry-run - self.config['dry_run'] = True - self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT + self.config["dry_run"] = True + self.config["stake_amount"] = constants.UNLIMITED_STAKE_AMOUNT self.exchange = ExchangeResolver.load_exchange(self.config) self.strategy = StrategyResolver.load_strategy(self.config) self.strategy.dp = DataProvider(config, self.exchange) @@ -42,12 +43,13 @@ class EdgeCli: # Set refresh_pairs to false for edge-cli (it must be true for edge) self.edge._refresh_pairs = False - self.edge._timerange = TimeRange.parse_timerange(None if self.config.get( - 'timerange') is None else str(self.config.get('timerange'))) + self.edge._timerange = TimeRange.parse_timerange( + None if self.config.get("timerange") is None else str(self.config.get("timerange")) + ) self.strategy.ft_bot_start() def start(self) -> None: - result = self.edge.calculate(self.config['exchange']['pair_whitelist']) + result = self.edge.calculate(self.config["exchange"]["pair_whitelist"]) if result: - print('') # blank line for readability + print("") # blank line for readability print(generate_edge_table(self.edge._cached_pairs)) diff --git a/freqtrade/optimize/hyperopt.py b/freqtrade/optimize/hyperopt.py index 5d5d15b03..7ae2791bf 100644 --- a/freqtrade/optimize/hyperopt.py +++ b/freqtrade/optimize/hyperopt.py @@ -18,8 +18,15 @@ from colorama import init as colorama_init from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects from joblib.externals import cloudpickle from pandas import DataFrame -from rich.progress import (BarColumn, MofNCompleteColumn, Progress, TaskProgressColumn, TextColumn, - TimeElapsedColumn, TimeRemainingColumn) +from rich.progress import ( + BarColumn, + MofNCompleteColumn, + Progress, + TaskProgressColumn, + TextColumn, + TimeElapsedColumn, + TimeRemainingColumn, +) from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config from freqtrade.data.converter import trim_dataframes @@ -29,11 +36,15 @@ from freqtrade.enums import HyperoptState from freqtrade.exceptions import OperationalException from freqtrade.misc import deep_merge_dicts, file_dump_json, plural from freqtrade.optimize.backtesting import Backtesting + # Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules from freqtrade.optimize.hyperopt_auto 
import HyperOptAuto from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss -from freqtrade.optimize.hyperopt_tools import (HyperoptStateContainer, HyperoptTools, - hyperopt_serializer) +from freqtrade.optimize.hyperopt_tools import ( + HyperoptStateContainer, + HyperoptTools, + hyperopt_serializer, +) from freqtrade.optimize.optimize_reports import generate_strategy_stats from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver @@ -82,30 +93,36 @@ class Hyperopt: self.backtesting = Backtesting(self.config) self.pairlist = self.backtesting.pairlists.whitelist self.custom_hyperopt: HyperOptAuto - self.analyze_per_epoch = self.config.get('analyze_per_epoch', False) + self.analyze_per_epoch = self.config.get("analyze_per_epoch", False) HyperoptStateContainer.set_state(HyperoptState.STARTUP) - if not self.config.get('hyperopt'): + if not self.config.get("hyperopt"): self.custom_hyperopt = HyperOptAuto(self.config) else: raise OperationalException( "Using separate Hyperopt files has been removed in 2021.9. Please convert " - "your existing Hyperopt file to the new Hyperoptable strategy interface") + "your existing Hyperopt file to the new Hyperoptable strategy interface" + ) self.backtesting._set_strategy(self.backtesting.strategylist[0]) self.custom_hyperopt.strategy = self.backtesting.strategy self.hyperopt_pickle_magic(self.backtesting.strategy.__class__.__bases__) self.custom_hyperoptloss: IHyperOptLoss = HyperOptLossResolver.load_hyperoptloss( - self.config) + self.config + ) self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") - strategy = str(self.config['strategy']) - self.results_file: Path = (self.config['user_data_dir'] / 'hyperopt_results' / - f'strategy_{strategy}_{time_now}.fthypt') - self.data_pickle_file = (self.config['user_data_dir'] / - 'hyperopt_results' / 'hyperopt_tickerdata.pkl') - self.total_epochs = config.get('epochs', 0) + strategy = str(self.config["strategy"]) + self.results_file: Path = ( + self.config["user_data_dir"] + / "hyperopt_results" + / f"strategy_{strategy}_{time_now}.fthypt" + ) + self.data_pickle_file = ( + self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl" + ) + self.total_epochs = config.get("epochs", 0) self.current_best_loss = 100 @@ -116,24 +133,23 @@ class Hyperopt: self.current_best_epoch: Optional[Dict[str, Any]] = None # Use max_open_trades for hyperopt as well, except --disable-max-market-positions is set - if not self.config.get('use_max_market_positions', True): - logger.debug('Ignoring max_open_trades (--disable-max-market-positions was used) ...') - self.backtesting.strategy.max_open_trades = float('inf') - config.update({'max_open_trades': self.backtesting.strategy.max_open_trades}) + if not self.config.get("use_max_market_positions", True): + logger.debug("Ignoring max_open_trades (--disable-max-market-positions was used) ...") + self.backtesting.strategy.max_open_trades = float("inf") + config.update({"max_open_trades": self.backtesting.strategy.max_open_trades}) - if HyperoptTools.has_space(self.config, 'sell'): + if HyperoptTools.has_space(self.config, "sell"): # Make sure use_exit_signal is enabled - self.config['use_exit_signal'] = True + self.config["use_exit_signal"] = True - self.print_all = self.config.get('print_all', False) + self.print_all = self.config.get("print_all", False) self.hyperopt_table_header = 0 - self.print_colorized = self.config.get('print_colorized', False) - self.print_json = 
self.config.get('print_json', False) + self.print_colorized = self.config.get("print_colorized", False) + self.print_json = self.config.get("print_json", False) @staticmethod def get_lock_filename(config: Config) -> str: - - return str(config['user_data_dir'] / 'hyperopt.lock') + return str(config["user_data_dir"] / "hyperopt.lock") def clean_hyperopt(self) -> None: """ @@ -152,16 +168,15 @@ class Hyperopt: to pickle as value. """ for modules in bases: - if modules.__name__ != 'IStrategy': + if modules.__name__ != "IStrategy": cloudpickle.register_pickle_by_value(sys.modules[modules.__module__]) self.hyperopt_pickle_magic(modules.__bases__) def _get_params_dict(self, dimensions: List[Dimension], raw_params: List[Any]) -> Dict: - # Ensure the number of dimensions match # the number of parameters in the list. if len(raw_params) != len(dimensions): - raise ValueError('Mismatch in number of search-space dimensions.') + raise ValueError("Mismatch in number of search-space dimensions.") # Return a dict where the keys are the names of the dimensions # and the values are taken from the list of parameters. @@ -175,18 +190,23 @@ class Hyperopt: :param epoch: result dictionary for this epoch. """ epoch[FTHYPT_FILEVERSION] = 2 - with self.results_file.open('a') as f: - rapidjson.dump(epoch, f, default=hyperopt_serializer, - number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN) + with self.results_file.open("a") as f: + rapidjson.dump( + epoch, + f, + default=hyperopt_serializer, + number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN, + ) f.write("\n") self.num_epochs_saved += 1 - logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} " - f"saved to '{self.results_file}'.") + logger.debug( + f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} " + f"saved to '{self.results_file}'." 
+ ) # Store hyperopt filename latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN) - file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)}, - log=False) + file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False) def _get_params_details(self, params: Dict) -> Dict: """ @@ -194,23 +214,28 @@ class Hyperopt: """ result: Dict = {} - if HyperoptTools.has_space(self.config, 'buy'): - result['buy'] = {p.name: params.get(p.name) for p in self.buy_space} - if HyperoptTools.has_space(self.config, 'sell'): - result['sell'] = {p.name: params.get(p.name) for p in self.sell_space} - if HyperoptTools.has_space(self.config, 'protection'): - result['protection'] = {p.name: params.get(p.name) for p in self.protection_space} - if HyperoptTools.has_space(self.config, 'roi'): - result['roi'] = {str(k): v for k, v in - self.custom_hyperopt.generate_roi_table(params).items()} - if HyperoptTools.has_space(self.config, 'stoploss'): - result['stoploss'] = {p.name: params.get(p.name) for p in self.stoploss_space} - if HyperoptTools.has_space(self.config, 'trailing'): - result['trailing'] = self.custom_hyperopt.generate_trailing_params(params) - if HyperoptTools.has_space(self.config, 'trades'): - result['max_open_trades'] = { - 'max_open_trades': self.backtesting.strategy.max_open_trades - if self.backtesting.strategy.max_open_trades != float('inf') else -1} + if HyperoptTools.has_space(self.config, "buy"): + result["buy"] = {p.name: params.get(p.name) for p in self.buy_space} + if HyperoptTools.has_space(self.config, "sell"): + result["sell"] = {p.name: params.get(p.name) for p in self.sell_space} + if HyperoptTools.has_space(self.config, "protection"): + result["protection"] = {p.name: params.get(p.name) for p in self.protection_space} + if HyperoptTools.has_space(self.config, "roi"): + result["roi"] = { + str(k): v for k, v in self.custom_hyperopt.generate_roi_table(params).items() + } + if HyperoptTools.has_space(self.config, "stoploss"): + result["stoploss"] = {p.name: params.get(p.name) for p in self.stoploss_space} + if HyperoptTools.has_space(self.config, "trailing"): + result["trailing"] = self.custom_hyperopt.generate_trailing_params(params) + if HyperoptTools.has_space(self.config, "trades"): + result["max_open_trades"] = { + "max_open_trades": ( + self.backtesting.strategy.max_open_trades + if self.backtesting.strategy.max_open_trades != float("inf") + else -1 + ) + } return result @@ -220,19 +245,19 @@ class Hyperopt: """ result: Dict[str, Any] = {} strategy = self.backtesting.strategy - if not HyperoptTools.has_space(self.config, 'roi'): - result['roi'] = {str(k): v for k, v in strategy.minimal_roi.items()} - if not HyperoptTools.has_space(self.config, 'stoploss'): - result['stoploss'] = {'stoploss': strategy.stoploss} - if not HyperoptTools.has_space(self.config, 'trailing'): - result['trailing'] = { - 'trailing_stop': strategy.trailing_stop, - 'trailing_stop_positive': strategy.trailing_stop_positive, - 'trailing_stop_positive_offset': strategy.trailing_stop_positive_offset, - 'trailing_only_offset_is_reached': strategy.trailing_only_offset_is_reached, + if not HyperoptTools.has_space(self.config, "roi"): + result["roi"] = {str(k): v for k, v in strategy.minimal_roi.items()} + if not HyperoptTools.has_space(self.config, "stoploss"): + result["stoploss"] = {"stoploss": strategy.stoploss} + if not HyperoptTools.has_space(self.config, "trailing"): + result["trailing"] = { + "trailing_stop": strategy.trailing_stop, + 
"trailing_stop_positive": strategy.trailing_stop_positive, + "trailing_stop_positive_offset": strategy.trailing_stop_positive_offset, + "trailing_only_offset_is_reached": strategy.trailing_only_offset_is_reached, } - if not HyperoptTools.has_space(self.config, 'trades'): - result['max_open_trades'] = {'max_open_trades': strategy.max_open_trades} + if not HyperoptTools.has_space(self.config, "trades"): + result["max_open_trades"] = {"max_open_trades": strategy.max_open_trades} return result def print_results(self, results) -> None: @@ -240,14 +265,17 @@ class Hyperopt: Log results if it is better than any previous evaluation TODO: this should be moved to HyperoptTools too """ - is_best = results['is_best'] + is_best = results["is_best"] if self.print_all or is_best: print( HyperoptTools.get_result_table( - self.config, results, self.total_epochs, - self.print_all, self.print_colorized, - self.hyperopt_table_header + self.config, + results, + self.total_epochs, + self.print_all, + self.print_colorized, + self.hyperopt_table_header, ) ) self.hyperopt_table_header = 2 @@ -256,41 +284,47 @@ class Hyperopt: """ Assign the dimensions in the hyperoptimization space. """ - if HyperoptTools.has_space(self.config, 'protection'): + if HyperoptTools.has_space(self.config, "protection"): # Protections can only be optimized when using the Parameter interface logger.debug("Hyperopt has 'protection' space") # Enable Protections if protection space is selected. - self.config['enable_protections'] = True + self.config["enable_protections"] = True self.backtesting.enable_protections = True self.protection_space = self.custom_hyperopt.protection_space() - if HyperoptTools.has_space(self.config, 'buy'): + if HyperoptTools.has_space(self.config, "buy"): logger.debug("Hyperopt has 'buy' space") self.buy_space = self.custom_hyperopt.buy_indicator_space() - if HyperoptTools.has_space(self.config, 'sell'): + if HyperoptTools.has_space(self.config, "sell"): logger.debug("Hyperopt has 'sell' space") self.sell_space = self.custom_hyperopt.sell_indicator_space() - if HyperoptTools.has_space(self.config, 'roi'): + if HyperoptTools.has_space(self.config, "roi"): logger.debug("Hyperopt has 'roi' space") self.roi_space = self.custom_hyperopt.roi_space() - if HyperoptTools.has_space(self.config, 'stoploss'): + if HyperoptTools.has_space(self.config, "stoploss"): logger.debug("Hyperopt has 'stoploss' space") self.stoploss_space = self.custom_hyperopt.stoploss_space() - if HyperoptTools.has_space(self.config, 'trailing'): + if HyperoptTools.has_space(self.config, "trailing"): logger.debug("Hyperopt has 'trailing' space") self.trailing_space = self.custom_hyperopt.trailing_space() - if HyperoptTools.has_space(self.config, 'trades'): + if HyperoptTools.has_space(self.config, "trades"): logger.debug("Hyperopt has 'trades' space") self.max_open_trades_space = self.custom_hyperopt.max_open_trades_space() - self.dimensions = (self.buy_space + self.sell_space + self.protection_space - + self.roi_space + self.stoploss_space + self.trailing_space - + self.max_open_trades_space) + self.dimensions = ( + self.buy_space + + self.sell_space + + self.protection_space + + self.roi_space + + self.stoploss_space + + self.trailing_space + + self.max_open_trades_space + ) def assign_params(self, params_dict: Dict, category: str) -> None: """ @@ -312,104 +346,119 @@ class Hyperopt: params_dict = self._get_params_dict(self.dimensions, raw_params) # Apply parameters - if HyperoptTools.has_space(self.config, 'buy'): - self.assign_params(params_dict, 
'buy') + if HyperoptTools.has_space(self.config, "buy"): + self.assign_params(params_dict, "buy") - if HyperoptTools.has_space(self.config, 'sell'): - self.assign_params(params_dict, 'sell') + if HyperoptTools.has_space(self.config, "sell"): + self.assign_params(params_dict, "sell") - if HyperoptTools.has_space(self.config, 'protection'): - self.assign_params(params_dict, 'protection') + if HyperoptTools.has_space(self.config, "protection"): + self.assign_params(params_dict, "protection") - if HyperoptTools.has_space(self.config, 'roi'): - self.backtesting.strategy.minimal_roi = ( - self.custom_hyperopt.generate_roi_table(params_dict)) + if HyperoptTools.has_space(self.config, "roi"): + self.backtesting.strategy.minimal_roi = self.custom_hyperopt.generate_roi_table( + params_dict + ) - if HyperoptTools.has_space(self.config, 'stoploss'): - self.backtesting.strategy.stoploss = params_dict['stoploss'] + if HyperoptTools.has_space(self.config, "stoploss"): + self.backtesting.strategy.stoploss = params_dict["stoploss"] - if HyperoptTools.has_space(self.config, 'trailing'): + if HyperoptTools.has_space(self.config, "trailing"): d = self.custom_hyperopt.generate_trailing_params(params_dict) - self.backtesting.strategy.trailing_stop = d['trailing_stop'] - self.backtesting.strategy.trailing_stop_positive = d['trailing_stop_positive'] - self.backtesting.strategy.trailing_stop_positive_offset = \ - d['trailing_stop_positive_offset'] - self.backtesting.strategy.trailing_only_offset_is_reached = \ - d['trailing_only_offset_is_reached'] + self.backtesting.strategy.trailing_stop = d["trailing_stop"] + self.backtesting.strategy.trailing_stop_positive = d["trailing_stop_positive"] + self.backtesting.strategy.trailing_stop_positive_offset = d[ + "trailing_stop_positive_offset" + ] + self.backtesting.strategy.trailing_only_offset_is_reached = d[ + "trailing_only_offset_is_reached" + ] - if HyperoptTools.has_space(self.config, 'trades'): - if self.config["stake_amount"] == "unlimited" and \ - (params_dict['max_open_trades'] == -1 or params_dict['max_open_trades'] == 0): + if HyperoptTools.has_space(self.config, "trades"): + if self.config["stake_amount"] == "unlimited" and ( + params_dict["max_open_trades"] == -1 or params_dict["max_open_trades"] == 0 + ): # Ignore unlimited max open trades if stake amount is unlimited - params_dict.update({'max_open_trades': self.config['max_open_trades']}) + params_dict.update({"max_open_trades": self.config["max_open_trades"]}) - updated_max_open_trades = int(params_dict['max_open_trades']) \ - if (params_dict['max_open_trades'] != -1 - and params_dict['max_open_trades'] != 0) else float('inf') + updated_max_open_trades = ( + int(params_dict["max_open_trades"]) + if (params_dict["max_open_trades"] != -1 and params_dict["max_open_trades"] != 0) + else float("inf") + ) - self.config.update({'max_open_trades': updated_max_open_trades}) + self.config.update({"max_open_trades": updated_max_open_trades}) self.backtesting.strategy.max_open_trades = updated_max_open_trades - with self.data_pickle_file.open('rb') as f: - processed = load(f, mmap_mode='r') + with self.data_pickle_file.open("rb") as f: + processed = load(f, mmap_mode="r") if self.analyze_per_epoch: # Data is not yet analyzed, rerun populate_indicators. 
processed = self.advise_and_trim(processed) bt_results = self.backtesting.backtest( - processed=processed, - start_date=self.min_date, - end_date=self.max_date + processed=processed, start_date=self.min_date, end_date=self.max_date ) backtest_end_time = datetime.now(timezone.utc) - bt_results.update({ - 'backtest_start_time': int(backtest_start_time.timestamp()), - 'backtest_end_time': int(backtest_end_time.timestamp()), - }) + bt_results.update( + { + "backtest_start_time": int(backtest_start_time.timestamp()), + "backtest_end_time": int(backtest_end_time.timestamp()), + } + ) - return self._get_results_dict(bt_results, self.min_date, self.max_date, - params_dict, - processed=processed) + return self._get_results_dict( + bt_results, self.min_date, self.max_date, params_dict, processed=processed + ) - def _get_results_dict(self, backtesting_results, min_date, max_date, - params_dict, processed: Dict[str, DataFrame] - ) -> Dict[str, Any]: + def _get_results_dict( + self, backtesting_results, min_date, max_date, params_dict, processed: Dict[str, DataFrame] + ) -> Dict[str, Any]: params_details = self._get_params_details(params_dict) strat_stats = generate_strategy_stats( - self.pairlist, self.backtesting.strategy.get_strategy_name(), - backtesting_results, min_date, max_date, market_change=self.market_change, + self.pairlist, + self.backtesting.strategy.get_strategy_name(), + backtesting_results, + min_date, + max_date, + market_change=self.market_change, is_hyperopt=True, ) results_explanation = HyperoptTools.format_results_explanation_string( - strat_stats, self.config['stake_currency']) + strat_stats, self.config["stake_currency"] + ) not_optimized = self.backtesting.strategy.get_no_optimize_params() not_optimized = deep_merge_dicts(not_optimized, self._get_no_optimize_details()) - trade_count = strat_stats['total_trades'] - total_profit = strat_stats['profit_total'] + trade_count = strat_stats["total_trades"] + total_profit = strat_stats["profit_total"] # If this evaluation contains too short amount of trades to be # interesting -- consider it as 'bad' (assigned max. loss value) # in order to cast this hyperspace point away from optimization # path. We do not want to optimize 'hodl' strategies. 
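# --- Illustrative sketch (editorial annotation, not part of the patch) ----------------------
# The call to self.calculate_loss just below passes results, trade_count, min_date, max_date,
# config, processed and backtest_stats as keyword arguments, so a custom loss class only needs
# to accept the subset it cares about and absorb the rest via **kwargs. A minimal example using
# only the "profit_abs" column that appears elsewhere in this diff; the class name and the
# per-trade weighting are invented for illustration:
from datetime import datetime

from pandas import DataFrame

from freqtrade.optimize.hyperopt import IHyperOptLoss


class ExampleProfitPerTradeLoss(IHyperOptLoss):
    @staticmethod
    def hyperopt_loss_function(
        results: DataFrame, trade_count: int, *args, **kwargs
    ) -> float:
        # Smaller return value == better epoch, so negate the average absolute profit.
        if trade_count == 0:
            return 0.0
        return -results["profit_abs"].sum() / trade_count
# ---------------------------------------------------------------------------------------------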
loss: float = MAX_LOSS - if trade_count >= self.config['hyperopt_min_trades']: - loss = self.calculate_loss(results=backtesting_results['results'], - trade_count=trade_count, - min_date=min_date, max_date=max_date, - config=self.config, processed=processed, - backtest_stats=strat_stats) + if trade_count >= self.config["hyperopt_min_trades"]: + loss = self.calculate_loss( + results=backtesting_results["results"], + trade_count=trade_count, + min_date=min_date, + max_date=max_date, + config=self.config, + processed=processed, + backtest_stats=strat_stats, + ) return { - 'loss': loss, - 'params_dict': params_dict, - 'params_details': params_details, - 'params_not_optimized': not_optimized, - 'results_metrics': strat_stats, - 'results_explanation': results_explanation, - 'total_profit': total_profit, + "loss": loss, + "params_dict": params_dict, + "params_details": params_details, + "params_not_optimized": not_optimized, + "results_metrics": strat_stats, + "results_explanation": results_explanation, + "total_profit": total_profit, } def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer: @@ -428,16 +477,16 @@ class Hyperopt: base_estimator=estimator, acq_optimizer=acq_optimizer, n_initial_points=INITIAL_POINTS, - acq_optimizer_kwargs={'n_jobs': cpu_count}, + acq_optimizer_kwargs={"n_jobs": cpu_count}, random_state=self.random_state, model_queue_size=SKOPT_MODEL_QUEUE_SIZE, ) - def run_optimizer_parallel( - self, parallel: Parallel, asked: List[List]) -> List[Dict[str, Any]]: - """ Start optimizer in a parallel way """ - return parallel(delayed( - wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked) + def run_optimizer_parallel(self, parallel: Parallel, asked: List[List]) -> List[Dict[str, Any]]: + """Start optimizer in a parallel way""" + return parallel( + delayed(wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked + ) def _set_random_state(self, random_state: Optional[int]) -> int: return random_state or random.randint(1, 2**16 - 1) @@ -451,7 +500,7 @@ class Hyperopt: trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup) self.min_date, self.max_date = get_timerange(trimmed) if not self.market_change: - self.market_change = calculate_market_change(trimmed, 'close') + self.market_change = calculate_market_change(trimmed, "close") # Real trimming will happen as part of backtesting. return preprocessed @@ -467,10 +516,12 @@ class Hyperopt: preprocessed = self.advise_and_trim(data) - logger.info(f'Hyperopting with data from ' - f'{self.min_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} ' - f'({(self.max_date - self.min_date).days} days)..') + logger.info( + f"Hyperopting with data from " + f"{self.min_date.strftime(DATETIME_PRINT_FORMAT)} " + f"up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} " + f"({(self.max_date - self.min_date).days} days).." + ) # Store non-trimmed data - will be trimmed after signal generation. dump(preprocessed, self.data_pickle_file) else: @@ -488,12 +539,14 @@ class Hyperopt: 5. Repeat until at least `n_points` points in the `asked_non_tried` list 6. 
Return a list with length truncated at `n_points` """ + def unique_list(a_list): new_list = [] for item in a_list: if item not in new_list: new_list.append(item) return new_list + i = 0 asked_non_tried: List[List[Any]] = [] is_random_non_tried: List[bool] = [] @@ -505,18 +558,20 @@ class Hyperopt: else: asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5)) is_random = [True for _ in range(len(asked))] - is_random_non_tried += [rand for x, rand in zip(asked, is_random) - if x not in self.opt.Xi - and x not in asked_non_tried] - asked_non_tried += [x for x in asked - if x not in self.opt.Xi - and x not in asked_non_tried] + is_random_non_tried += [ + rand + for x, rand in zip(asked, is_random) + if x not in self.opt.Xi and x not in asked_non_tried + ] + asked_non_tried += [ + x for x in asked if x not in self.opt.Xi and x not in asked_non_tried + ] i += 1 if asked_non_tried: return ( - asked_non_tried[:min(len(asked_non_tried), n_points)], - is_random_non_tried[:min(len(asked_non_tried), n_points)] + asked_non_tried[: min(len(asked_non_tried), n_points)], + is_random_non_tried[: min(len(asked_non_tried), n_points)], ) else: return self.opt.ask(n_points=n_points), [False for _ in range(n_points)] @@ -525,8 +580,8 @@ class Hyperopt: """ Evaluate results returned from generate_optimizer """ - val['current_epoch'] = current - val['is_initial_point'] = current <= INITIAL_POINTS + val["current_epoch"] = current + val["is_initial_point"] = current <= INITIAL_POINTS logger.debug("Optimizer epoch evaluated: %s", val) @@ -535,18 +590,18 @@ class Hyperopt: # to keep proper order in the list of results. That's because # evaluations can take different time. Here they are aligned in the # order they will be shown to the user. - val['is_best'] = is_best - val['is_random'] = is_random + val["is_best"] = is_best + val["is_random"] = is_random self.print_results(val) if is_best: - self.current_best_loss = val['loss'] + self.current_best_loss = val["loss"] self.current_best_epoch = val self._save_result(val) def start(self) -> None: - self.random_state = self._set_random_state(self.config.get('hyperopt_random_state')) + self.random_state = self._set_random_state(self.config.get("hyperopt_random_state")) logger.info(f"Using optimizer random state: {self.random_state}") self.hyperopt_table_header = -1 # Initialize spaces ... @@ -566,8 +621,8 @@ class Hyperopt: cpus = cpu_count() logger.info(f"Found {cpus} CPU cores. Let's make them scream!") - config_jobs = self.config.get('hyperopt_jobs', -1) - logger.info(f'Number of parallel jobs set as: {config_jobs}') + config_jobs = self.config.get("hyperopt_jobs", -1) + logger.info(f"Number of parallel jobs set as: {config_jobs}") self.opt = self.get_optimizer(self.dimensions, config_jobs) @@ -577,7 +632,7 @@ class Hyperopt: try: with Parallel(n_jobs=config_jobs) as parallel: jobs = parallel._effective_n_jobs() - logger.info(f'Effective number of parallel workers used: {jobs}') + logger.info(f"Effective number of parallel workers used: {jobs}") # Define progressbar with Progress( @@ -600,7 +655,7 @@ class Hyperopt: # This allows dataprovider to load it's informative cache. 
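# --- Illustrative sketch (editorial annotation, not part of the patch) ----------------------
# run_optimizer_parallel earlier in this file relies on joblib's Parallel/delayed pair:
# delayed(fn)(arg) captures the call without executing it, and the Parallel context runs the
# resulting generator across workers, returning results in submission order. A standalone
# miniature of the same pattern (function and values invented for illustration):
from joblib import Parallel, delayed


def _evaluate(candidate: int) -> int:
    return candidate * candidate


with Parallel(n_jobs=2) as parallel:
    scores = parallel(delayed(_evaluate)(c) for c in [3, 1, 2])
# scores == [9, 1, 4] -- outputs come back in the order the candidates were submitted.
# ---------------------------------------------------------------------------------------------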
asked, is_random = self.get_asked_points(n_points=1) f_val0 = self.generate_optimizer(asked[0]) - self.opt.tell(asked, [f_val0['loss']]) + self.opt.tell(asked, [f_val0["loss"]]) self.evaluate_result(f_val0, 1, is_random[0]) pbar.update(task, advance=1) start += 1 @@ -614,7 +669,7 @@ class Hyperopt: asked, is_random = self.get_asked_points(n_points=current_jobs) f_val = self.run_optimizer_parallel(parallel, asked) - self.opt.tell(asked, [v['loss'] for v in f_val]) + self.opt.tell(asked, [v["loss"] for v in f_val]) for j, val in enumerate(f_val): # Use human-friendly indexes here (starting from 1) @@ -624,23 +679,26 @@ class Hyperopt: pbar.update(task, advance=1) except KeyboardInterrupt: - print('User interrupted..') + print("User interrupted..") - logger.info(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} " - f"saved to '{self.results_file}'.") + logger.info( + f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} " + f"saved to '{self.results_file}'." + ) if self.current_best_epoch: HyperoptTools.try_export_params( - self.config, - self.backtesting.strategy.get_strategy_name(), - self.current_best_epoch) + self.config, self.backtesting.strategy.get_strategy_name(), self.current_best_epoch + ) - HyperoptTools.show_epoch_details(self.current_best_epoch, self.total_epochs, - self.print_json) + HyperoptTools.show_epoch_details( + self.current_best_epoch, self.total_epochs, self.print_json + ) elif self.num_epochs_saved > 0: print( f"No good result found for given optimization function in {self.num_epochs_saved} " - f"{plural(self.num_epochs_saved, 'epoch')}.") + f"{plural(self.num_epochs_saved, 'epoch')}." + ) else: # This is printed when Ctrl+C is pressed quickly, before first epochs have # a chance to be evaluated. diff --git a/freqtrade/optimize/hyperopt_auto.py b/freqtrade/optimize/hyperopt_auto.py index a3e2ef058..cf0103162 100644 --- a/freqtrade/optimize/hyperopt_auto.py +++ b/freqtrade/optimize/hyperopt_auto.py @@ -3,6 +3,7 @@ HyperOptAuto class. This module implements a convenience auto-hyperopt class, which can be used together with strategies that implement IHyperStrategy interface. """ + import logging from contextlib import suppress from typing import Callable, Dict, List @@ -20,15 +21,17 @@ logger = logging.getLogger(__name__) def _format_exception_message(space: str, ignore_missing_space: bool) -> None: - msg = (f"The '{space}' space is included into the hyperoptimization " - f"but no parameter for this space was found in your Strategy. " - ) + msg = ( + f"The '{space}' space is included into the hyperoptimization " + f"but no parameter for this space was found in your Strategy. " + ) if ignore_missing_space: logger.warning(msg + "This space will be ignored.") else: raise OperationalException( msg + f"Please make sure to have parameters for this space enabled for optimization " - f"or remove the '{space}' space from hyperoptimization.") + f"or remove the '{space}' space from hyperoptimization." + ) class HyperOptAuto(IHyperOpt): @@ -44,7 +47,7 @@ class HyperOptAuto(IHyperOpt): :param name: function name. :return: a requested function. 
""" - hyperopt_cls = getattr(self.strategy, 'HyperOpt', None) + hyperopt_cls = getattr(self.strategy, "HyperOpt", None) default_func = getattr(super(), name) if hyperopt_cls: return getattr(hyperopt_cls, name, default_func) @@ -63,36 +66,36 @@ class HyperOptAuto(IHyperOpt): return indicator_space else: _format_exception_message( - category, - self.config.get("hyperopt_ignore_missing_space", False)) + category, self.config.get("hyperopt_ignore_missing_space", False) + ) return [] - def buy_indicator_space(self) -> List['Dimension']: - return self._get_indicator_space('buy') + def buy_indicator_space(self) -> List["Dimension"]: + return self._get_indicator_space("buy") - def sell_indicator_space(self) -> List['Dimension']: - return self._get_indicator_space('sell') + def sell_indicator_space(self) -> List["Dimension"]: + return self._get_indicator_space("sell") - def protection_space(self) -> List['Dimension']: - return self._get_indicator_space('protection') + def protection_space(self) -> List["Dimension"]: + return self._get_indicator_space("protection") def generate_roi_table(self, params: Dict) -> Dict[int, float]: - return self._get_func('generate_roi_table')(params) + return self._get_func("generate_roi_table")(params) - def roi_space(self) -> List['Dimension']: - return self._get_func('roi_space')() + def roi_space(self) -> List["Dimension"]: + return self._get_func("roi_space")() - def stoploss_space(self) -> List['Dimension']: - return self._get_func('stoploss_space')() + def stoploss_space(self) -> List["Dimension"]: + return self._get_func("stoploss_space")() def generate_trailing_params(self, params: Dict) -> Dict: - return self._get_func('generate_trailing_params')(params) + return self._get_func("generate_trailing_params")(params) - def trailing_space(self) -> List['Dimension']: - return self._get_func('trailing_space')() + def trailing_space(self) -> List["Dimension"]: + return self._get_func("trailing_space")() - def max_open_trades_space(self) -> List['Dimension']: - return self._get_func('max_open_trades_space')() + def max_open_trades_space(self) -> List["Dimension"]: + return self._get_func("max_open_trades_space")() - def generate_estimator(self, dimensions: List['Dimension'], **kwargs) -> EstimatorType: - return self._get_func('generate_estimator')(dimensions=dimensions, **kwargs) + def generate_estimator(self, dimensions: List["Dimension"], **kwargs) -> EstimatorType: + return self._get_func("generate_estimator")(dimensions=dimensions, **kwargs) diff --git a/freqtrade/optimize/hyperopt_epoch_filters.py b/freqtrade/optimize/hyperopt_epoch_filters.py index 80cc89d4b..0a88b9d65 100644 --- a/freqtrade/optimize/hyperopt_epoch_filters.py +++ b/freqtrade/optimize/hyperopt_epoch_filters.py @@ -11,11 +11,10 @@ def hyperopt_filter_epochs(epochs: List, filteroptions: dict, log: bool = True) """ Filter our items from the list of hyperopt results """ - if filteroptions['only_best']: - epochs = [x for x in epochs if x['is_best']] - if filteroptions['only_profitable']: - epochs = [x for x in epochs - if x['results_metrics'].get('profit_total', 0) > 0] + if filteroptions["only_best"]: + epochs = [x for x in epochs if x["is_best"]] + if filteroptions["only_profitable"]: + epochs = [x for x in epochs if x["results_metrics"].get("profit_total", 0) > 0] epochs = _hyperopt_filter_epochs_trade_count(epochs, filteroptions) @@ -25,10 +24,12 @@ def hyperopt_filter_epochs(epochs: List, filteroptions: dict, log: bool = True) epochs = _hyperopt_filter_epochs_objective(epochs, filteroptions) if 
log: - logger.info(f"{len(epochs)} " + - ("best " if filteroptions['only_best'] else "") + - ("profitable " if filteroptions['only_profitable'] else "") + - "epochs found.") + logger.info( + f"{len(epochs)} " + + ("best " if filteroptions["only_best"] else "") + + ("profitable " if filteroptions["only_profitable"] else "") + + "epochs found." + ) return epochs @@ -36,93 +37,87 @@ def _hyperopt_filter_epochs_trade(epochs: List, trade_count: int): """ Filter epochs with trade-counts > trades """ - return [ - x for x in epochs if x['results_metrics'].get('total_trades', 0) > trade_count - ] + return [x for x in epochs if x["results_metrics"].get("total_trades", 0) > trade_count] def _hyperopt_filter_epochs_trade_count(epochs: List, filteroptions: dict) -> List: + if filteroptions["filter_min_trades"] > 0: + epochs = _hyperopt_filter_epochs_trade(epochs, filteroptions["filter_min_trades"]) - if filteroptions['filter_min_trades'] > 0: - epochs = _hyperopt_filter_epochs_trade(epochs, filteroptions['filter_min_trades']) - - if filteroptions['filter_max_trades'] > 0: + if filteroptions["filter_max_trades"] > 0: epochs = [ - x for x in epochs - if x['results_metrics'].get('total_trades') < filteroptions['filter_max_trades'] + x + for x in epochs + if x["results_metrics"].get("total_trades") < filteroptions["filter_max_trades"] ] return epochs def _hyperopt_filter_epochs_duration(epochs: List, filteroptions: dict) -> List: - def get_duration_value(x): # Duration in minutes ... - if 'holding_avg_s' in x['results_metrics']: - avg = x['results_metrics']['holding_avg_s'] + if "holding_avg_s" in x["results_metrics"]: + avg = x["results_metrics"]["holding_avg_s"] return avg // 60 raise OperationalException( "Holding-average not available. Please omit the filter on average time, " - "or rerun hyperopt with this version") + "or rerun hyperopt with this version" + ) - if filteroptions['filter_min_avg_time'] is not None: + if filteroptions["filter_min_avg_time"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) - epochs = [ - x for x in epochs - if get_duration_value(x) > filteroptions['filter_min_avg_time'] - ] - if filteroptions['filter_max_avg_time'] is not None: + epochs = [x for x in epochs if get_duration_value(x) > filteroptions["filter_min_avg_time"]] + if filteroptions["filter_max_avg_time"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) - epochs = [ - x for x in epochs - if get_duration_value(x) < filteroptions['filter_max_avg_time'] - ] + epochs = [x for x in epochs if get_duration_value(x) < filteroptions["filter_max_avg_time"]] return epochs def _hyperopt_filter_epochs_profit(epochs: List, filteroptions: dict) -> List: - - if filteroptions['filter_min_avg_profit'] is not None: + if filteroptions["filter_min_avg_profit"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) epochs = [ - x for x in epochs - if x['results_metrics'].get('profit_mean', 0) * 100 - > filteroptions['filter_min_avg_profit'] + x + for x in epochs + if x["results_metrics"].get("profit_mean", 0) * 100 + > filteroptions["filter_min_avg_profit"] ] - if filteroptions['filter_max_avg_profit'] is not None: + if filteroptions["filter_max_avg_profit"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) epochs = [ - x for x in epochs - if x['results_metrics'].get('profit_mean', 0) * 100 - < filteroptions['filter_max_avg_profit'] + x + for x in epochs + if x["results_metrics"].get("profit_mean", 0) * 100 + < filteroptions["filter_max_avg_profit"] ] - if 
filteroptions['filter_min_total_profit'] is not None: + if filteroptions["filter_min_total_profit"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) epochs = [ - x for x in epochs - if x['results_metrics'].get('profit_total_abs', 0) - > filteroptions['filter_min_total_profit'] + x + for x in epochs + if x["results_metrics"].get("profit_total_abs", 0) + > filteroptions["filter_min_total_profit"] ] - if filteroptions['filter_max_total_profit'] is not None: + if filteroptions["filter_max_total_profit"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) epochs = [ - x for x in epochs - if x['results_metrics'].get('profit_total_abs', 0) - < filteroptions['filter_max_total_profit'] + x + for x in epochs + if x["results_metrics"].get("profit_total_abs", 0) + < filteroptions["filter_max_total_profit"] ] return epochs def _hyperopt_filter_epochs_objective(epochs: List, filteroptions: dict) -> List: - - if filteroptions['filter_min_objective'] is not None: + if filteroptions["filter_min_objective"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) - epochs = [x for x in epochs if x['loss'] < filteroptions['filter_min_objective']] - if filteroptions['filter_max_objective'] is not None: + epochs = [x for x in epochs if x["loss"] < filteroptions["filter_min_objective"]] + if filteroptions["filter_max_objective"] is not None: epochs = _hyperopt_filter_epochs_trade(epochs, 0) - epochs = [x for x in epochs if x['loss'] > filteroptions['filter_max_objective']] + epochs = [x for x in epochs if x["loss"] > filteroptions["filter_max_objective"]] return epochs diff --git a/freqtrade/optimize/hyperopt_interface.py b/freqtrade/optimize/hyperopt_interface.py index 65dd7ed87..216e40753 100644 --- a/freqtrade/optimize/hyperopt_interface.py +++ b/freqtrade/optimize/hyperopt_interface.py @@ -2,6 +2,7 @@ IHyperOpt interface This module defines the interface to apply for hyperopt """ + import logging import math from abc import ABC @@ -30,6 +31,7 @@ class IHyperOpt(ABC): Class attributes you can use: timeframe -> int: value of the timeframe to use for the strategy """ + timeframe: str strategy: IStrategy @@ -37,7 +39,7 @@ class IHyperOpt(ABC): self.config = config # Assign timeframe to be used in hyperopt - IHyperOpt.timeframe = str(config['timeframe']) + IHyperOpt.timeframe = str(config["timeframe"]) def generate_estimator(self, dimensions: List[Dimension], **kwargs) -> EstimatorType: """ @@ -45,7 +47,7 @@ class IHyperOpt(ABC): Can be any of "GP", "RF", "ET", "GBRT" or an instance of a class inheriting from RegressorMixin (from sklearn). """ - return 'ET' + return "ET" def generate_roi_table(self, params: Dict) -> Dict[int, float]: """ @@ -55,10 +57,10 @@ class IHyperOpt(ABC): You may override it in your custom Hyperopt class. 
""" roi_table = {} - roi_table[0] = params['roi_p1'] + params['roi_p2'] + params['roi_p3'] - roi_table[params['roi_t3']] = params['roi_p1'] + params['roi_p2'] - roi_table[params['roi_t3'] + params['roi_t2']] = params['roi_p1'] - roi_table[params['roi_t3'] + params['roi_t2'] + params['roi_t1']] = 0 + roi_table[0] = params["roi_p1"] + params["roi_p2"] + params["roi_p3"] + roi_table[params["roi_t3"]] = params["roi_p1"] + params["roi_p2"] + roi_table[params["roi_t3"] + params["roi_t2"]] = params["roi_p1"] + roi_table[params["roi_t3"] + params["roi_t2"] + params["roi_t1"]] = 0 return roi_table @@ -96,49 +98,52 @@ class IHyperOpt(ABC): roi_t_scale = timeframe_min / 5 roi_p_scale = math.log1p(timeframe_min) / math.log1p(5) roi_limits = { - 'roi_t1_min': int(10 * roi_t_scale * roi_t_alpha), - 'roi_t1_max': int(120 * roi_t_scale * roi_t_alpha), - 'roi_t2_min': int(10 * roi_t_scale * roi_t_alpha), - 'roi_t2_max': int(60 * roi_t_scale * roi_t_alpha), - 'roi_t3_min': int(10 * roi_t_scale * roi_t_alpha), - 'roi_t3_max': int(40 * roi_t_scale * roi_t_alpha), - 'roi_p1_min': 0.01 * roi_p_scale * roi_p_alpha, - 'roi_p1_max': 0.04 * roi_p_scale * roi_p_alpha, - 'roi_p2_min': 0.01 * roi_p_scale * roi_p_alpha, - 'roi_p2_max': 0.07 * roi_p_scale * roi_p_alpha, - 'roi_p3_min': 0.01 * roi_p_scale * roi_p_alpha, - 'roi_p3_max': 0.20 * roi_p_scale * roi_p_alpha, + "roi_t1_min": int(10 * roi_t_scale * roi_t_alpha), + "roi_t1_max": int(120 * roi_t_scale * roi_t_alpha), + "roi_t2_min": int(10 * roi_t_scale * roi_t_alpha), + "roi_t2_max": int(60 * roi_t_scale * roi_t_alpha), + "roi_t3_min": int(10 * roi_t_scale * roi_t_alpha), + "roi_t3_max": int(40 * roi_t_scale * roi_t_alpha), + "roi_p1_min": 0.01 * roi_p_scale * roi_p_alpha, + "roi_p1_max": 0.04 * roi_p_scale * roi_p_alpha, + "roi_p2_min": 0.01 * roi_p_scale * roi_p_alpha, + "roi_p2_max": 0.07 * roi_p_scale * roi_p_alpha, + "roi_p3_min": 0.01 * roi_p_scale * roi_p_alpha, + "roi_p3_max": 0.20 * roi_p_scale * roi_p_alpha, } logger.debug(f"Using roi space limits: {roi_limits}") p = { - 'roi_t1': roi_limits['roi_t1_min'], - 'roi_t2': roi_limits['roi_t2_min'], - 'roi_t3': roi_limits['roi_t3_min'], - 'roi_p1': roi_limits['roi_p1_min'], - 'roi_p2': roi_limits['roi_p2_min'], - 'roi_p3': roi_limits['roi_p3_min'], + "roi_t1": roi_limits["roi_t1_min"], + "roi_t2": roi_limits["roi_t2_min"], + "roi_t3": roi_limits["roi_t3_min"], + "roi_p1": roi_limits["roi_p1_min"], + "roi_p2": roi_limits["roi_p2_min"], + "roi_p3": roi_limits["roi_p3_min"], } logger.info(f"Min roi table: {round_dict(self.generate_roi_table(p), 3)}") p = { - 'roi_t1': roi_limits['roi_t1_max'], - 'roi_t2': roi_limits['roi_t2_max'], - 'roi_t3': roi_limits['roi_t3_max'], - 'roi_p1': roi_limits['roi_p1_max'], - 'roi_p2': roi_limits['roi_p2_max'], - 'roi_p3': roi_limits['roi_p3_max'], + "roi_t1": roi_limits["roi_t1_max"], + "roi_t2": roi_limits["roi_t2_max"], + "roi_t3": roi_limits["roi_t3_max"], + "roi_p1": roi_limits["roi_p1_max"], + "roi_p2": roi_limits["roi_p2_max"], + "roi_p3": roi_limits["roi_p3_max"], } logger.info(f"Max roi table: {round_dict(self.generate_roi_table(p), 3)}") return [ - Integer(roi_limits['roi_t1_min'], roi_limits['roi_t1_max'], name='roi_t1'), - Integer(roi_limits['roi_t2_min'], roi_limits['roi_t2_max'], name='roi_t2'), - Integer(roi_limits['roi_t3_min'], roi_limits['roi_t3_max'], name='roi_t3'), - SKDecimal(roi_limits['roi_p1_min'], roi_limits['roi_p1_max'], decimals=3, - name='roi_p1'), - SKDecimal(roi_limits['roi_p2_min'], roi_limits['roi_p2_max'], decimals=3, - name='roi_p2'), - 
SKDecimal(roi_limits['roi_p3_min'], roi_limits['roi_p3_max'], decimals=3, - name='roi_p3'), + Integer(roi_limits["roi_t1_min"], roi_limits["roi_t1_max"], name="roi_t1"), + Integer(roi_limits["roi_t2_min"], roi_limits["roi_t2_max"], name="roi_t2"), + Integer(roi_limits["roi_t3_min"], roi_limits["roi_t3_max"], name="roi_t3"), + SKDecimal( + roi_limits["roi_p1_min"], roi_limits["roi_p1_max"], decimals=3, name="roi_p1" + ), + SKDecimal( + roi_limits["roi_p2_min"], roi_limits["roi_p2_max"], decimals=3, name="roi_p2" + ), + SKDecimal( + roi_limits["roi_p3_min"], roi_limits["roi_p3_max"], decimals=3, name="roi_p3" + ), ] def stoploss_space(self) -> List[Dimension]: @@ -149,7 +154,7 @@ class IHyperOpt(ABC): You may override it in your custom Hyperopt class. """ return [ - SKDecimal(-0.35, -0.02, decimals=3, name='stoploss'), + SKDecimal(-0.35, -0.02, decimals=3, name="stoploss"), ] def generate_trailing_params(self, params: Dict) -> Dict: @@ -157,11 +162,12 @@ class IHyperOpt(ABC): Create dict with trailing stop parameters. """ return { - 'trailing_stop': params['trailing_stop'], - 'trailing_stop_positive': params['trailing_stop_positive'], - 'trailing_stop_positive_offset': (params['trailing_stop_positive'] + - params['trailing_stop_positive_offset_p1']), - 'trailing_only_offset_is_reached': params['trailing_only_offset_is_reached'], + "trailing_stop": params["trailing_stop"], + "trailing_stop_positive": params["trailing_stop_positive"], + "trailing_stop_positive_offset": ( + params["trailing_stop_positive"] + params["trailing_stop_positive_offset_p1"] + ), + "trailing_only_offset_is_reached": params["trailing_only_offset_is_reached"], } def trailing_space(self) -> List[Dimension]: @@ -177,18 +183,15 @@ class IHyperOpt(ABC): # This parameter is included into the hyperspace dimensions rather than assigning # it explicitly in the code in order to have it printed in the results along with # other 'trailing' hyperspace parameters. - Categorical([True], name='trailing_stop'), - - SKDecimal(0.01, 0.35, decimals=3, name='trailing_stop_positive'), - + Categorical([True], name="trailing_stop"), + SKDecimal(0.01, 0.35, decimals=3, name="trailing_stop_positive"), # 'trailing_stop_positive_offset' should be greater than 'trailing_stop_positive', # so this intermediate parameter is used as the value of the difference between # them. The value of the 'trailing_stop_positive_offset' is constructed in the # generate_trailing_params() method. # This is similar to the hyperspace dimensions used for constructing the ROI tables. - SKDecimal(0.001, 0.1, decimals=3, name='trailing_stop_positive_offset_p1'), - - Categorical([True, False], name='trailing_only_offset_is_reached'), + SKDecimal(0.001, 0.1, decimals=3, name="trailing_stop_positive_offset_p1"), + Categorical([True, False], name="trailing_only_offset_is_reached"), ] def max_open_trades_space(self) -> List[Dimension]: @@ -198,7 +201,7 @@ class IHyperOpt(ABC): You may override it in your custom Hyperopt class. """ return [ - Integer(-1, 10, name='max_open_trades'), + Integer(-1, 10, name="max_open_trades"), ] # This is needed for proper unpickling the class attribute timeframe @@ -206,9 +209,9 @@ class IHyperOpt(ABC): # Why do I still need such shamanic mantras in modern python? 
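# --- Illustrative sketch (editorial annotation, not part of the patch) ----------------------
# The default spaces above are only fallbacks: HyperOptAuto._get_func (earlier in this diff)
# first looks for a `HyperOpt` class nested inside the strategy and prefers its methods. A
# strategy can therefore replace, say, the stoploss space as sketched here (strategy name
# invented; the usual populate_* methods and strategy attributes are omitted for brevity):
from freqtrade.optimize.space import SKDecimal
from freqtrade.strategy import IStrategy


class ExampleStrategy(IStrategy):
    class HyperOpt:
        # Resolved via getattr() on the class and called with no arguments,
        # hence no `self` parameter.
        def stoploss_space():
            return [SKDecimal(-0.30, -0.05, decimals=3, name="stoploss")]
# ---------------------------------------------------------------------------------------------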
def __getstate__(self): state = self.__dict__.copy() - state['timeframe'] = self.timeframe + state["timeframe"] = self.timeframe return state def __setstate__(self, state): self.__dict__.update(state) - IHyperOpt.timeframe = state['timeframe'] + IHyperOpt.timeframe = state["timeframe"] diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_calmar.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_calmar.py index b8935b08e..f22d59e50 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_calmar.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_calmar.py @@ -4,6 +4,7 @@ CalmarHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + from datetime import datetime from pandas import DataFrame @@ -21,15 +22,21 @@ class CalmarHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - config: Config, *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + config: Config, + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for more optimal results. Uses Calmar Ratio calculation. """ - starting_balance = config['dry_run_wallet'] + starting_balance = config["dry_run_wallet"] calmar_ratio = calculate_calmar(results, min_date, max_date, starting_balance) # print(expected_returns_mean, max_drawdown, calmar_ratio) return -calmar_ratio diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown.py index a8af704cd..ce1b29cf5 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown.py @@ -4,6 +4,7 @@ MaxDrawDownHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + from datetime import datetime from pandas import DataFrame @@ -13,7 +14,6 @@ from freqtrade.optimize.hyperopt import IHyperOptLoss class MaxDrawDownHyperOptLoss(IHyperOptLoss): - """ Defines the loss function for hyperopt. @@ -22,20 +22,24 @@ class MaxDrawDownHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - *args, **kwargs) -> float: - + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + *args, + **kwargs, + ) -> float: """ Objective function. Uses profit ratio weighted max_drawdown when drawdown is available. Otherwise directly optimizes profit ratio. """ - total_profit = results['profit_abs'].sum() + total_profit = results["profit_abs"].sum() try: - max_drawdown = calculate_max_drawdown(results, value_col='profit_abs') + max_drawdown = calculate_max_drawdown(results, value_col="profit_abs") except ValueError: # No losing trade, therefore no drawdown. 
return -total_profit - return -total_profit / max_drawdown[0] + return -total_profit / max_drawdown.drawdown_abs diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown_relative.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown_relative.py index 669d12ddf..ee7088d75 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown_relative.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_max_drawdown_relative.py @@ -4,6 +4,7 @@ MaxDrawDownRelativeHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + from pandas import DataFrame from freqtrade.constants import Config @@ -12,7 +13,6 @@ from freqtrade.optimize.hyperopt import IHyperOptLoss class MaxDrawDownRelativeHyperOptLoss(IHyperOptLoss): - """ Defines the loss function for hyperopt. @@ -21,24 +21,20 @@ class MaxDrawDownRelativeHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, config: Config, - *args, **kwargs) -> float: - + def hyperopt_loss_function(results: DataFrame, config: Config, *args, **kwargs) -> float: """ Objective function. Uses profit ratio weighted max_drawdown when drawdown is available. Otherwise directly optimizes profit ratio. """ - total_profit = results['profit_abs'].sum() + total_profit = results["profit_abs"].sum() try: drawdown_df = calculate_underwater( - results, - value_col='profit_abs', - starting_balance=config['dry_run_wallet'] + results, value_col="profit_abs", starting_balance=config["dry_run_wallet"] ) - max_drawdown = abs(min(drawdown_df['drawdown'])) - relative_drawdown = max(drawdown_df['drawdown_relative']) + max_drawdown = abs(min(drawdown_df["drawdown"])) + relative_drawdown = max(drawdown_df["drawdown_relative"]) if max_drawdown == 0: return -total_profit return -total_profit / max_drawdown / relative_drawdown diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_onlyprofit.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_onlyprofit.py index 4a3cf1b3b..dd4a448d5 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_onlyprofit.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_onlyprofit.py @@ -4,6 +4,7 @@ OnlyProfitHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + from pandas import DataFrame from freqtrade.optimize.hyperopt import IHyperOptLoss @@ -17,10 +18,9 @@ class OnlyProfitHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - *args, **kwargs) -> float: + def hyperopt_loss_function(results: DataFrame, trade_count: int, *args, **kwargs) -> float: """ Objective function, returns smaller number for better results. """ - total_profit = results['profit_abs'].sum() + total_profit = results["profit_abs"].sum() return -1 * total_profit diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_profit_drawdown.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_profit_drawdown.py index ed689edba..61e2a6d32 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_profit_drawdown.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_profit_drawdown.py @@ -7,24 +7,31 @@ Drawdown objective which can be used for Hyperoptimization. Possible to change `DRAWDOWN_MULT` to penalize drawdown objective for individual needs. 
""" + from pandas import DataFrame +from freqtrade.constants import Config from freqtrade.data.metrics import calculate_max_drawdown from freqtrade.optimize.hyperopt import IHyperOptLoss -# higher numbers penalize drawdowns more severely +# smaller numbers penalize drawdowns more severely DRAWDOWN_MULT = 0.075 class ProfitDrawDownHyperOptLoss(IHyperOptLoss): @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, *args, **kwargs) -> float: + def hyperopt_loss_function(results: DataFrame, config: Config, *args, **kwargs) -> float: total_profit = results["profit_abs"].sum() try: - max_drawdown_abs = calculate_max_drawdown(results, value_col="profit_abs")[5] + drawdown = calculate_max_drawdown( + results, starting_balance=config["dry_run_wallet"], value_col="profit_abs" + ) + relative_account_drawdown = drawdown.relative_account_drawdown except ValueError: - max_drawdown_abs = 0 + relative_account_drawdown = 0 - return -1 * (total_profit * (1 - max_drawdown_abs * DRAWDOWN_MULT)) + return -1 * ( + total_profit - (relative_account_drawdown * total_profit) * (1 - DRAWDOWN_MULT) + ) diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe.py index 8ebb90fc5..2c7042a8a 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe.py @@ -4,6 +4,7 @@ SharpeHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + from datetime import datetime from pandas import DataFrame @@ -21,15 +22,21 @@ class SharpeHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - config: Config, *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + config: Config, + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for more optimal results. Uses Sharpe Ratio calculation. """ - starting_balance = config['dry_run_wallet'] + starting_balance = config["dry_run_wallet"] sharp_ratio = calculate_sharpe(results, min_date, max_date, starting_balance) # print(expected_returns_mean, up_stdev, sharp_ratio) return -sharp_ratio diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe_daily.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe_daily.py index 88c97989a..ea1efe0e1 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe_daily.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sharpe_daily.py @@ -4,6 +4,7 @@ SharpeHyperOptLossDaily This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + import math from datetime import datetime @@ -20,31 +21,38 @@ class SharpeHyperOptLossDaily(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for more optimal results. Uses Sharpe Ratio calculation. 
""" - resample_freq = '1D' + resample_freq = "1D" slippage_per_trade_ratio = 0.0005 days_in_year = 365 annual_risk_free_rate = 0.0 risk_free_rate = annual_risk_free_rate / days_in_year # apply slippage per trade to profit_ratio - results.loc[:, 'profit_ratio_after_slippage'] = \ - results['profit_ratio'] - slippage_per_trade_ratio + results.loc[:, "profit_ratio_after_slippage"] = ( + results["profit_ratio"] - slippage_per_trade_ratio + ) # create the index within the min_date and end max_date - t_index = date_range(start=min_date, end=max_date, freq=resample_freq, - normalize=True) + t_index = date_range(start=min_date, end=max_date, freq=resample_freq, normalize=True) sum_daily = ( - results.resample(resample_freq, on='close_date').agg( - {"profit_ratio_after_slippage": 'sum'}).reindex(t_index).fillna(0) + results.resample(resample_freq, on="close_date") + .agg({"profit_ratio_after_slippage": "sum"}) + .reindex(t_index) + .fillna(0) ) total_profit = sum_daily["profit_ratio_after_slippage"] - risk_free_rate @@ -55,7 +63,7 @@ class SharpeHyperOptLossDaily(IHyperOptLoss): sharp_ratio = expected_returns_mean / up_stdev * math.sqrt(days_in_year) else: # Define high (negative) sharpe ratio to be clear that this is NOT optimal. - sharp_ratio = -20. + sharp_ratio = -20.0 # print(t_index, sum_daily, total_profit) # print(risk_free_rate, expected_returns_mean, up_stdev, sharp_ratio) diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_short_trade_dur.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_short_trade_dur.py index 3712fd9a6..12565f10e 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_short_trade_dur.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_short_trade_dur.py @@ -3,6 +3,7 @@ ShortTradeDurHyperOptLoss This module defines the default HyperoptLoss class which is being used for Hyperoptimization. """ + from math import exp from pandas import DataFrame @@ -32,8 +33,7 @@ class ShortTradeDurHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - *args, **kwargs) -> float: + def hyperopt_loss_function(results: DataFrame, trade_count: int, *args, **kwargs) -> float: """ Objective function, returns smaller number for better results This is the Default algorithm @@ -42,10 +42,10 @@ class ShortTradeDurHyperOptLoss(IHyperOptLoss): * 0.25: Avoiding trade loss * 1.0 to total profit, compared to the expected value (`EXPECTED_MAX_PROFIT`) defined above """ - total_profit = results['profit_ratio'].sum() - trade_duration = results['trade_duration'].mean() + total_profit = results["profit_ratio"].sum() + trade_duration = results["trade_duration"].mean() - trade_loss = 1 - 0.25 * exp(-(trade_count - TARGET_TRADES) ** 2 / 10 ** 5.8) + trade_loss = 1 - 0.25 * exp(-((trade_count - TARGET_TRADES) ** 2) / 10**5.8) profit_loss = max(0, 1 - total_profit / EXPECTED_MAX_PROFIT) duration_loss = 0.4 * min(trade_duration / MAX_ACCEPTED_TRADE_DURATION, 1) result = trade_loss + profit_loss + duration_loss diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino.py index a0122a0bf..32ff0c73f 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino.py @@ -4,6 +4,7 @@ SortinoHyperOptLoss This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. 
""" + from datetime import datetime from pandas import DataFrame @@ -21,15 +22,21 @@ class SortinoHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - config: Config, *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + config: Config, + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for more optimal results. Uses Sortino Ratio calculation. """ - starting_balance = config['dry_run_wallet'] + starting_balance = config["dry_run_wallet"] sortino_ratio = calculate_sortino(results, min_date, max_date, starting_balance) # print(expected_returns_mean, down_stdev, sortino_ratio) return -sortino_ratio diff --git a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino_daily.py b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino_daily.py index 5beacc6fc..321b89dc2 100644 --- a/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino_daily.py +++ b/freqtrade/optimize/hyperopt_loss/hyperopt_loss_sortino_daily.py @@ -4,6 +4,7 @@ SortinoHyperOptLossDaily This module defines the alternative HyperOptLoss class which can be used for Hyperoptimization. """ + import math from datetime import datetime @@ -20,9 +21,14 @@ class SortinoHyperOptLossDaily(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for more optimal results. @@ -31,30 +37,32 @@ class SortinoHyperOptLossDaily(IHyperOptLoss): Sortino Ratio calculated as described in http://www.redrockcapital.com/Sortino__A__Sharper__Ratio_Red_Rock_Capital.pdf """ - resample_freq = '1D' + resample_freq = "1D" slippage_per_trade_ratio = 0.0005 days_in_year = 365 minimum_acceptable_return = 0.0 # apply slippage per trade to profit_ratio - results.loc[:, 'profit_ratio_after_slippage'] = \ - results['profit_ratio'] - slippage_per_trade_ratio + results.loc[:, "profit_ratio_after_slippage"] = ( + results["profit_ratio"] - slippage_per_trade_ratio + ) # create the index within the min_date and end max_date - t_index = date_range(start=min_date, end=max_date, freq=resample_freq, - normalize=True) + t_index = date_range(start=min_date, end=max_date, freq=resample_freq, normalize=True) sum_daily = ( - results.resample(resample_freq, on='close_date').agg( - {"profit_ratio_after_slippage": 'sum'}).reindex(t_index).fillna(0) + results.resample(resample_freq, on="close_date") + .agg({"profit_ratio_after_slippage": "sum"}) + .reindex(t_index) + .fillna(0) ) total_profit = sum_daily["profit_ratio_after_slippage"] - minimum_acceptable_return expected_returns_mean = total_profit.mean() - sum_daily['downside_returns'] = 0.0 - sum_daily.loc[total_profit < 0, 'downside_returns'] = total_profit - total_downside = sum_daily['downside_returns'] + sum_daily["downside_returns"] = 0.0 + sum_daily.loc[total_profit < 0, "downside_returns"] = total_profit + total_downside = sum_daily["downside_returns"] # Here total_downside contains min(0, P - MAR) values, # where P = sum_daily["profit_ratio_after_slippage"] down_stdev = math.sqrt((total_downside**2).sum() / len(total_downside)) @@ -63,7 +71,7 @@ class SortinoHyperOptLossDaily(IHyperOptLoss): 
sortino_ratio = expected_returns_mean / down_stdev * math.sqrt(days_in_year) else: # Define high (negative) sortino ratio to be clear that this is NOT optimal. - sortino_ratio = -20. + sortino_ratio = -20.0 # print(t_index, sum_daily, total_profit) # print(minimum_acceptable_return, expected_returns_mean, down_stdev, sortino_ratio) diff --git a/freqtrade/optimize/hyperopt_loss_interface.py b/freqtrade/optimize/hyperopt_loss_interface.py index d7b30dfd3..39457b753 100644 --- a/freqtrade/optimize/hyperopt_loss_interface.py +++ b/freqtrade/optimize/hyperopt_loss_interface.py @@ -17,15 +17,22 @@ class IHyperOptLoss(ABC): Interface for freqtrade hyperopt Loss functions. Defines the custom loss function (`hyperopt_loss_function()` which is evaluated every epoch.) """ + timeframe: str @staticmethod @abstractmethod - def hyperopt_loss_function(*, results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - config: Config, processed: Dict[str, DataFrame], - backtest_stats: Dict[str, Any], - **kwargs) -> float: + def hyperopt_loss_function( + *, + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + config: Config, + processed: Dict[str, DataFrame], + backtest_stats: Dict[str, Any], + **kwargs, + ) -> float: """ Objective function, returns smaller number for better results """ diff --git a/freqtrade/optimize/hyperopt_tools.py b/freqtrade/optimize/hyperopt_tools.py index 5a09d92b5..50c55c43d 100644 --- a/freqtrade/optimize/hyperopt_tools.py +++ b/freqtrade/optimize/hyperopt_tools.py @@ -37,7 +37,8 @@ def hyperopt_serializer(x): class HyperoptStateContainer: - """ Singleton class to track state of hyperopt""" + """Singleton class to track state of hyperopt""" + state: HyperoptState = HyperoptState.OPTIMIZE @classmethod @@ -46,20 +47,21 @@ class HyperoptStateContainer: class HyperoptTools: - @staticmethod def get_strategy_filename(config: Config, strategy_name: str) -> Optional[Path]: """ Get Strategy-location (filename) from strategy_name """ from freqtrade.resolvers.strategy_resolver import StrategyResolver + strategy_objs = StrategyResolver.search_all_objects( - config, False, config.get('recursive_strategy_search', False)) - strategies = [s for s in strategy_objs if s['name'] == strategy_name] + config, False, config.get("recursive_strategy_search", False) + ) + strategies = [s for s in strategy_objs if s["name"] == strategy_name] if strategies: strategy = strategies[0] - return Path(strategy['location']) + return Path(strategy["location"]) return None @staticmethod @@ -67,37 +69,40 @@ class HyperoptTools: """ Generate files """ - final_params = deepcopy(params['params_not_optimized']) - final_params = deep_merge_dicts(params['params_details'], final_params) + final_params = deepcopy(params["params_not_optimized"]) + final_params = deep_merge_dicts(params["params_details"], final_params) final_params = { - 'strategy_name': strategy_name, - 'params': final_params, - 'ft_stratparam_v': 1, - 'export_time': datetime.now(timezone.utc), + "strategy_name": strategy_name, + "params": final_params, + "ft_stratparam_v": 1, + "export_time": datetime.now(timezone.utc), } logger.info(f"Dumping parameters to {filename}") - with filename.open('w') as f: - rapidjson.dump(final_params, f, indent=2, - default=hyperopt_serializer, - number_mode=HYPER_PARAMS_FILE_FORMAT - ) + with filename.open("w") as f: + rapidjson.dump( + final_params, + f, + indent=2, + default=hyperopt_serializer, + number_mode=HYPER_PARAMS_FILE_FORMAT, + ) @staticmethod def 
load_params(filename: Path) -> Dict: """ Load parameters from file """ - with filename.open('r') as f: + with filename.open("r") as f: params = rapidjson.load(f, number_mode=HYPER_PARAMS_FILE_FORMAT) return params @staticmethod def try_export_params(config: Config, strategy_name: str, params: Dict): - if params.get(FTHYPT_FILEVERSION, 1) >= 2 and not config.get('disableparamexport', False): + if params.get(FTHYPT_FILEVERSION, 1) >= 2 and not config.get("disableparamexport", False): # Export parameters ... fn = HyperoptTools.get_strategy_filename(config, strategy_name) if fn: - HyperoptTools.export_params(params, strategy_name, fn.with_suffix('.json')) + HyperoptTools.export_params(params, strategy_name, fn.with_suffix(".json")) else: logger.warning("Strategy not found, not exporting parameter file.") @@ -107,10 +112,10 @@ class HyperoptTools: Tell if the space value is contained in the configuration """ # 'trailing' and 'protection spaces are not included in the 'default' set of spaces - if space in ('trailing', 'protection', 'trades'): - return any(s in config['spaces'] for s in [space, 'all']) + if space in ("trailing", "protection", "trades"): + return any(s in config["spaces"] for s in [space, "all"]) else: - return any(s in config['spaces'] for s in [space, 'all', 'default']) + return any(s in config["spaces"] for s in [space, "all", "default"]) @staticmethod def _read_results(results_file: Path, batch_size: int = 10) -> Iterator[List[Any]]: @@ -118,8 +123,9 @@ class HyperoptTools: Stream hyperopt results from file """ import rapidjson + logger.info(f"Reading epochs from '{results_file}'") - with results_file.open('r') as f: + with results_file.open("r") as f: data = [] for line in f: data += [rapidjson.loads(line)] @@ -131,7 +137,7 @@ class HyperoptTools: @staticmethod def _test_hyperopt_results_exist(results_file) -> bool: if results_file.is_file() and results_file.stat().st_size > 0: - if results_file.suffix == '.pickle': + if results_file.suffix == ".pickle": raise OperationalException( "Legacy hyperopt results are no longer supported." "Please rerun hyperopt or use an older version to load this file." 
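# --- Illustrative aside (editorial annotation, not part of the patch) -----------------------
# _read_results above treats the results file as JSON Lines: one epoch object per line, parsed
# with rapidjson and yielded in batches so large hyperopt runs are never loaded all at once.
# A minimal reader with the same shape (function name, path handling and batch size are
# invented for illustration):
import rapidjson


def read_epochs(path, batch_size: int = 10):
    with open(path) as f:
        batch = []
        for line in f:
            batch.append(rapidjson.loads(line))
            if len(batch) >= batch_size:
                yield batch
                batch = []
        if batch:
            yield batch
# ---------------------------------------------------------------------------------------------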
@@ -144,18 +150,18 @@ class HyperoptTools: @staticmethod def load_filtered_results(results_file: Path, config: Config) -> Tuple[List, int]: filteroptions = { - 'only_best': config.get('hyperopt_list_best', False), - 'only_profitable': config.get('hyperopt_list_profitable', False), - 'filter_min_trades': config.get('hyperopt_list_min_trades', 0), - 'filter_max_trades': config.get('hyperopt_list_max_trades', 0), - 'filter_min_avg_time': config.get('hyperopt_list_min_avg_time'), - 'filter_max_avg_time': config.get('hyperopt_list_max_avg_time'), - 'filter_min_avg_profit': config.get('hyperopt_list_min_avg_profit'), - 'filter_max_avg_profit': config.get('hyperopt_list_max_avg_profit'), - 'filter_min_total_profit': config.get('hyperopt_list_min_total_profit'), - 'filter_max_total_profit': config.get('hyperopt_list_max_total_profit'), - 'filter_min_objective': config.get('hyperopt_list_min_objective'), - 'filter_max_objective': config.get('hyperopt_list_max_objective'), + "only_best": config.get("hyperopt_list_best", False), + "only_profitable": config.get("hyperopt_list_profitable", False), + "filter_min_trades": config.get("hyperopt_list_min_trades", 0), + "filter_max_trades": config.get("hyperopt_list_max_trades", 0), + "filter_min_avg_time": config.get("hyperopt_list_min_avg_time"), + "filter_max_avg_time": config.get("hyperopt_list_max_avg_time"), + "filter_min_avg_profit": config.get("hyperopt_list_min_avg_profit"), + "filter_max_avg_profit": config.get("hyperopt_list_max_avg_profit"), + "filter_min_total_profit": config.get("hyperopt_list_min_total_profit"), + "filter_max_total_profit": config.get("hyperopt_list_max_total_profit"), + "filter_min_objective": config.get("hyperopt_list_min_objective"), + "filter_max_objective": config.get("hyperopt_list_max_objective"), } if not HyperoptTools._test_hyperopt_results_exist(results_file): # No file found. @@ -165,10 +171,11 @@ class HyperoptTools: epochs = [] total_epochs = 0 for epochs_tmp in HyperoptTools._read_results(results_file): - if total_epochs == 0 and epochs_tmp[0].get('is_best') is None: + if total_epochs == 0 and epochs_tmp[0].get("is_best") is None: raise OperationalException( "The file with HyperoptTools results is incompatible with this version " - "of Freqtrade and cannot be loaded.") + "of Freqtrade and cannot be loaded." 
+ ) total_epochs += len(epochs_tmp) epochs += hyperopt_filter_epochs(epochs_tmp, filteroptions, log=False) @@ -180,13 +187,18 @@ class HyperoptTools: return epochs, total_epochs @staticmethod - def show_epoch_details(results, total_epochs: int, print_json: bool, - no_header: bool = False, header_str: Optional[str] = None) -> None: + def show_epoch_details( + results, + total_epochs: int, + print_json: bool, + no_header: bool = False, + header_str: Optional[str] = None, + ) -> None: """ Display details of the hyperopt result """ - params = results.get('params_details', {}) - non_optimized = results.get('params_not_optimized', {}) + params = results.get("params_details", {}) + non_optimized = results.get("params_not_optimized", {}) # Default header string if header_str is None: @@ -198,23 +210,34 @@ class HyperoptTools: if print_json: result_dict: Dict = {} - for s in ['buy', 'sell', 'protection', - 'roi', 'stoploss', 'trailing', 'max_open_trades']: + for s in [ + "buy", + "sell", + "protection", + "roi", + "stoploss", + "trailing", + "max_open_trades", + ]: HyperoptTools._params_update_for_json(result_dict, params, non_optimized, s) print(rapidjson.dumps(result_dict, default=str, number_mode=HYPER_PARAMS_FILE_FORMAT)) else: - HyperoptTools._params_pretty_print(params, 'buy', "Buy hyperspace params:", - non_optimized) - HyperoptTools._params_pretty_print(params, 'sell', "Sell hyperspace params:", - non_optimized) - HyperoptTools._params_pretty_print(params, 'protection', - "Protection hyperspace params:", non_optimized) - HyperoptTools._params_pretty_print(params, 'roi', "ROI table:", non_optimized) - HyperoptTools._params_pretty_print(params, 'stoploss', "Stoploss:", non_optimized) - HyperoptTools._params_pretty_print(params, 'trailing', "Trailing stop:", non_optimized) HyperoptTools._params_pretty_print( - params, 'max_open_trades', "Max Open Trades:", non_optimized) + params, "buy", "Buy hyperspace params:", non_optimized + ) + HyperoptTools._params_pretty_print( + params, "sell", "Sell hyperspace params:", non_optimized + ) + HyperoptTools._params_pretty_print( + params, "protection", "Protection hyperspace params:", non_optimized + ) + HyperoptTools._params_pretty_print(params, "roi", "ROI table:", non_optimized) + HyperoptTools._params_pretty_print(params, "stoploss", "Stoploss:", non_optimized) + HyperoptTools._params_pretty_print(params, "trailing", "Trailing stop:", non_optimized) + HyperoptTools._params_pretty_print( + params, "max_open_trades", "Max Open Trades:", non_optimized + ) @staticmethod def _params_update_for_json(result_dict, params, non_optimized, space: str) -> None: @@ -227,23 +250,23 @@ class HyperoptTools: if len(space_non_optimized) > 0: all_space_params = {**space_params, **space_non_optimized} - if space in ['buy', 'sell']: - result_dict.setdefault('params', {}).update(all_space_params) - elif space == 'roi': + if space in ["buy", "sell"]: + result_dict.setdefault("params", {}).update(all_space_params) + elif space == "roi": # Convert keys in min_roi dict to strings because # rapidjson cannot dump dicts with integer keys... 
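# --- Illustrative aside (editorial annotation, not part of the patch) -----------------------
# Context for the key conversion below: JSON object keys are strings, and (as the comment above
# notes) rapidjson cannot dump dicts keyed by ints, so the ROI table's integer minute-offsets
# are stringified before serialization. Example values invented:
minimal_roi = {0: 0.10, 30: 0.05, 60: 0.0}
json_safe_roi = {str(k): v for k, v in minimal_roi.items()}
# json_safe_roi == {"0": 0.1, "30": 0.05, "60": 0.0}
# ---------------------------------------------------------------------------------------------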
- result_dict['minimal_roi'] = {str(k): v for k, v in all_space_params.items()} + result_dict["minimal_roi"] = {str(k): v for k, v in all_space_params.items()} else: # 'stoploss', 'trailing' result_dict.update(all_space_params) @staticmethod def _params_pretty_print( - params, space: str, header: str, non_optimized: Optional[Dict] = None) -> None: - + params, space: str, header: str, non_optimized: Optional[Dict] = None + ) -> None: if space in params or (non_optimized and space in non_optimized): space_params = HyperoptTools._space_params(params, space, 5) no_params = HyperoptTools._space_params(non_optimized, space, 5) - appendix = '' + appendix = "" if not space_params and not no_params: # No parameters - don't print return @@ -254,15 +277,18 @@ class HyperoptTools: result = f"\n# {header}\n" if space == "stoploss": stoploss = safe_value_fallback2(space_params, no_params, space, space) - result += (f"stoploss = {stoploss}{appendix}") + result += f"stoploss = {stoploss}{appendix}" elif space == "max_open_trades": max_open_trades = safe_value_fallback2(space_params, no_params, space, space) - result += (f"max_open_trades = {max_open_trades}{appendix}") + result += f"max_open_trades = {max_open_trades}{appendix}" elif space == "roi": - result = result[:-1] + f'{appendix}\n' - minimal_roi_result = rapidjson.dumps({ - str(k): v for k, v in (space_params or no_params).items() - }, default=str, indent=4, number_mode=rapidjson.NM_NATIVE) + result = result[:-1] + f"{appendix}\n" + minimal_roi_result = rapidjson.dumps( + {str(k): v for k, v in (space_params or no_params).items()}, + default=str, + indent=4, + number_mode=rapidjson.NM_NATIVE, + ) result += f"minimal_roi = {minimal_roi_result}" elif space == "trailing": for k, v in (space_params or no_params).items(): @@ -291,177 +317,212 @@ class HyperoptTools: """ p = params.copy() p.update(non_optimized) - result = '{\n' + result = "{\n" for k, param in p.items(): result += " " * indent + f'"{k}": ' - result += f'"{param}",' if isinstance(param, str) else f'{param},' + result += f'"{param}",' if isinstance(param, str) else f"{param}," if k in non_optimized: result += NON_OPT_PARAM_APPENDIX result += "\n" - result += '}' + result += "}" return result @staticmethod def is_best_loss(results, current_best_loss: float) -> bool: - return bool(results['loss'] < current_best_loss) + return bool(results["loss"] < current_best_loss) @staticmethod def format_results_explanation_string(results_metrics: Dict, stake_currency: str) -> str: """ Return the formatted results explanation in a string """ - return (f"{results_metrics['total_trades']:6d} trades. " - f"{results_metrics['wins']}/{results_metrics['draws']}" - f"/{results_metrics['losses']} Wins/Draws/Losses. " - f"Avg profit {results_metrics['profit_mean']:7.2%}. " - f"Median profit {results_metrics['profit_median']:7.2%}. " - f"Total profit {results_metrics['profit_total_abs']:11.8f} {stake_currency} " - f"({results_metrics['profit_total']:8.2%}). " - f"Avg duration {results_metrics['holding_avg']} min." - ) + return ( + f"{results_metrics['total_trades']:6d} trades. " + f"{results_metrics['wins']}/{results_metrics['draws']}" + f"/{results_metrics['losses']} Wins/Draws/Losses. " + f"Avg profit {results_metrics['profit_mean']:7.2%}. " + f"Median profit {results_metrics['profit_median']:7.2%}. " + f"Total profit {results_metrics['profit_total_abs']:11.8f} {stake_currency} " + f"({results_metrics['profit_total']:8.2%}). " + f"Avg duration {results_metrics['holding_avg']} min." 
+ ) @staticmethod def _format_explanation_string(results, total_epochs) -> str: - return (("*" if results['is_initial_point'] else " ") + - f"{results['current_epoch']:5d}/{total_epochs}: " + - f"{results['results_explanation']} " + - f"Objective: {results['loss']:.5f}") + return ( + ("*" if results["is_initial_point"] else " ") + + f"{results['current_epoch']:5d}/{total_epochs}: " + + f"{results['results_explanation']} " + + f"Objective: {results['loss']:.5f}" + ) @staticmethod - def prepare_trials_columns(trials: pd.DataFrame, has_drawdown: bool) -> pd.DataFrame: - trials['Best'] = '' + def prepare_trials_columns(trials: pd.DataFrame) -> pd.DataFrame: + trials["Best"] = "" - if 'results_metrics.winsdrawslosses' not in trials.columns: + if "results_metrics.winsdrawslosses" not in trials.columns: # Ensure compatibility with older versions of hyperopt results - trials['results_metrics.winsdrawslosses'] = 'N/A' + trials["results_metrics.winsdrawslosses"] = "N/A" - if not has_drawdown: + has_account_drawdown = "results_metrics.max_drawdown_account" in trials.columns + if not has_account_drawdown: # Ensure compatibility with older versions of hyperopt results - trials['results_metrics.max_drawdown_account'] = None - if 'is_random' not in trials.columns: - trials['is_random'] = False + trials["results_metrics.max_drawdown_account"] = None + if "is_random" not in trials.columns: + trials["is_random"] = False # New mode, using backtest result for metrics - trials['results_metrics.winsdrawslosses'] = trials.apply( + trials["results_metrics.winsdrawslosses"] = trials.apply( lambda x: generate_wins_draws_losses( - x['results_metrics.wins'], x['results_metrics.draws'], - x['results_metrics.losses'] - ), axis=1) + x["results_metrics.wins"], x["results_metrics.draws"], x["results_metrics.losses"] + ), + axis=1, + ) - trials = trials[['Best', 'current_epoch', 'results_metrics.total_trades', - 'results_metrics.winsdrawslosses', - 'results_metrics.profit_mean', 'results_metrics.profit_total_abs', - 'results_metrics.profit_total', 'results_metrics.holding_avg', - 'results_metrics.max_drawdown', - 'results_metrics.max_drawdown_account', 'results_metrics.max_drawdown_abs', - 'loss', 'is_initial_point', 'is_random', 'is_best']] + trials = trials[ + [ + "Best", + "current_epoch", + "results_metrics.total_trades", + "results_metrics.winsdrawslosses", + "results_metrics.profit_mean", + "results_metrics.profit_total_abs", + "results_metrics.profit_total", + "results_metrics.holding_avg", + "results_metrics.max_drawdown_account", + "results_metrics.max_drawdown_abs", + "loss", + "is_initial_point", + "is_random", + "is_best", + ] + ] trials.columns = [ - 'Best', 'Epoch', 'Trades', ' Win Draw Loss Win%', 'Avg profit', - 'Total profit', 'Profit', 'Avg duration', 'max_drawdown', 'max_drawdown_account', - 'max_drawdown_abs', 'Objective', 'is_initial_point', 'is_random', 'is_best' - ] + "Best", + "Epoch", + "Trades", + " Win Draw Loss Win%", + "Avg profit", + "Total profit", + "Profit", + "Avg duration", + "max_drawdown_account", + "max_drawdown_abs", + "Objective", + "is_initial_point", + "is_random", + "is_best", + ] return trials @staticmethod - def get_result_table(config: Config, results: list, total_epochs: int, highlight_best: bool, - print_colorized: bool, remove_header: int) -> str: + def get_result_table( + config: Config, + results: list, + total_epochs: int, + highlight_best: bool, + print_colorized: bool, + remove_header: int, + ) -> str: """ Log result table """ if not results: - return '' + return "" 
tabulate.PRESERVE_WHITESPACE = True trials = json_normalize(results, max_level=1) - has_account_drawdown = 'results_metrics.max_drawdown_account' in trials.columns + trials = HyperoptTools.prepare_trials_columns(trials) - trials = HyperoptTools.prepare_trials_columns(trials, has_account_drawdown) - - trials['is_profit'] = False - trials.loc[trials['is_initial_point'] | trials['is_random'], 'Best'] = '* ' - trials.loc[trials['is_best'], 'Best'] = 'Best' + trials["is_profit"] = False + trials.loc[trials["is_initial_point"] | trials["is_random"], "Best"] = "* " + trials.loc[trials["is_best"], "Best"] = "Best" trials.loc[ - (trials['is_initial_point'] | trials['is_random']) & trials['is_best'], - 'Best'] = '* Best' - trials.loc[trials['Total profit'] > 0, 'is_profit'] = True - trials['Trades'] = trials['Trades'].astype(str) + (trials["is_initial_point"] | trials["is_random"]) & trials["is_best"], "Best" + ] = "* Best" + trials.loc[trials["Total profit"] > 0, "is_profit"] = True + trials["Trades"] = trials["Trades"].astype(str) # perc_multi = 1 if legacy_mode else 100 - trials['Epoch'] = trials['Epoch'].apply( - lambda x: '{}/{}'.format(str(x).rjust(len(str(total_epochs)), ' '), total_epochs) + trials["Epoch"] = trials["Epoch"].apply( + lambda x: "{}/{}".format(str(x).rjust(len(str(total_epochs)), " "), total_epochs) ) - trials['Avg profit'] = trials['Avg profit'].apply( - lambda x: f'{x:,.2%}'.rjust(7, ' ') if not isna(x) else "--".rjust(7, ' ') + trials["Avg profit"] = trials["Avg profit"].apply( + lambda x: f"{x:,.2%}".rjust(7, " ") if not isna(x) else "--".rjust(7, " ") ) - trials['Avg duration'] = trials['Avg duration'].apply( - lambda x: f'{x:,.1f} m'.rjust(7, ' ') if isinstance(x, float) else f"{x}" - if not isna(x) else "--".rjust(7, ' ') + trials["Avg duration"] = trials["Avg duration"].apply( + lambda x: ( + f"{x:,.1f} m".rjust(7, " ") + if isinstance(x, float) + else f"{x}" + if not isna(x) + else "--".rjust(7, " ") + ) ) - trials['Objective'] = trials['Objective'].apply( - lambda x: f'{x:,.5f}'.rjust(8, ' ') if x != 100000 else "N/A".rjust(8, ' ') + trials["Objective"] = trials["Objective"].apply( + lambda x: f"{x:,.5f}".rjust(8, " ") if x != 100000 else "N/A".rjust(8, " ") ) - stake_currency = config['stake_currency'] + stake_currency = config["stake_currency"] - trials[f"Max Drawdown{' (Acct)' if has_account_drawdown else ''}"] = trials.apply( - lambda x: "{} {}".format( - fmt_coin(x['max_drawdown_abs'], stake_currency, keep_trailing_zeros=True), - (f"({x['max_drawdown_account']:,.2%})" - if has_account_drawdown - else f"({x['max_drawdown']:,.2%})" - ).rjust(10, ' ') - ).rjust(25 + len(stake_currency)) - if x['max_drawdown'] != 0.0 or x['max_drawdown_account'] != 0.0 - else '--'.rjust(25 + len(stake_currency)), - axis=1 + trials["Max Drawdown (Acct)"] = trials.apply( + lambda x: ( + "{} {}".format( + fmt_coin(x["max_drawdown_abs"], stake_currency, keep_trailing_zeros=True), + (f"({x['max_drawdown_account']:,.2%})").rjust(10, " "), + ).rjust(25 + len(stake_currency)) + if x["max_drawdown_account"] != 0.0 + else "--".rjust(25 + len(stake_currency)) + ), + axis=1, ) - trials = trials.drop(columns=['max_drawdown_abs', 'max_drawdown', 'max_drawdown_account']) + trials = trials.drop(columns=["max_drawdown_abs", "max_drawdown_account"]) - trials['Profit'] = trials.apply( - lambda x: '{} {}'.format( - fmt_coin(x['Total profit'], stake_currency, keep_trailing_zeros=True), - f"({x['Profit']:,.2%})".rjust(10, ' ') - ).rjust(25 + len(stake_currency)) - if x['Total profit'] != 0.0 else 
'--'.rjust(25 + len(stake_currency)), - axis=1 + trials["Profit"] = trials.apply( + lambda x: ( + "{} {}".format( + fmt_coin(x["Total profit"], stake_currency, keep_trailing_zeros=True), + f"({x['Profit']:,.2%})".rjust(10, " "), + ).rjust(25 + len(stake_currency)) + if x["Total profit"] != 0.0 + else "--".rjust(25 + len(stake_currency)) + ), + axis=1, ) - trials = trials.drop(columns=['Total profit']) + trials = trials.drop(columns=["Total profit"]) if print_colorized: trials2 = trials.astype(str) for i in range(len(trials)): - if trials.loc[i]['is_profit']: + if trials.loc[i]["is_profit"]: for j in range(len(trials.loc[i]) - 3): trials2.iat[i, j] = f"{Fore.GREEN}{str(trials.iloc[i, j])}{Fore.RESET}" - if trials.loc[i]['is_best'] and highlight_best: + if trials.loc[i]["is_best"] and highlight_best: for j in range(len(trials.loc[i]) - 3): trials2.iat[i, j] = ( f"{Style.BRIGHT}{str(trials.iloc[i, j])}{Style.RESET_ALL}" ) trials = trials2 del trials2 - trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit', 'is_random']) + trials = trials.drop(columns=["is_initial_point", "is_best", "is_profit", "is_random"]) if remove_header > 0: table = tabulate.tabulate( - trials.to_dict(orient='list'), tablefmt='orgtbl', - headers='keys', stralign="right" + trials.to_dict(orient="list"), tablefmt="orgtbl", headers="keys", stralign="right" ) table = table.split("\n", remove_header)[remove_header] elif remove_header < 0: table = tabulate.tabulate( - trials.to_dict(orient='list'), tablefmt='psql', - headers='keys', stralign="right" + trials.to_dict(orient="list"), tablefmt="psql", headers="keys", stralign="right" ) table = "\n".join(table.split("\n")[0:remove_header]) else: table = tabulate.tabulate( - trials.to_dict(orient='list'), tablefmt='psql', - headers='keys', stralign="right" + trials.to_dict(orient="list"), tablefmt="psql", headers="keys", stralign="right" ) return table @@ -479,56 +540,75 @@ class HyperoptTools: return try: - Path(csv_file).open('w+').close() + Path(csv_file).open("w+").close() except OSError: logger.error(f"Failed to create CSV file: {csv_file}") return trials = json_normalize(results, max_level=1) - trials['Best'] = '' - trials['Stake currency'] = config['stake_currency'] + trials["Best"] = "" + trials["Stake currency"] = config["stake_currency"] - base_metrics = ['Best', 'current_epoch', 'results_metrics.total_trades', - 'results_metrics.profit_mean', 'results_metrics.profit_median', - 'results_metrics.profit_total', 'Stake currency', - 'results_metrics.profit_total_abs', 'results_metrics.holding_avg', - 'results_metrics.trade_count_long', 'results_metrics.trade_count_short', - 'loss', 'is_initial_point', 'is_best'] + base_metrics = [ + "Best", + "current_epoch", + "results_metrics.total_trades", + "results_metrics.profit_mean", + "results_metrics.profit_median", + "results_metrics.profit_total", + "Stake currency", + "results_metrics.profit_total_abs", + "results_metrics.holding_avg", + "results_metrics.trade_count_long", + "results_metrics.trade_count_short", + "loss", + "is_initial_point", + "is_best", + ] perc_multi = 100 - param_metrics = [("params_dict." + param) for param in results[0]['params_dict'].keys()] + param_metrics = [("params_dict." 
+ param) for param in results[0]["params_dict"].keys()] trials = trials[base_metrics + param_metrics] - base_columns = ['Best', 'Epoch', 'Trades', 'Avg profit', 'Median profit', 'Total profit', - 'Stake currency', 'Profit', 'Avg duration', - 'Trade count long', 'Trade count short', - 'Objective', - 'is_initial_point', 'is_best'] - param_columns = list(results[0]['params_dict'].keys()) + base_columns = [ + "Best", + "Epoch", + "Trades", + "Avg profit", + "Median profit", + "Total profit", + "Stake currency", + "Profit", + "Avg duration", + "Trade count long", + "Trade count short", + "Objective", + "is_initial_point", + "is_best", + ] + param_columns = list(results[0]["params_dict"].keys()) trials.columns = base_columns + param_columns - trials['is_profit'] = False - trials.loc[trials['is_initial_point'], 'Best'] = '*' - trials.loc[trials['is_best'], 'Best'] = 'Best' - trials.loc[trials['is_initial_point'] & trials['is_best'], 'Best'] = '* Best' - trials.loc[trials['Total profit'] > 0, 'is_profit'] = True - trials['Epoch'] = trials['Epoch'].astype(str) - trials['Trades'] = trials['Trades'].astype(str) - trials['Median profit'] = trials['Median profit'] * perc_multi + trials["is_profit"] = False + trials.loc[trials["is_initial_point"], "Best"] = "*" + trials.loc[trials["is_best"], "Best"] = "Best" + trials.loc[trials["is_initial_point"] & trials["is_best"], "Best"] = "* Best" + trials.loc[trials["Total profit"] > 0, "is_profit"] = True + trials["Epoch"] = trials["Epoch"].astype(str) + trials["Trades"] = trials["Trades"].astype(str) + trials["Median profit"] = trials["Median profit"] * perc_multi - trials['Total profit'] = trials['Total profit'].apply( - lambda x: f'{x:,.8f}' if x != 0.0 else "" + trials["Total profit"] = trials["Total profit"].apply( + lambda x: f"{x:,.8f}" if x != 0.0 else "" ) - trials['Profit'] = trials['Profit'].apply( - lambda x: f'{x:,.2f}' if not isna(x) else "" + trials["Profit"] = trials["Profit"].apply(lambda x: f"{x:,.2f}" if not isna(x) else "") + trials["Avg profit"] = trials["Avg profit"].apply( + lambda x: f"{x * perc_multi:,.2f}%" if not isna(x) else "" ) - trials['Avg profit'] = trials['Avg profit'].apply( - lambda x: f'{x * perc_multi:,.2f}%' if not isna(x) else "" - ) - trials['Objective'] = trials['Objective'].apply( - lambda x: f'{x:,.5f}' if x != 100000 else "" + trials["Objective"] = trials["Objective"].apply( + lambda x: f"{x:,.5f}" if x != 100000 else "" ) - trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit']) - trials.to_csv(csv_file, index=False, header=True, mode='w', encoding='UTF-8') + trials = trials.drop(columns=["is_initial_point", "is_best", "is_profit"]) + trials.to_csv(csv_file, index=False, header=True, mode="w", encoding="UTF-8") logger.info(f"CSV file created: {csv_file}") diff --git a/freqtrade/optimize/optimize_reports/__init__.py b/freqtrade/optimize/optimize_reports/__init__.py index bb91bf33c..6f3278a1c 100644 --- a/freqtrade/optimize/optimize_reports/__init__.py +++ b/freqtrade/optimize/optimize_reports/__init__.py @@ -1,17 +1,30 @@ # flake8: noqa: F401 -from freqtrade.optimize.optimize_reports.bt_output import (generate_edge_table, - generate_wins_draws_losses, - show_backtest_result, - show_backtest_results, - show_sorted_pairlist, - text_table_add_metrics, - text_table_bt_results, - text_table_periodic_breakdown, - text_table_strategy, text_table_tags) -from freqtrade.optimize.optimize_reports.bt_storage import (store_backtest_analysis_results, - store_backtest_stats) +from 
freqtrade.optimize.optimize_reports.bt_output import ( + generate_edge_table, + generate_wins_draws_losses, + show_backtest_result, + show_backtest_results, + show_sorted_pairlist, + text_table_add_metrics, + text_table_bt_results, + text_table_periodic_breakdown, + text_table_strategy, + text_table_tags, +) +from freqtrade.optimize.optimize_reports.bt_storage import ( + store_backtest_analysis_results, + store_backtest_stats, +) from freqtrade.optimize.optimize_reports.optimize_reports import ( - generate_all_periodic_breakdown_stats, generate_backtest_stats, generate_daily_stats, - generate_pair_metrics, generate_periodic_breakdown_stats, generate_rejected_signals, - generate_strategy_comparison, generate_strategy_stats, generate_tag_metrics, - generate_trade_signal_candles, generate_trading_stats) + generate_all_periodic_breakdown_stats, + generate_backtest_stats, + generate_daily_stats, + generate_pair_metrics, + generate_periodic_breakdown_stats, + generate_rejected_signals, + generate_strategy_comparison, + generate_strategy_stats, + generate_tag_metrics, + generate_trade_signal_candles, + generate_trading_stats, +) diff --git a/freqtrade/optimize/optimize_reports/bt_output.py b/freqtrade/optimize/optimize_reports/bt_output.py index f90a35469..00e980e6b 100644 --- a/freqtrade/optimize/optimize_reports/bt_output.py +++ b/freqtrade/optimize/optimize_reports/bt_output.py @@ -16,28 +16,34 @@ def _get_line_floatfmt(stake_currency: str) -> List[str]: """ Generate floatformat (goes in line with _generate_result_line()) """ - return ['s', 'd', '.2f', f'.{decimals_per_coin(stake_currency)}f', - '.2f', 'd', 's', 's'] + return ["s", "d", ".2f", f".{decimals_per_coin(stake_currency)}f", ".2f", "d", "s", "s"] -def _get_line_header(first_column: str, stake_currency: str, - direction: str = 'Entries') -> List[str]: +def _get_line_header( + first_column: str, stake_currency: str, direction: str = "Entries" +) -> List[str]: """ Generate header lines (goes in line with _generate_result_line()) """ - return [first_column, direction, 'Avg Profit %', - f'Tot Profit {stake_currency}', 'Tot Profit %', 'Avg Duration', - 'Win Draw Loss Win%'] + return [ + first_column, + direction, + "Avg Profit %", + f"Tot Profit {stake_currency}", + "Tot Profit %", + "Avg Duration", + "Win Draw Loss Win%", + ] def generate_wins_draws_losses(wins, draws, losses): if wins > 0 and losses == 0: - wl_ratio = '100' + wl_ratio = "100" elif wins == 0: - wl_ratio = '0' + wl_ratio = "0" else: - wl_ratio = f'{100.0 / (wins + draws + losses) * wins:.1f}' if losses > 0 else '100' - return f'{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}' + wl_ratio = f"{100.0 / (wins + draws + losses) * wins:.1f}" if losses > 0 else "100" + return f"{wins:>4} {draws:>4} {losses:>4} {wl_ratio:>4}" def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: str) -> str: @@ -48,16 +54,22 @@ def text_table_bt_results(pair_results: List[Dict[str, Any]], stake_currency: st :return: pretty printed table with tabulate as string """ - headers = _get_line_header('Pair', stake_currency) + headers = _get_line_header("Pair", stake_currency) floatfmt = _get_line_floatfmt(stake_currency) - output = [[ - t['key'], t['trades'], t['profit_mean_pct'], t['profit_total_abs'], - t['profit_total_pct'], t['duration_avg'], - generate_wins_draws_losses(t['wins'], t['draws'], t['losses']) - ] for t in pair_results] + output = [ + [ + t["key"], + t["trades"], + t["profit_mean_pct"], + t["profit_total_abs"], + t["profit_total_pct"], + t["duration_avg"], + 
generate_wins_draws_losses(t["wins"], t["draws"], t["losses"]), + ] + for t in pair_results + ] # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") + return tabulate(output, headers=headers, floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_currency: str) -> str: @@ -67,34 +79,37 @@ def text_table_tags(tag_type: str, tag_results: List[Dict[str, Any]], stake_curr :param stake_currency: stake-currency - used to correctly name headers :return: pretty printed table with tabulate as string """ - fallback: str = '' - if (tag_type == "enter_tag"): + fallback: str = "" + if tag_type == "enter_tag": headers = _get_line_header("TAG", stake_currency) else: - headers = _get_line_header("Exit Reason", stake_currency, 'Exits') - fallback = 'exit_reason' + headers = _get_line_header("Exit Reason", stake_currency, "Exits") + fallback = "exit_reason" floatfmt = _get_line_floatfmt(stake_currency) output = [ [ - t['key'] if t.get('key') is not None and len( - str(t['key'])) > 0 else t.get(fallback, "OTHER"), - t['trades'], - t['profit_mean_pct'], - t['profit_total_abs'], - t['profit_total_pct'], - t.get('duration_avg'), - generate_wins_draws_losses( - t['wins'], - t['draws'], - t['losses'])] for t in tag_results] + ( + t["key"] + if t.get("key") is not None and len(str(t["key"])) > 0 + else t.get(fallback, "OTHER") + ), + t["trades"], + t["profit_mean_pct"], + t["profit_total_abs"], + t["profit_total_pct"], + t.get("duration_avg"), + generate_wins_draws_losses(t["wins"], t["draws"], t["losses"]), + ] + for t in tag_results + ] # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") + return tabulate(output, headers=headers, floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") -def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]], - stake_currency: str, period: str) -> str: +def text_table_periodic_breakdown( + days_breakdown_stats: List[Dict[str, Any]], stake_currency: str, period: str +) -> str: """ Generate small table with Backtest results by days :param days_breakdown_stats: Days breakdown metrics @@ -103,15 +118,21 @@ def text_table_periodic_breakdown(days_breakdown_stats: List[Dict[str, Any]], """ headers = [ period.capitalize(), - f'Tot Profit {stake_currency}', - 'Wins', - 'Draws', - 'Losses', + f"Tot Profit {stake_currency}", + "Wins", + "Draws", + "Losses", + ] + output = [ + [ + d["date"], + fmt_coin(d["profit_abs"], stake_currency, False), + d["wins"], + d["draws"], + d["loses"], + ] + for d in days_breakdown_stats ] - output = [[ - d['date'], fmt_coin(d['profit_abs'], stake_currency, False), - d['wins'], d['draws'], d['loses'], - ] for d in days_breakdown_stats] return tabulate(output, headers=headers, tablefmt="orgtbl", stralign="right") @@ -123,264 +144,361 @@ def text_table_strategy(strategy_results, stake_currency: str) -> str: :return: pretty printed table with tabulate as string """ floatfmt = _get_line_floatfmt(stake_currency) - headers = _get_line_header('Strategy', stake_currency) + headers = _get_line_header("Strategy", stake_currency) # _get_line_header() is also used for per-pair summary. Per-pair drawdown is mostly useless # therefore we slip this column in only for strategy summary here. 
- headers.append('Drawdown') + headers.append("Drawdown") # Align drawdown string on the center two space separator. - if 'max_drawdown_account' in strategy_results[0]: + if "max_drawdown_account" in strategy_results[0]: drawdown = [f'{t["max_drawdown_account"] * 100:.2f}' for t in strategy_results] else: # Support for prior backtest results drawdown = [f'{t["max_drawdown_per"]:.2f}' for t in strategy_results] - dd_pad_abs = max([len(t['max_drawdown_abs']) for t in strategy_results]) + dd_pad_abs = max([len(t["max_drawdown_abs"]) for t in strategy_results]) dd_pad_per = max([len(dd) for dd in drawdown]) - drawdown = [f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%' - for t, dd in zip(strategy_results, drawdown)] + drawdown = [ + f'{t["max_drawdown_abs"]:>{dd_pad_abs}} {stake_currency} {dd:>{dd_pad_per}}%' + for t, dd in zip(strategy_results, drawdown) + ] - output = [[ - t['key'], t['trades'], t['profit_mean_pct'], t['profit_total_abs'], - t['profit_total_pct'], t['duration_avg'], - generate_wins_draws_losses(t['wins'], t['draws'], t['losses']), drawdown] - for t, drawdown in zip(strategy_results, drawdown)] + output = [ + [ + t["key"], + t["trades"], + t["profit_mean_pct"], + t["profit_total_abs"], + t["profit_total_pct"], + t["duration_avg"], + generate_wins_draws_losses(t["wins"], t["draws"], t["losses"]), + drawdown, + ] + for t, drawdown in zip(strategy_results, drawdown) + ] # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(output, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") + return tabulate(output, headers=headers, floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") def text_table_add_metrics(strat_results: Dict) -> str: - if len(strat_results['trades']) > 0: - best_trade = max(strat_results['trades'], key=lambda x: x['profit_ratio']) - worst_trade = min(strat_results['trades'], key=lambda x: x['profit_ratio']) + if len(strat_results["trades"]) > 0: + best_trade = max(strat_results["trades"], key=lambda x: x["profit_ratio"]) + worst_trade = min(strat_results["trades"], key=lambda x: x["profit_ratio"]) - short_metrics = [ - ('', ''), # Empty line to improve readability - ('Long / Short', - f"{strat_results.get('trade_count_long', 'total_trades')} / " - f"{strat_results.get('trade_count_short', 0)}"), - ('Total profit Long %', f"{strat_results['profit_total_long']:.2%}"), - ('Total profit Short %', f"{strat_results['profit_total_short']:.2%}"), - ('Absolute profit Long', fmt_coin(strat_results['profit_total_long_abs'], - strat_results['stake_currency'])), - ('Absolute profit Short', fmt_coin(strat_results['profit_total_short_abs'], - strat_results['stake_currency'])), - ] if strat_results.get('trade_count_short', 0) > 0 else [] + short_metrics = ( + [ + ("", ""), # Empty line to improve readability + ( + "Long / Short", + f"{strat_results.get('trade_count_long', 'total_trades')} / " + f"{strat_results.get('trade_count_short', 0)}", + ), + ("Total profit Long %", f"{strat_results['profit_total_long']:.2%}"), + ("Total profit Short %", f"{strat_results['profit_total_short']:.2%}"), + ( + "Absolute profit Long", + fmt_coin( + strat_results["profit_total_long_abs"], strat_results["stake_currency"] + ), + ), + ( + "Absolute profit Short", + fmt_coin( + strat_results["profit_total_short_abs"], strat_results["stake_currency"] + ), + ), + ] + if strat_results.get("trade_count_short", 0) > 0 + else [] + ) drawdown_metrics = [] - if 'max_relative_drawdown' in strat_results: + if 
"max_relative_drawdown" in strat_results: # Compatibility to show old hyperopt results drawdown_metrics.append( - ('Max % of account underwater', f"{strat_results['max_relative_drawdown']:.2%}") + ("Max % of account underwater", f"{strat_results['max_relative_drawdown']:.2%}") ) - drawdown_metrics.extend([ - ('Absolute Drawdown (Account)', f"{strat_results['max_drawdown_account']:.2%}") - if 'max_drawdown_account' in strat_results else ( - 'Drawdown', f"{strat_results['max_drawdown']:.2%}"), - ('Absolute Drawdown', fmt_coin(strat_results['max_drawdown_abs'], - strat_results['stake_currency'])), - ('Drawdown high', fmt_coin(strat_results['max_drawdown_high'], - strat_results['stake_currency'])), - ('Drawdown low', fmt_coin(strat_results['max_drawdown_low'], - strat_results['stake_currency'])), - ('Drawdown Start', strat_results['drawdown_start']), - ('Drawdown End', strat_results['drawdown_end']), - ]) + drawdown_metrics.extend( + [ + ( + ("Absolute Drawdown (Account)", f"{strat_results['max_drawdown_account']:.2%}") + if "max_drawdown_account" in strat_results + else ("Drawdown", f"{strat_results['max_drawdown']:.2%}") + ), + ( + "Absolute Drawdown", + fmt_coin(strat_results["max_drawdown_abs"], strat_results["stake_currency"]), + ), + ( + "Drawdown high", + fmt_coin(strat_results["max_drawdown_high"], strat_results["stake_currency"]), + ), + ( + "Drawdown low", + fmt_coin(strat_results["max_drawdown_low"], strat_results["stake_currency"]), + ), + ("Drawdown Start", strat_results["drawdown_start"]), + ("Drawdown End", strat_results["drawdown_end"]), + ] + ) - entry_adjustment_metrics = [ - ('Canceled Trade Entries', strat_results.get('canceled_trade_entries', 'N/A')), - ('Canceled Entry Orders', strat_results.get('canceled_entry_orders', 'N/A')), - ('Replaced Entry Orders', strat_results.get('replaced_entry_orders', 'N/A')), - ] if strat_results.get('canceled_entry_orders', 0) > 0 else [] + entry_adjustment_metrics = ( + [ + ("Canceled Trade Entries", strat_results.get("canceled_trade_entries", "N/A")), + ("Canceled Entry Orders", strat_results.get("canceled_entry_orders", "N/A")), + ("Replaced Entry Orders", strat_results.get("replaced_entry_orders", "N/A")), + ] + if strat_results.get("canceled_entry_orders", 0) > 0 + else [] + ) # Newly added fields should be ignored if they are missing in strat_results. hyperopt-show # command stores these results and newer version of freqtrade must be able to handle old # results with missing new fields. 
metrics = [ - ('Backtesting from', strat_results['backtest_start']), - ('Backtesting to', strat_results['backtest_end']), - ('Max open trades', strat_results['max_open_trades']), - ('', ''), # Empty line to improve readability - ('Total/Daily Avg Trades', - f"{strat_results['total_trades']} / {strat_results['trades_per_day']}"), - - ('Starting balance', fmt_coin(strat_results['starting_balance'], - strat_results['stake_currency'])), - ('Final balance', fmt_coin(strat_results['final_balance'], - strat_results['stake_currency'])), - ('Absolute profit ', fmt_coin(strat_results['profit_total_abs'], - strat_results['stake_currency'])), - ('Total profit %', f"{strat_results['profit_total']:.2%}"), - ('CAGR %', f"{strat_results['cagr']:.2%}" if 'cagr' in strat_results else 'N/A'), - ('Sortino', f"{strat_results['sortino']:.2f}" if 'sortino' in strat_results else 'N/A'), - ('Sharpe', f"{strat_results['sharpe']:.2f}" if 'sharpe' in strat_results else 'N/A'), - ('Calmar', f"{strat_results['calmar']:.2f}" if 'calmar' in strat_results else 'N/A'), - ('Profit factor', f'{strat_results["profit_factor"]:.2f}' if 'profit_factor' - in strat_results else 'N/A'), - ('Expectancy (Ratio)', ( - f"{strat_results['expectancy']:.2f} ({strat_results['expectancy_ratio']:.2f})" if - 'expectancy_ratio' in strat_results else 'N/A')), - ('Trades per day', strat_results['trades_per_day']), - ('Avg. daily profit %', - f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}"), - ('Avg. stake amount', fmt_coin(strat_results['avg_stake_amount'], - strat_results['stake_currency'])), - ('Total trade volume', fmt_coin(strat_results['total_volume'], - strat_results['stake_currency'])), + ("Backtesting from", strat_results["backtest_start"]), + ("Backtesting to", strat_results["backtest_end"]), + ("Max open trades", strat_results["max_open_trades"]), + ("", ""), # Empty line to improve readability + ( + "Total/Daily Avg Trades", + f"{strat_results['total_trades']} / {strat_results['trades_per_day']}", + ), + ( + "Starting balance", + fmt_coin(strat_results["starting_balance"], strat_results["stake_currency"]), + ), + ( + "Final balance", + fmt_coin(strat_results["final_balance"], strat_results["stake_currency"]), + ), + ( + "Absolute profit ", + fmt_coin(strat_results["profit_total_abs"], strat_results["stake_currency"]), + ), + ("Total profit %", f"{strat_results['profit_total']:.2%}"), + ("CAGR %", f"{strat_results['cagr']:.2%}" if "cagr" in strat_results else "N/A"), + ("Sortino", f"{strat_results['sortino']:.2f}" if "sortino" in strat_results else "N/A"), + ("Sharpe", f"{strat_results['sharpe']:.2f}" if "sharpe" in strat_results else "N/A"), + ("Calmar", f"{strat_results['calmar']:.2f}" if "calmar" in strat_results else "N/A"), + ( + "Profit factor", + ( + f'{strat_results["profit_factor"]:.2f}' + if "profit_factor" in strat_results + else "N/A" + ), + ), + ( + "Expectancy (Ratio)", + ( + f"{strat_results['expectancy']:.2f} ({strat_results['expectancy_ratio']:.2f})" + if "expectancy_ratio" in strat_results + else "N/A" + ), + ), + ( + "Avg. daily profit %", + f"{(strat_results['profit_total'] / strat_results['backtest_days']):.2%}", + ), + ( + "Avg. 
stake amount", + fmt_coin(strat_results["avg_stake_amount"], strat_results["stake_currency"]), + ), + ( + "Total trade volume", + fmt_coin(strat_results["total_volume"], strat_results["stake_currency"]), + ), *short_metrics, - ('', ''), # Empty line to improve readability - ('Best Pair', f"{strat_results['best_pair']['key']} " - f"{strat_results['best_pair']['profit_total']:.2%}"), - ('Worst Pair', f"{strat_results['worst_pair']['key']} " - f"{strat_results['worst_pair']['profit_total']:.2%}"), - ('Best trade', f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"), - ('Worst trade', f"{worst_trade['pair']} " - f"{worst_trade['profit_ratio']:.2%}"), - - ('Best day', fmt_coin(strat_results['backtest_best_day_abs'], - strat_results['stake_currency'])), - ('Worst day', fmt_coin(strat_results['backtest_worst_day_abs'], - strat_results['stake_currency'])), - ('Days win/draw/lose', f"{strat_results['winning_days']} / " - f"{strat_results['draw_days']} / {strat_results['losing_days']}"), - ('Avg. Duration Winners', f"{strat_results['winner_holding_avg']}"), - ('Avg. Duration Loser', f"{strat_results['loser_holding_avg']}"), - ('Max Consecutive Wins / Loss', - f"{strat_results['max_consecutive_wins']} / {strat_results['max_consecutive_losses']}" - if 'max_consecutive_losses' in strat_results else 'N/A'), - ('Rejected Entry signals', strat_results.get('rejected_signals', 'N/A')), - ('Entry/Exit Timeouts', - f"{strat_results.get('timedout_entry_orders', 'N/A')} / " - f"{strat_results.get('timedout_exit_orders', 'N/A')}"), + ("", ""), # Empty line to improve readability + ( + "Best Pair", + f"{strat_results['best_pair']['key']} " + f"{strat_results['best_pair']['profit_total']:.2%}", + ), + ( + "Worst Pair", + f"{strat_results['worst_pair']['key']} " + f"{strat_results['worst_pair']['profit_total']:.2%}", + ), + ("Best trade", f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"), + ("Worst trade", f"{worst_trade['pair']} {worst_trade['profit_ratio']:.2%}"), + ( + "Best day", + fmt_coin(strat_results["backtest_best_day_abs"], strat_results["stake_currency"]), + ), + ( + "Worst day", + fmt_coin(strat_results["backtest_worst_day_abs"], strat_results["stake_currency"]), + ), + ( + "Days win/draw/lose", + f"{strat_results['winning_days']} / " + f"{strat_results['draw_days']} / {strat_results['losing_days']}", + ), + ("Avg. Duration Winners", f"{strat_results['winner_holding_avg']}"), + ("Avg. 
Duration Loser", f"{strat_results['loser_holding_avg']}"), + ( + "Max Consecutive Wins / Loss", + ( + ( + f"{strat_results['max_consecutive_wins']} / " + f"{strat_results['max_consecutive_losses']}" + ) + if "max_consecutive_losses" in strat_results + else "N/A" + ), + ), + ("Rejected Entry signals", strat_results.get("rejected_signals", "N/A")), + ( + "Entry/Exit Timeouts", + f"{strat_results.get('timedout_entry_orders', 'N/A')} / " + f"{strat_results.get('timedout_exit_orders', 'N/A')}", + ), *entry_adjustment_metrics, - ('', ''), # Empty line to improve readability - - ('Min balance', fmt_coin(strat_results['csum_min'], strat_results['stake_currency'])), - ('Max balance', fmt_coin(strat_results['csum_max'], strat_results['stake_currency'])), - + ("", ""), # Empty line to improve readability + ("Min balance", fmt_coin(strat_results["csum_min"], strat_results["stake_currency"])), + ("Max balance", fmt_coin(strat_results["csum_max"], strat_results["stake_currency"])), *drawdown_metrics, - ('Market change', f"{strat_results['market_change']:.2%}"), + ("Market change", f"{strat_results['market_change']:.2%}"), ] return tabulate(metrics, headers=["Metric", "Value"], tablefmt="orgtbl") else: - start_balance = fmt_coin(strat_results['starting_balance'], strat_results['stake_currency']) - stake_amount = fmt_coin( - strat_results['stake_amount'], strat_results['stake_currency'] - ) if strat_results['stake_amount'] != UNLIMITED_STAKE_AMOUNT else 'unlimited' + start_balance = fmt_coin(strat_results["starting_balance"], strat_results["stake_currency"]) + stake_amount = ( + fmt_coin(strat_results["stake_amount"], strat_results["stake_currency"]) + if strat_results["stake_amount"] != UNLIMITED_STAKE_AMOUNT + else "unlimited" + ) - message = ("No trades made. " - f"Your starting balance was {start_balance}, " - f"and your stake was {stake_amount}." - ) + message = ( + "No trades made. " + f"Your starting balance was {start_balance}, " + f"and your stake was {stake_amount}." 
+ ) return message -def show_backtest_result(strategy: str, results: Dict[str, Any], stake_currency: str, - backtest_breakdown: List[str]): +def show_backtest_result( + strategy: str, results: Dict[str, Any], stake_currency: str, backtest_breakdown: List[str] +): """ Print results for one strategy """ # Print results print(f"Result for strategy {strategy}") - table = text_table_bt_results(results['results_per_pair'], stake_currency=stake_currency) + table = text_table_bt_results(results["results_per_pair"], stake_currency=stake_currency) if isinstance(table, str): - print(' BACKTESTING REPORT '.center(len(table.splitlines()[0]), '=')) + print(" BACKTESTING REPORT ".center(len(table.splitlines()[0]), "=")) print(table) - table = text_table_bt_results(results['left_open_trades'], stake_currency=stake_currency) + table = text_table_bt_results(results["left_open_trades"], stake_currency=stake_currency) if isinstance(table, str) and len(table) > 0: - print(' LEFT OPEN TRADES REPORT '.center(len(table.splitlines()[0]), '=')) + print(" LEFT OPEN TRADES REPORT ".center(len(table.splitlines()[0]), "=")) print(table) - if (enter_tags := results.get('results_per_enter_tag')) is not None: + if (enter_tags := results.get("results_per_enter_tag")) is not None: table = text_table_tags("enter_tag", enter_tags, stake_currency) if isinstance(table, str) and len(table) > 0: - print(' ENTER TAG STATS '.center(len(table.splitlines()[0]), '=')) + print(" ENTER TAG STATS ".center(len(table.splitlines()[0]), "=")) print(table) - if (exit_reasons := results.get('exit_reason_summary')) is not None: + if (exit_reasons := results.get("exit_reason_summary")) is not None: table = text_table_tags("exit_tag", exit_reasons, stake_currency) if isinstance(table, str) and len(table) > 0: - print(' EXIT REASON STATS '.center(len(table.splitlines()[0]), '=')) + print(" EXIT REASON STATS ".center(len(table.splitlines()[0]), "=")) print(table) for period in backtest_breakdown: - if period in results.get('periodic_breakdown', {}): - days_breakdown_stats = results['periodic_breakdown'][period] + if period in results.get("periodic_breakdown", {}): + days_breakdown_stats = results["periodic_breakdown"][period] else: days_breakdown_stats = generate_periodic_breakdown_stats( - trade_list=results['trades'], period=period) - table = text_table_periodic_breakdown(days_breakdown_stats=days_breakdown_stats, - stake_currency=stake_currency, period=period) + trade_list=results["trades"], period=period + ) + table = text_table_periodic_breakdown( + days_breakdown_stats=days_breakdown_stats, stake_currency=stake_currency, period=period + ) if isinstance(table, str) and len(table) > 0: - print(f' {period.upper()} BREAKDOWN '.center(len(table.splitlines()[0]), '=')) + print(f" {period.upper()} BREAKDOWN ".center(len(table.splitlines()[0]), "=")) print(table) table = text_table_add_metrics(results) if isinstance(table, str) and len(table) > 0: - print(' SUMMARY METRICS '.center(len(table.splitlines()[0]), '=')) + print(" SUMMARY METRICS ".center(len(table.splitlines()[0]), "=")) print(table) if isinstance(table, str) and len(table) > 0: - print('=' * len(table.splitlines()[0])) + print("=" * len(table.splitlines()[0])) print() def show_backtest_results(config: Config, backtest_stats: BacktestResultType): - stake_currency = config['stake_currency'] + stake_currency = config["stake_currency"] - for strategy, results in backtest_stats['strategy'].items(): + for strategy, results in backtest_stats["strategy"].items(): show_backtest_result( - 
strategy, results, stake_currency, - config.get('backtest_breakdown', [])) + strategy, results, stake_currency, config.get("backtest_breakdown", []) + ) - if len(backtest_stats['strategy']) > 0: + if len(backtest_stats["strategy"]) > 0: # Print Strategy summary table - table = text_table_strategy(backtest_stats['strategy_comparison'], stake_currency) - print(f"Backtested {results['backtest_start']} -> {results['backtest_end']} |" - f" Max open trades : {results['max_open_trades']}") - print(' STRATEGY SUMMARY '.center(len(table.splitlines()[0]), '=')) + table = text_table_strategy(backtest_stats["strategy_comparison"], stake_currency) + print( + f"Backtested {results['backtest_start']} -> {results['backtest_end']} |" + f" Max open trades : {results['max_open_trades']}" + ) + print(" STRATEGY SUMMARY ".center(len(table.splitlines()[0]), "=")) print(table) - print('=' * len(table.splitlines()[0])) - print('\nFor more details, please look at the detail tables above') + print("=" * len(table.splitlines()[0])) + print("\nFor more details, please look at the detail tables above") def show_sorted_pairlist(config: Config, backtest_stats: BacktestResultType): - if config.get('backtest_show_pair_list', False): - for strategy, results in backtest_stats['strategy'].items(): + if config.get("backtest_show_pair_list", False): + for strategy, results in backtest_stats["strategy"].items(): print(f"Pairs for Strategy {strategy}: \n[") - for result in results['results_per_pair']: - if result["key"] != 'TOTAL': + for result in results["results_per_pair"]: + if result["key"] != "TOTAL": print(f'"{result["key"]}", // {result["profit_mean"]:.2%}') print("]") def generate_edge_table(results: dict) -> str: - floatfmt = ('s', '.10g', '.2f', '.2f', '.2f', '.2f', 'd', 'd', 'd') + floatfmt = ("s", ".10g", ".2f", ".2f", ".2f", ".2f", "d", "d", "d") tabular_data = [] - headers = ['Pair', 'Stoploss', 'Win Rate', 'Risk Reward Ratio', - 'Required Risk Reward', 'Expectancy', 'Total Number of Trades', - 'Average Duration (min)'] + headers = [ + "Pair", + "Stoploss", + "Win Rate", + "Risk Reward Ratio", + "Required Risk Reward", + "Expectancy", + "Total Number of Trades", + "Average Duration (min)", + ] for result in results.items(): if result[1].nb_trades > 0: - tabular_data.append([ - result[0], - result[1].stoploss, - result[1].winrate, - result[1].risk_reward_ratio, - result[1].required_risk_reward, - result[1].expectancy, - result[1].nb_trades, - round(result[1].avg_trade_duration) - ]) + tabular_data.append( + [ + result[0], + result[1].stoploss, + result[1].winrate, + result[1].risk_reward_ratio, + result[1].required_risk_reward, + result[1].expectancy, + result[1].nb_trades, + round(result[1].avg_trade_duration), + ] + ) # Ignore type as floatfmt does allow tuples but mypy does not know that - return tabulate(tabular_data, headers=headers, - floatfmt=floatfmt, tablefmt="orgtbl", stralign="right") + return tabulate( + tabular_data, headers=headers, floatfmt=floatfmt, tablefmt="orgtbl", stralign="right" + ) diff --git a/freqtrade/optimize/optimize_reports/bt_storage.py b/freqtrade/optimize/optimize_reports/bt_storage.py index a8a8bf7f2..ea8991337 100644 --- a/freqtrade/optimize/optimize_reports/bt_storage.py +++ b/freqtrade/optimize/optimize_reports/bt_storage.py @@ -22,17 +22,21 @@ def _generate_filename(recordfilename: Path, appendix: str, suffix: str) -> Path :return: Generated filename as a Path object """ if recordfilename.is_dir(): - filename = (recordfilename / f'backtest-result-{appendix}').with_suffix(suffix) 
+ filename = (recordfilename / f"backtest-result-{appendix}").with_suffix(suffix) else: filename = Path.joinpath( - recordfilename.parent, f'{recordfilename.stem}-{appendix}' + recordfilename.parent, f"{recordfilename.stem}-{appendix}" ).with_suffix(suffix) return filename def store_backtest_stats( - recordfilename: Path, stats: BacktestResultType, dtappendix: str, *, - market_change_data: Optional[DataFrame] = None) -> Path: + recordfilename: Path, + stats: BacktestResultType, + dtappendix: str, + *, + market_change_data: Optional[DataFrame] = None, +) -> Path: """ Stores backtest results :param recordfilename: Path object, which can either be a filename or a directory. @@ -41,32 +45,33 @@ def store_backtest_stats( :param stats: Dataframe containing the backtesting statistics :param dtappendix: Datetime to use for the filename """ - filename = _generate_filename(recordfilename, dtappendix, '.json') + filename = _generate_filename(recordfilename, dtappendix, ".json") # Store metadata separately. - file_dump_json(get_backtest_metadata_filename(filename), stats['metadata']) + file_dump_json(get_backtest_metadata_filename(filename), stats["metadata"]) # Don't mutate the original stats dict. stats_copy = { - 'strategy': stats['strategy'], - 'strategy_comparison': stats['strategy_comparison'], + "strategy": stats["strategy"], + "strategy_comparison": stats["strategy_comparison"], } file_dump_json(filename, stats_copy) latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN) - file_dump_json(latest_filename, {'latest_backtest': str(filename.name)}) + file_dump_json(latest_filename, {"latest_backtest": str(filename.name)}) if market_change_data is not None: - filename_mc = _generate_filename(recordfilename, f"{dtappendix}_market_change", '.feather') + filename_mc = _generate_filename(recordfilename, f"{dtappendix}_market_change", ".feather") market_change_data.reset_index().to_feather( - filename_mc, compression_level=9, compression='lz4') + filename_mc, compression_level=9, compression="lz4" + ) return filename def _store_backtest_analysis_data( - recordfilename: Path, data: Dict[str, Dict], - dtappendix: str, name: str) -> Path: + recordfilename: Path, data: Dict[str, Dict], dtappendix: str, name: str +) -> Path: """ Stores backtest trade candles for analysis :param recordfilename: Path object, which can either be a filename or a directory. @@ -77,7 +82,7 @@ def _store_backtest_analysis_data( :param dtappendix: Datetime to use for the filename :param name: Name to use for the file, e.g. 
signals, rejected """ - filename = _generate_filename(recordfilename, f"{dtappendix}_{name}", '.pkl') + filename = _generate_filename(recordfilename, f"{dtappendix}_{name}", ".pkl") file_dump_joblib(filename, data) @@ -85,7 +90,7 @@ def _store_backtest_analysis_data( def store_backtest_analysis_results( - recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict], - dtappendix: str) -> None: + recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict], dtappendix: str +) -> None: _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals") _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected") diff --git a/freqtrade/optimize/optimize_reports/optimize_reports.py b/freqtrade/optimize/optimize_reports/optimize_reports.py index 1bf73d714..2ca467eb6 100644 --- a/freqtrade/optimize/optimize_reports/optimize_reports.py +++ b/freqtrade/optimize/optimize_reports/optimize_reports.py @@ -7,9 +7,16 @@ import numpy as np from pandas import DataFrame, Series, concat, to_datetime from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT -from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum, - calculate_expectancy, calculate_market_change, - calculate_max_drawdown, calculate_sharpe, calculate_sortino) +from freqtrade.data.metrics import ( + calculate_cagr, + calculate_calmar, + calculate_csum, + calculate_expectancy, + calculate_market_change, + calculate_max_drawdown, + calculate_sharpe, + calculate_sortino, +) from freqtrade.types import BacktestResultType from freqtrade.util import decimals_per_coin, fmt_coin @@ -17,43 +24,45 @@ from freqtrade.util import decimals_per_coin, fmt_coin logger = logging.getLogger(__name__) -def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame], - bt_results: Dict[str, Any]) -> DataFrame: +def generate_trade_signal_candles( + preprocessed_df: Dict[str, DataFrame], bt_results: Dict[str, Any] +) -> DataFrame: signal_candles_only = {} for pair in preprocessed_df.keys(): signal_candles_only_df = DataFrame() pairdf = preprocessed_df[pair] - resdf = bt_results['results'] + resdf = bt_results["results"] pairresults = resdf.loc[(resdf["pair"] == pair)] if pairdf.shape[0] > 0: for t, v in pairresults.open_date.items(): - allinds = pairdf.loc[(pairdf['date'] < v)] + allinds = pairdf.loc[(pairdf["date"] < v)] signal_inds = allinds.iloc[[-1]] - signal_candles_only_df = concat([ - signal_candles_only_df.infer_objects(), - signal_inds.infer_objects()]) + signal_candles_only_df = concat( + [signal_candles_only_df.infer_objects(), signal_inds.infer_objects()] + ) signal_candles_only[pair] = signal_candles_only_df return signal_candles_only -def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame], - rejected_dict: Dict[str, DataFrame]) -> Dict[str, DataFrame]: +def generate_rejected_signals( + preprocessed_df: Dict[str, DataFrame], rejected_dict: Dict[str, DataFrame] +) -> Dict[str, DataFrame]: rejected_candles_only = {} for pair, signals in rejected_dict.items(): rejected_signals_only_df = DataFrame() pairdf = preprocessed_df[pair] for t in signals: - data_df_row = pairdf.loc[(pairdf['date'] == t[0])].copy() - data_df_row['pair'] = pair - data_df_row['enter_tag'] = t[1] + data_df_row = pairdf.loc[(pairdf["date"] == t[0])].copy() + data_df_row["pair"] = pair + data_df_row["enter_tag"] = t[1] - rejected_signals_only_df = concat([ - rejected_signals_only_df.infer_objects(), - data_df_row.infer_objects()]) + rejected_signals_only_df = concat( + 
[rejected_signals_only_df.infer_objects(), data_df_row.infer_objects()] + ) rejected_candles_only[pair] = rejected_signals_only_df return rejected_candles_only @@ -63,39 +72,47 @@ def _generate_result_line(result: DataFrame, starting_balance: int, first_column """ Generate one result dict, with "first_column" as key. """ - profit_sum = result['profit_ratio'].sum() + profit_sum = result["profit_ratio"].sum() # (end-capital - starting capital) / starting capital - profit_total = result['profit_abs'].sum() / starting_balance + profit_total = result["profit_abs"].sum() / starting_balance return { - 'key': first_column, - 'trades': len(result), - 'profit_mean': result['profit_ratio'].mean() if len(result) > 0 else 0.0, - 'profit_mean_pct': round(result['profit_ratio'].mean() * 100.0, 2 - ) if len(result) > 0 else 0.0, - 'profit_sum': profit_sum, - 'profit_sum_pct': round(profit_sum * 100.0, 2), - 'profit_total_abs': result['profit_abs'].sum(), - 'profit_total': profit_total, - 'profit_total_pct': round(profit_total * 100.0, 2), - 'duration_avg': str(timedelta( - minutes=round(result['trade_duration'].mean())) - ) if not result.empty else '0:00', + "key": first_column, + "trades": len(result), + "profit_mean": result["profit_ratio"].mean() if len(result) > 0 else 0.0, + "profit_mean_pct": ( + round(result["profit_ratio"].mean() * 100.0, 2) if len(result) > 0 else 0.0 + ), + "profit_sum": profit_sum, + "profit_sum_pct": round(profit_sum * 100.0, 2), + "profit_total_abs": result["profit_abs"].sum(), + "profit_total": profit_total, + "profit_total_pct": round(profit_total * 100.0, 2), + "duration_avg": ( + str(timedelta(minutes=round(result["trade_duration"].mean()))) + if not result.empty + else "0:00" + ), # 'duration_max': str(timedelta( # minutes=round(result['trade_duration'].max())) # ) if not result.empty else '0:00', # 'duration_min': str(timedelta( # minutes=round(result['trade_duration'].min())) # ) if not result.empty else '0:00', - 'wins': len(result[result['profit_abs'] > 0]), - 'draws': len(result[result['profit_abs'] == 0]), - 'losses': len(result[result['profit_abs'] < 0]), - 'winrate': len(result[result['profit_abs'] > 0]) / len(result) if len(result) else 0.0, + "wins": len(result[result["profit_abs"] > 0]), + "draws": len(result[result["profit_abs"] == 0]), + "losses": len(result[result["profit_abs"] < 0]), + "winrate": len(result[result["profit_abs"] > 0]) / len(result) if len(result) else 0.0, } -def generate_pair_metrics(pairlist: List[str], stake_currency: str, starting_balance: int, - results: DataFrame, skip_nan: bool = False) -> List[Dict]: +def generate_pair_metrics( + pairlist: List[str], + stake_currency: str, + starting_balance: int, + results: DataFrame, + skip_nan: bool = False, +) -> List[Dict]: """ Generates and returns a list for the given backtest data and the results dataframe :param pairlist: Pairlist used @@ -109,24 +126,23 @@ def generate_pair_metrics(pairlist: List[str], stake_currency: str, starting_bal tabular_data = [] for pair in pairlist: - result = results[results['pair'] == pair] - if skip_nan and result['profit_abs'].isnull().all(): + result = results[results["pair"] == pair] + if skip_nan and result["profit_abs"].isnull().all(): continue tabular_data.append(_generate_result_line(result, starting_balance, pair)) # Sort by total profit %: - tabular_data = sorted(tabular_data, key=lambda k: k['profit_total_abs'], reverse=True) + tabular_data = sorted(tabular_data, key=lambda k: k["profit_total_abs"], reverse=True) # Append Total - 
tabular_data.append(_generate_result_line(results, starting_balance, 'TOTAL')) + tabular_data.append(_generate_result_line(results, starting_balance, "TOTAL")) return tabular_data -def generate_tag_metrics(tag_type: str, - starting_balance: int, - results: DataFrame, - skip_nan: bool = False) -> List[Dict]: +def generate_tag_metrics( + tag_type: str, starting_balance: int, results: DataFrame, skip_nan: bool = False +) -> List[Dict]: """ Generates and returns a list of metrics for the given tag trades and the results dataframe :param starting_balance: Starting balance @@ -140,16 +156,16 @@ def generate_tag_metrics(tag_type: str, if tag_type in results.columns: for tag, count in results[tag_type].value_counts().items(): result = results[results[tag_type] == tag] - if skip_nan and result['profit_abs'].isnull().all(): + if skip_nan and result["profit_abs"].isnull().all(): continue tabular_data.append(_generate_result_line(result, starting_balance, tag)) # Sort by total profit %: - tabular_data = sorted(tabular_data, key=lambda k: k['profit_total_abs'], reverse=True) + tabular_data = sorted(tabular_data, key=lambda k: k["profit_total_abs"], reverse=True) # Append Total - tabular_data.append(_generate_result_line(results, starting_balance, 'TOTAL')) + tabular_data.append(_generate_result_line(results, starting_balance, "TOTAL")) return tabular_data else: return [] @@ -164,51 +180,52 @@ def generate_strategy_comparison(bt_stats: Dict) -> List[Dict]: tabular_data = [] for strategy, result in bt_stats.items(): - tabular_data.append(deepcopy(result['results_per_pair'][-1])) + tabular_data.append(deepcopy(result["results_per_pair"][-1])) # Update "key" to strategy (results_per_pair has it as "Total"). - tabular_data[-1]['key'] = strategy - tabular_data[-1]['max_drawdown_account'] = result['max_drawdown_account'] - tabular_data[-1]['max_drawdown_abs'] = fmt_coin( - result['max_drawdown_abs'], result['stake_currency'], False) + tabular_data[-1]["key"] = strategy + tabular_data[-1]["max_drawdown_account"] = result["max_drawdown_account"] + tabular_data[-1]["max_drawdown_abs"] = fmt_coin( + result["max_drawdown_abs"], result["stake_currency"], False + ) return tabular_data def _get_resample_from_period(period: str) -> str: - if period == 'day': - return '1d' - if period == 'week': + if period == "day": + return "1d" + if period == "week": # Weekly defaulting to Monday. 
- return '1W-MON' - if period == 'month': - return '1ME' + return "1W-MON" + if period == "month": + return "1ME" raise ValueError(f"Period {period} is not supported.") def generate_periodic_breakdown_stats( - trade_list: Union[List, DataFrame], period: str) -> List[Dict[str, Any]]: - + trade_list: Union[List, DataFrame], period: str +) -> List[Dict[str, Any]]: results = trade_list if not isinstance(trade_list, list) else DataFrame.from_records(trade_list) if len(results) == 0: return [] - results['close_date'] = to_datetime(results['close_date'], utc=True) + results["close_date"] = to_datetime(results["close_date"], utc=True) resample_period = _get_resample_from_period(period) - resampled = results.resample(resample_period, on='close_date') + resampled = results.resample(resample_period, on="close_date") stats = [] for name, day in resampled: - profit_abs = day['profit_abs'].sum().round(10) - wins = sum(day['profit_abs'] > 0) - draws = sum(day['profit_abs'] == 0) - loses = sum(day['profit_abs'] < 0) - trades = (wins + draws + loses) + profit_abs = day["profit_abs"].sum().round(10) + wins = sum(day["profit_abs"] > 0) + draws = sum(day["profit_abs"] == 0) + loses = sum(day["profit_abs"] < 0) + trades = wins + draws + loses stats.append( { - 'date': name.strftime('%d/%m/%Y'), - 'date_ts': int(name.to_pydatetime().timestamp() * 1000), - 'profit_abs': profit_abs, - 'wins': wins, - 'draws': draws, - 'loses': loses, - 'winrate': wins / trades if trades else 0.0, + "date": name.strftime("%d/%m/%Y"), + "date_ts": int(name.to_pydatetime().timestamp() * 1000), + "profit_abs": profit_abs, + "wins": wins, + "draws": draws, + "loses": loses, + "winrate": wins / trades if trades else 0.0, } ) return stats @@ -228,74 +245,83 @@ def calc_streak(dataframe: DataFrame) -> Tuple[int, int]: :return: Tuple containing consecutive wins and losses """ - df = Series(np.where(dataframe['profit_ratio'] > 0, 'win', 'loss')).to_frame('result') - df['streaks'] = df['result'].ne(df['result'].shift()).cumsum().rename('streaks') - df['counter'] = df['streaks'].groupby(df['streaks']).cumcount() + 1 - res = df.groupby(df['result']).max() + df = Series(np.where(dataframe["profit_ratio"] > 0, "win", "loss")).to_frame("result") + df["streaks"] = df["result"].ne(df["result"].shift()).cumsum().rename("streaks") + df["counter"] = df["streaks"].groupby(df["streaks"]).cumcount() + 1 + res = df.groupby(df["result"]).max() # - cons_wins = int(res.loc['win', 'counter']) if 'win' in res.index else 0 - cons_losses = int(res.loc['loss', 'counter']) if 'loss' in res.index else 0 + cons_wins = int(res.loc["win", "counter"]) if "win" in res.index else 0 + cons_losses = int(res.loc["loss", "counter"]) if "loss" in res.index else 0 return cons_wins, cons_losses def generate_trading_stats(results: DataFrame) -> Dict[str, Any]: - """ Generate overall trade statistics """ + """Generate overall trade statistics""" if len(results) == 0: return { - 'wins': 0, - 'losses': 0, - 'draws': 0, - 'winrate': 0, - 'holding_avg': timedelta(), - 'winner_holding_avg': timedelta(), - 'loser_holding_avg': timedelta(), - 'max_consecutive_wins': 0, - 'max_consecutive_losses': 0, + "wins": 0, + "losses": 0, + "draws": 0, + "winrate": 0, + "holding_avg": timedelta(), + "winner_holding_avg": timedelta(), + "loser_holding_avg": timedelta(), + "max_consecutive_wins": 0, + "max_consecutive_losses": 0, } - winning_trades = results.loc[results['profit_ratio'] > 0] - draw_trades = results.loc[results['profit_ratio'] == 0] - losing_trades = 
results.loc[results['profit_ratio'] < 0] + winning_trades = results.loc[results["profit_ratio"] > 0] + draw_trades = results.loc[results["profit_ratio"] == 0] + losing_trades = results.loc[results["profit_ratio"] < 0] - holding_avg = (timedelta(minutes=round(results['trade_duration'].mean())) - if not results.empty else timedelta()) - winner_holding_avg = (timedelta(minutes=round(winning_trades['trade_duration'].mean())) - if not winning_trades.empty else timedelta()) - loser_holding_avg = (timedelta(minutes=round(losing_trades['trade_duration'].mean())) - if not losing_trades.empty else timedelta()) + holding_avg = ( + timedelta(minutes=round(results["trade_duration"].mean())) + if not results.empty + else timedelta() + ) + winner_holding_avg = ( + timedelta(minutes=round(winning_trades["trade_duration"].mean())) + if not winning_trades.empty + else timedelta() + ) + loser_holding_avg = ( + timedelta(minutes=round(losing_trades["trade_duration"].mean())) + if not losing_trades.empty + else timedelta() + ) winstreak, loss_streak = calc_streak(results) return { - 'wins': len(winning_trades), - 'losses': len(losing_trades), - 'draws': len(draw_trades), - 'winrate': len(winning_trades) / len(results) if len(results) else 0.0, - 'holding_avg': holding_avg, - 'holding_avg_s': holding_avg.total_seconds(), - 'winner_holding_avg': winner_holding_avg, - 'winner_holding_avg_s': winner_holding_avg.total_seconds(), - 'loser_holding_avg': loser_holding_avg, - 'loser_holding_avg_s': loser_holding_avg.total_seconds(), - 'max_consecutive_wins': winstreak, - 'max_consecutive_losses': loss_streak, + "wins": len(winning_trades), + "losses": len(losing_trades), + "draws": len(draw_trades), + "winrate": len(winning_trades) / len(results) if len(results) else 0.0, + "holding_avg": holding_avg, + "holding_avg_s": holding_avg.total_seconds(), + "winner_holding_avg": winner_holding_avg, + "winner_holding_avg_s": winner_holding_avg.total_seconds(), + "loser_holding_avg": loser_holding_avg, + "loser_holding_avg_s": loser_holding_avg.total_seconds(), + "max_consecutive_wins": winstreak, + "max_consecutive_losses": loss_streak, } def generate_daily_stats(results: DataFrame) -> Dict[str, Any]: - """ Generate daily statistics """ + """Generate daily statistics""" if len(results) == 0: return { - 'backtest_best_day': 0, - 'backtest_worst_day': 0, - 'backtest_best_day_abs': 0, - 'backtest_worst_day_abs': 0, - 'winning_days': 0, - 'draw_days': 0, - 'losing_days': 0, - 'daily_profit_list': [], + "backtest_best_day": 0, + "backtest_worst_day": 0, + "backtest_best_day_abs": 0, + "backtest_worst_day_abs": 0, + "winning_days": 0, + "draw_days": 0, + "losing_days": 0, + "daily_profit_list": [], } - daily_profit_rel = results.resample('1d', on='close_date')['profit_ratio'].sum() - daily_profit = results.resample('1d', on='close_date')['profit_abs'].sum().round(10) + daily_profit_rel = results.resample("1d", on="close_date")["profit_ratio"].sum() + daily_profit = results.resample("1d", on="close_date")["profit_abs"].sum().round(10) worst_rel = min(daily_profit_rel) best_rel = max(daily_profit_rel) worst = min(daily_profit) @@ -306,24 +332,26 @@ def generate_daily_stats(results: DataFrame) -> Dict[str, Any]: daily_profit_list = [(str(idx.date()), val) for idx, val in daily_profit.items()] return { - 'backtest_best_day': best_rel, - 'backtest_worst_day': worst_rel, - 'backtest_best_day_abs': best, - 'backtest_worst_day_abs': worst, - 'winning_days': winning_days, - 'draw_days': draw_days, - 'losing_days': losing_days, - 
'daily_profit': daily_profit_list, + "backtest_best_day": best_rel, + "backtest_worst_day": worst_rel, + "backtest_best_day_abs": best, + "backtest_worst_day_abs": worst, + "winning_days": winning_days, + "draw_days": draw_days, + "losing_days": losing_days, + "daily_profit": daily_profit_list, } -def generate_strategy_stats(pairlist: List[str], - strategy: str, - content: Dict[str, Any], - min_date: datetime, max_date: datetime, - market_change: float, - is_hyperopt: bool = False, - ) -> Dict[str, Any]: +def generate_strategy_stats( + pairlist: List[str], + strategy: str, + content: Dict[str, Any], + min_date: datetime, + max_date: datetime, + market_change: float, + is_hyperopt: bool = False, +) -> Dict[str, Any]: """ :param pairlist: List of pairs to backtest :param strategy: Strategy name @@ -334,175 +362,192 @@ def generate_strategy_stats(pairlist: List[str], :param market_change: float indicating the market change :return: Dictionary containing results per strategy and a strategy summary. """ - results: Dict[str, DataFrame] = content['results'] + results: Dict[str, DataFrame] = content["results"] if not isinstance(results, DataFrame): return {} - config = content['config'] - max_open_trades = min(config['max_open_trades'], len(pairlist)) - start_balance = config['dry_run_wallet'] - stake_currency = config['stake_currency'] + config = content["config"] + max_open_trades = min(config["max_open_trades"], len(pairlist)) + start_balance = config["dry_run_wallet"] + stake_currency = config["stake_currency"] - pair_results = generate_pair_metrics(pairlist, stake_currency=stake_currency, - starting_balance=start_balance, - results=results, skip_nan=False) + pair_results = generate_pair_metrics( + pairlist, + stake_currency=stake_currency, + starting_balance=start_balance, + results=results, + skip_nan=False, + ) - enter_tag_results = generate_tag_metrics("enter_tag", starting_balance=start_balance, - results=results, skip_nan=False) - exit_reason_stats = generate_tag_metrics('exit_reason', starting_balance=start_balance, - results=results, skip_nan=False) + enter_tag_results = generate_tag_metrics( + "enter_tag", starting_balance=start_balance, results=results, skip_nan=False + ) + exit_reason_stats = generate_tag_metrics( + "exit_reason", starting_balance=start_balance, results=results, skip_nan=False + ) left_open_results = generate_pair_metrics( - pairlist, stake_currency=stake_currency, starting_balance=start_balance, - results=results.loc[results['exit_reason'] == 'force_exit'], skip_nan=True) + pairlist, + stake_currency=stake_currency, + starting_balance=start_balance, + results=results.loc[results["exit_reason"] == "force_exit"], + skip_nan=True, + ) daily_stats = generate_daily_stats(results) trade_stats = generate_trading_stats(results) periodic_breakdown = {} if not is_hyperopt: - periodic_breakdown = {'periodic_breakdown': generate_all_periodic_breakdown_stats(results)} + periodic_breakdown = {"periodic_breakdown": generate_all_periodic_breakdown_stats(results)} - best_pair = max([pair for pair in pair_results if pair['key'] != 'TOTAL'], - key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None - worst_pair = min([pair for pair in pair_results if pair['key'] != 'TOTAL'], - key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None - winning_profit = results.loc[results['profit_abs'] > 0, 'profit_abs'].sum() - losing_profit = results.loc[results['profit_abs'] < 0, 'profit_abs'].sum() + best_pair = ( + max( + [pair for pair in pair_results if pair["key"] != 
"TOTAL"], key=lambda x: x["profit_sum"] + ) + if len(pair_results) > 1 + else None + ) + worst_pair = ( + min( + [pair for pair in pair_results if pair["key"] != "TOTAL"], key=lambda x: x["profit_sum"] + ) + if len(pair_results) > 1 + else None + ) + winning_profit = results.loc[results["profit_abs"] > 0, "profit_abs"].sum() + losing_profit = results.loc[results["profit_abs"] < 0, "profit_abs"].sum() profit_factor = winning_profit / abs(losing_profit) if losing_profit else 0.0 expectancy, expectancy_ratio = calculate_expectancy(results) backtest_days = (max_date - min_date).days or 1 strat_stats = { - 'trades': results.to_dict(orient='records'), - 'locks': [lock.to_json() for lock in content['locks']], - 'best_pair': best_pair, - 'worst_pair': worst_pair, - 'results_per_pair': pair_results, - 'results_per_enter_tag': enter_tag_results, - 'exit_reason_summary': exit_reason_stats, - 'left_open_trades': left_open_results, - - 'total_trades': len(results), - 'trade_count_long': len(results.loc[~results['is_short']]), - 'trade_count_short': len(results.loc[results['is_short']]), - 'total_volume': float(results['stake_amount'].sum()), - 'avg_stake_amount': results['stake_amount'].mean() if len(results) > 0 else 0, - 'profit_mean': results['profit_ratio'].mean() if len(results) > 0 else 0, - 'profit_median': results['profit_ratio'].median() if len(results) > 0 else 0, - 'profit_total': results['profit_abs'].sum() / start_balance, - 'profit_total_long': results.loc[~results['is_short'], 'profit_abs'].sum() / start_balance, - 'profit_total_short': results.loc[results['is_short'], 'profit_abs'].sum() / start_balance, - 'profit_total_abs': results['profit_abs'].sum(), - 'profit_total_long_abs': results.loc[~results['is_short'], 'profit_abs'].sum(), - 'profit_total_short_abs': results.loc[results['is_short'], 'profit_abs'].sum(), - 'cagr': calculate_cagr(backtest_days, start_balance, content['final_balance']), - 'expectancy': expectancy, - 'expectancy_ratio': expectancy_ratio, - 'sortino': calculate_sortino(results, min_date, max_date, start_balance), - 'sharpe': calculate_sharpe(results, min_date, max_date, start_balance), - 'calmar': calculate_calmar(results, min_date, max_date, start_balance), - 'profit_factor': profit_factor, - 'backtest_start': min_date.strftime(DATETIME_PRINT_FORMAT), - 'backtest_start_ts': int(min_date.timestamp() * 1000), - 'backtest_end': max_date.strftime(DATETIME_PRINT_FORMAT), - 'backtest_end_ts': int(max_date.timestamp() * 1000), - 'backtest_days': backtest_days, - - 'backtest_run_start_ts': content['backtest_start_time'], - 'backtest_run_end_ts': content['backtest_end_time'], - - 'trades_per_day': round(len(results) / backtest_days, 2), - 'market_change': market_change, - 'pairlist': pairlist, - 'stake_amount': config['stake_amount'], - 'stake_currency': config['stake_currency'], - 'stake_currency_decimals': decimals_per_coin(config['stake_currency']), - 'starting_balance': start_balance, - 'dry_run_wallet': start_balance, - 'final_balance': content['final_balance'], - 'rejected_signals': content['rejected_signals'], - 'timedout_entry_orders': content['timedout_entry_orders'], - 'timedout_exit_orders': content['timedout_exit_orders'], - 'canceled_trade_entries': content['canceled_trade_entries'], - 'canceled_entry_orders': content['canceled_entry_orders'], - 'replaced_entry_orders': content['replaced_entry_orders'], - 'max_open_trades': max_open_trades, - 'max_open_trades_setting': (config['max_open_trades'] - if config['max_open_trades'] != float('inf') else -1), - 
'timeframe': config['timeframe'], - 'timeframe_detail': config.get('timeframe_detail', ''), - 'timerange': config.get('timerange', ''), - 'enable_protections': config.get('enable_protections', False), - 'strategy_name': strategy, + "trades": results.to_dict(orient="records"), + "locks": [lock.to_json() for lock in content["locks"]], + "best_pair": best_pair, + "worst_pair": worst_pair, + "results_per_pair": pair_results, + "results_per_enter_tag": enter_tag_results, + "exit_reason_summary": exit_reason_stats, + "left_open_trades": left_open_results, + "total_trades": len(results), + "trade_count_long": len(results.loc[~results["is_short"]]), + "trade_count_short": len(results.loc[results["is_short"]]), + "total_volume": float(results["stake_amount"].sum()), + "avg_stake_amount": results["stake_amount"].mean() if len(results) > 0 else 0, + "profit_mean": results["profit_ratio"].mean() if len(results) > 0 else 0, + "profit_median": results["profit_ratio"].median() if len(results) > 0 else 0, + "profit_total": results["profit_abs"].sum() / start_balance, + "profit_total_long": results.loc[~results["is_short"], "profit_abs"].sum() / start_balance, + "profit_total_short": results.loc[results["is_short"], "profit_abs"].sum() / start_balance, + "profit_total_abs": results["profit_abs"].sum(), + "profit_total_long_abs": results.loc[~results["is_short"], "profit_abs"].sum(), + "profit_total_short_abs": results.loc[results["is_short"], "profit_abs"].sum(), + "cagr": calculate_cagr(backtest_days, start_balance, content["final_balance"]), + "expectancy": expectancy, + "expectancy_ratio": expectancy_ratio, + "sortino": calculate_sortino(results, min_date, max_date, start_balance), + "sharpe": calculate_sharpe(results, min_date, max_date, start_balance), + "calmar": calculate_calmar(results, min_date, max_date, start_balance), + "profit_factor": profit_factor, + "backtest_start": min_date.strftime(DATETIME_PRINT_FORMAT), + "backtest_start_ts": int(min_date.timestamp() * 1000), + "backtest_end": max_date.strftime(DATETIME_PRINT_FORMAT), + "backtest_end_ts": int(max_date.timestamp() * 1000), + "backtest_days": backtest_days, + "backtest_run_start_ts": content["backtest_start_time"], + "backtest_run_end_ts": content["backtest_end_time"], + "trades_per_day": round(len(results) / backtest_days, 2), + "market_change": market_change, + "pairlist": pairlist, + "stake_amount": config["stake_amount"], + "stake_currency": config["stake_currency"], + "stake_currency_decimals": decimals_per_coin(config["stake_currency"]), + "starting_balance": start_balance, + "dry_run_wallet": start_balance, + "final_balance": content["final_balance"], + "rejected_signals": content["rejected_signals"], + "timedout_entry_orders": content["timedout_entry_orders"], + "timedout_exit_orders": content["timedout_exit_orders"], + "canceled_trade_entries": content["canceled_trade_entries"], + "canceled_entry_orders": content["canceled_entry_orders"], + "replaced_entry_orders": content["replaced_entry_orders"], + "max_open_trades": max_open_trades, + "max_open_trades_setting": ( + config["max_open_trades"] if config["max_open_trades"] != float("inf") else -1 + ), + "timeframe": config["timeframe"], + "timeframe_detail": config.get("timeframe_detail", ""), + "timerange": config.get("timerange", ""), + "enable_protections": config.get("enable_protections", False), + "strategy_name": strategy, # Parameters relevant for backtesting - 'stoploss': config['stoploss'], - 'trailing_stop': config.get('trailing_stop', False), - 
'trailing_stop_positive': config.get('trailing_stop_positive'), - 'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset', 0.0), - 'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached', False), - 'use_custom_stoploss': config.get('use_custom_stoploss', False), - 'minimal_roi': config['minimal_roi'], - 'use_exit_signal': config['use_exit_signal'], - 'exit_profit_only': config['exit_profit_only'], - 'exit_profit_offset': config['exit_profit_offset'], - 'ignore_roi_if_entry_signal': config['ignore_roi_if_entry_signal'], + "stoploss": config["stoploss"], + "trailing_stop": config.get("trailing_stop", False), + "trailing_stop_positive": config.get("trailing_stop_positive"), + "trailing_stop_positive_offset": config.get("trailing_stop_positive_offset", 0.0), + "trailing_only_offset_is_reached": config.get("trailing_only_offset_is_reached", False), + "use_custom_stoploss": config.get("use_custom_stoploss", False), + "minimal_roi": config["minimal_roi"], + "use_exit_signal": config["use_exit_signal"], + "exit_profit_only": config["exit_profit_only"], + "exit_profit_offset": config["exit_profit_offset"], + "ignore_roi_if_entry_signal": config["ignore_roi_if_entry_signal"], **periodic_breakdown, **daily_stats, - **trade_stats + **trade_stats, } try: - max_drawdown_legacy, _, _, _, _, _ = calculate_max_drawdown( - results, value_col='profit_ratio') - (drawdown_abs, drawdown_start, drawdown_end, high_val, low_val, - max_drawdown) = calculate_max_drawdown( - results, value_col='profit_abs', starting_balance=start_balance) + drawdown = calculate_max_drawdown( + results, value_col="profit_abs", starting_balance=start_balance + ) # max_relative_drawdown = Underwater - (_, _, _, _, _, max_relative_drawdown) = calculate_max_drawdown( - results, value_col='profit_abs', starting_balance=start_balance, relative=True) + underwater = calculate_max_drawdown( + results, value_col="profit_abs", starting_balance=start_balance, relative=True + ) - strat_stats.update({ - 'max_drawdown': max_drawdown_legacy, # Deprecated - do not use - 'max_drawdown_account': max_drawdown, - 'max_relative_drawdown': max_relative_drawdown, - 'max_drawdown_abs': drawdown_abs, - 'drawdown_start': drawdown_start.strftime(DATETIME_PRINT_FORMAT), - 'drawdown_start_ts': drawdown_start.timestamp() * 1000, - 'drawdown_end': drawdown_end.strftime(DATETIME_PRINT_FORMAT), - 'drawdown_end_ts': drawdown_end.timestamp() * 1000, - - 'max_drawdown_low': low_val, - 'max_drawdown_high': high_val, - }) + strat_stats.update( + { + "max_drawdown_account": drawdown.relative_account_drawdown, + "max_relative_drawdown": underwater.relative_account_drawdown, + "max_drawdown_abs": drawdown.drawdown_abs, + "drawdown_start": drawdown.high_date.strftime(DATETIME_PRINT_FORMAT), + "drawdown_start_ts": drawdown.high_date.timestamp() * 1000, + "drawdown_end": drawdown.low_date.strftime(DATETIME_PRINT_FORMAT), + "drawdown_end_ts": drawdown.low_date.timestamp() * 1000, + "max_drawdown_low": drawdown.low_value, + "max_drawdown_high": drawdown.high_value, + } + ) csum_min, csum_max = calculate_csum(results, start_balance) - strat_stats.update({ - 'csum_min': csum_min, - 'csum_max': csum_max - }) + strat_stats.update({"csum_min": csum_min, "csum_max": csum_max}) except ValueError: - strat_stats.update({ - 'max_drawdown': 0.0, - 'max_drawdown_account': 0.0, - 'max_relative_drawdown': 0.0, - 'max_drawdown_abs': 0.0, - 'max_drawdown_low': 0.0, - 'max_drawdown_high': 0.0, - 'drawdown_start': datetime(1970, 1, 1, 
tzinfo=timezone.utc), - 'drawdown_start_ts': 0, - 'drawdown_end': datetime(1970, 1, 1, tzinfo=timezone.utc), - 'drawdown_end_ts': 0, - 'csum_min': 0, - 'csum_max': 0 - }) + strat_stats.update( + { + "max_drawdown_account": 0.0, + "max_relative_drawdown": 0.0, + "max_drawdown_abs": 0.0, + "max_drawdown_low": 0.0, + "max_drawdown_high": 0.0, + "drawdown_start": datetime(1970, 1, 1, tzinfo=timezone.utc), + "drawdown_start_ts": 0, + "drawdown_end": datetime(1970, 1, 1, tzinfo=timezone.utc), + "drawdown_end_ts": 0, + "csum_min": 0, + "csum_max": 0, + } + ) return strat_stats -def generate_backtest_stats(btdata: Dict[str, DataFrame], - all_results: Dict[str, Dict[str, Union[DataFrame, Dict]]], - min_date: datetime, max_date: datetime - ) -> BacktestResultType: +def generate_backtest_stats( + btdata: Dict[str, DataFrame], + all_results: Dict[str, Dict[str, Union[DataFrame, Dict]]], + min_date: datetime, + max_date: datetime, +) -> BacktestResultType: """ :param btdata: Backtest data :param all_results: backtest result - dictionary in the form: @@ -512,29 +557,30 @@ def generate_backtest_stats(btdata: Dict[str, DataFrame], :return: Dictionary containing results per strategy and a strategy summary. """ result: BacktestResultType = { - 'metadata': {}, - 'strategy': {}, - 'strategy_comparison': [], + "metadata": {}, + "strategy": {}, + "strategy_comparison": [], } - market_change = calculate_market_change(btdata, 'close') + market_change = calculate_market_change(btdata, "close") metadata = {} pairlist = list(btdata.keys()) for strategy, content in all_results.items(): - strat_stats = generate_strategy_stats(pairlist, strategy, content, - min_date, max_date, market_change=market_change) + strat_stats = generate_strategy_stats( + pairlist, strategy, content, min_date, max_date, market_change=market_change + ) metadata[strategy] = { - 'run_id': content['run_id'], - 'backtest_start_time': content['backtest_start_time'], - 'timeframe': content['config']['timeframe'], - 'timeframe_detail': content['config'].get('timeframe_detail', None), - 'backtest_start_ts': int(min_date.timestamp()), - 'backtest_end_ts': int(max_date.timestamp()), + "run_id": content["run_id"], + "backtest_start_time": content["backtest_start_time"], + "timeframe": content["config"]["timeframe"], + "timeframe_detail": content["config"].get("timeframe_detail", None), + "backtest_start_ts": int(min_date.timestamp()), + "backtest_end_ts": int(max_date.timestamp()), } - result['strategy'][strategy] = strat_stats + result["strategy"][strategy] = strat_stats - strategy_results = generate_strategy_comparison(bt_stats=result['strategy']) + strategy_results = generate_strategy_comparison(bt_stats=result["strategy"]) - result['metadata'] = metadata - result['strategy_comparison'] = strategy_results + result["metadata"] = metadata + result["strategy_comparison"] = strategy_results return result diff --git a/freqtrade/optimize/space/decimalspace.py b/freqtrade/optimize/space/decimalspace.py index 61aad0597..f5c122fb3 100644 --- a/freqtrade/optimize/space/decimalspace.py +++ b/freqtrade/optimize/space/decimalspace.py @@ -3,9 +3,17 @@ from skopt.space import Integer class SKDecimal(Integer): - - def __init__(self, low, high, decimals=3, prior="uniform", base=10, transform=None, - name=None, dtype=np.int64): + def __init__( + self, + low, + high, + decimals=3, + prior="uniform", + base=10, + transform=None, + name=None, + dtype=np.int64, + ): self.decimals = decimals self.pow_dot_one = pow(0.1, self.decimals) diff --git 
a/freqtrade/persistence/__init__.py b/freqtrade/persistence/__init__.py index d5584c22c..3612544ee 100644 --- a/freqtrade/persistence/__init__.py +++ b/freqtrade/persistence/__init__.py @@ -5,5 +5,8 @@ from freqtrade.persistence.key_value_store import KeyStoreKeys, KeyValueStore from freqtrade.persistence.models import init_db from freqtrade.persistence.pairlock_middleware import PairLocks from freqtrade.persistence.trade_model import LocalTrade, Order, Trade -from freqtrade.persistence.usedb_context import (FtNoDBContext, disable_database_use, - enable_database_use) +from freqtrade.persistence.usedb_context import ( + FtNoDBContext, + disable_database_use, + enable_database_use, +) diff --git a/freqtrade/persistence/base.py b/freqtrade/persistence/base.py index fc2dac75e..5f5c40dea 100644 --- a/freqtrade/persistence/base.py +++ b/freqtrade/persistence/base.py @@ -1,4 +1,3 @@ - from sqlalchemy.orm import DeclarativeBase, Session, scoped_session diff --git a/freqtrade/persistence/custom_data.py b/freqtrade/persistence/custom_data.py index 4d3bd5218..5b37a50eb 100644 --- a/freqtrade/persistence/custom_data.py +++ b/freqtrade/persistence/custom_data.py @@ -23,16 +23,17 @@ class _CustomData(ModelBase): - One trade can have many metadata entries - One metadata entry can only be associated with one Trade """ - __tablename__ = 'trade_custom_data' + + __tablename__ = "trade_custom_data" __allow_unmapped__ = True session: ClassVar[SessionType] # Uniqueness should be ensured over pair, order_id # its likely that order_id is unique per Pair on some exchanges. - __table_args__ = (UniqueConstraint('ft_trade_id', 'cd_key', name="_trade_id_cd_key"),) + __table_args__ = (UniqueConstraint("ft_trade_id", "cd_key", name="_trade_id_cd_key"),) id = mapped_column(Integer, primary_key=True) - ft_trade_id = mapped_column(Integer, ForeignKey('trades.id'), index=True) + ft_trade_id = mapped_column(Integer, ForeignKey("trades.id"), index=True) trade = relationship("Trade", back_populates="custom_data") @@ -46,17 +47,22 @@ class _CustomData(ModelBase): value: Any = None def __repr__(self): - create_time = (self.created_at.strftime(DATETIME_PRINT_FORMAT) - if self.created_at is not None else None) - update_time = (self.updated_at.strftime(DATETIME_PRINT_FORMAT) - if self.updated_at is not None else None) - return (f'CustomData(id={self.id}, key={self.cd_key}, type={self.cd_type}, ' + - f'value={self.cd_value}, trade_id={self.ft_trade_id}, created={create_time}, ' + - f'updated={update_time})') + create_time = ( + self.created_at.strftime(DATETIME_PRINT_FORMAT) if self.created_at is not None else None + ) + update_time = ( + self.updated_at.strftime(DATETIME_PRINT_FORMAT) if self.updated_at is not None else None + ) + return ( + f"CustomData(id={self.id}, key={self.cd_key}, type={self.cd_type}, " + + f"value={self.cd_value}, trade_id={self.ft_trade_id}, created={create_time}, " + + f"updated={update_time})" + ) @classmethod - def query_cd(cls, key: Optional[str] = None, - trade_id: Optional[int] = None) -> Sequence['_CustomData']: + def query_cd( + cls, key: Optional[str] = None, trade_id: Optional[int] = None + ) -> Sequence["_CustomData"]: """ Get all CustomData, if trade_id is not specified return will be for generic values not tied to a trade @@ -80,17 +86,17 @@ class CustomDataWrapper: use_db = True custom_data: List[_CustomData] = [] - unserialized_types = ['bool', 'float', 'int', 'str'] + unserialized_types = ["bool", "float", "int", "str"] @staticmethod def _convert_custom_data(data: _CustomData) -> 
_CustomData: if data.cd_type in CustomDataWrapper.unserialized_types: data.value = data.cd_value - if data.cd_type == 'bool': - data.value = data.cd_value.lower() == 'true' - elif data.cd_type == 'int': + if data.cd_type == "bool": + data.value = data.cd_value.lower() == "true" + elif data.cd_type == "int": data.value = int(data.cd_value) - elif data.cd_type == 'float': + elif data.cd_type == "float": data.value = float(data.cd_value) else: data.value = json.loads(data.cd_value) @@ -111,31 +117,32 @@ class CustomDataWrapper: @staticmethod def get_custom_data(*, trade_id: int, key: Optional[str] = None) -> List[_CustomData]: - if CustomDataWrapper.use_db: filters = [ _CustomData.ft_trade_id == trade_id, ] if key is not None: filters.append(_CustomData.cd_key.ilike(key)) - filtered_custom_data = _CustomData.session.scalars(select(_CustomData).filter( - *filters)).all() + filtered_custom_data = _CustomData.session.scalars( + select(_CustomData).filter(*filters) + ).all() else: filtered_custom_data = [ - data_entry for data_entry in CustomDataWrapper.custom_data + data_entry + for data_entry in CustomDataWrapper.custom_data if (data_entry.ft_trade_id == trade_id) ] if key is not None: filtered_custom_data = [ - data_entry for data_entry in filtered_custom_data + data_entry + for data_entry in filtered_custom_data if (data_entry.cd_key.casefold() == key.casefold()) ] return [CustomDataWrapper._convert_custom_data(d) for d in filtered_custom_data] @staticmethod def set_custom_data(trade_id: int, key: str, value: Any) -> None: - value_type = type(value).__name__ if value_type not in CustomDataWrapper.unserialized_types: diff --git a/freqtrade/persistence/key_value_store.py b/freqtrade/persistence/key_value_store.py index 6da7265d6..93960a102 100644 --- a/freqtrade/persistence/key_value_store.py +++ b/freqtrade/persistence/key_value_store.py @@ -12,22 +12,23 @@ ValueTypes = Union[str, datetime, float, int] class ValueTypesEnum(str, Enum): - STRING = 'str' - DATETIME = 'datetime' - FLOAT = 'float' - INT = 'int' + STRING = "str" + DATETIME = "datetime" + FLOAT = "float" + INT = "int" class KeyStoreKeys(str, Enum): - BOT_START_TIME = 'bot_start_time' - STARTUP_TIME = 'startup_time' + BOT_START_TIME = "bot_start_time" + STARTUP_TIME = "startup_time" class _KeyValueStoreModel(ModelBase): """ Pair Locks database model. """ - __tablename__ = 'KeyValueStore' + + __tablename__ = "KeyValueStore" session: ClassVar[SessionType] id: Mapped[int] = mapped_column(primary_key=True) @@ -56,8 +57,11 @@ class KeyValueStore: :param key: Key to store the value for - can be used in get-value to retrieve the key :param value: Value to store - can be str, datetime, float or int """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter(_KeyValueStoreModel.key == key) + .first() + ) if kv is None: kv = _KeyValueStoreModel(key=key) if isinstance(value, str): @@ -73,7 +77,7 @@ class KeyValueStore: kv.value_type = ValueTypesEnum.INT kv.int_value = value else: - raise ValueError(f'Unknown value type {kv.value_type}') + raise ValueError(f"Unknown value type {kv.value_type}") _KeyValueStoreModel.session.add(kv) _KeyValueStoreModel.session.commit() @@ -83,8 +87,11 @@ class KeyValueStore: Delete the value for the given key. 
:param key: Key to delete the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter(_KeyValueStoreModel.key == key) + .first() + ) if kv is not None: _KeyValueStoreModel.session.delete(kv) _KeyValueStoreModel.session.commit() @@ -95,8 +102,11 @@ class KeyValueStore: Get the value for the given key. :param key: Key to get the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter(_KeyValueStoreModel.key == key) + .first() + ) if kv is None: return None if kv.value_type == ValueTypesEnum.STRING: @@ -108,7 +118,7 @@ class KeyValueStore: if kv.value_type == ValueTypesEnum.INT: return kv.int_value # This should never happen unless someone messed with the database manually - raise ValueError(f'Unknown value type {kv.value_type}') # pragma: no cover + raise ValueError(f"Unknown value type {kv.value_type}") # pragma: no cover @staticmethod def get_string_value(key: KeyStoreKeys) -> Optional[str]: @@ -116,9 +126,14 @@ class KeyValueStore: Get the value for the given key. :param key: Key to get the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key, - _KeyValueStoreModel.value_type == ValueTypesEnum.STRING).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter( + _KeyValueStoreModel.key == key, + _KeyValueStoreModel.value_type == ValueTypesEnum.STRING, + ) + .first() + ) if kv is None: return None return kv.string_value @@ -129,9 +144,14 @@ class KeyValueStore: Get the value for the given key. :param key: Key to get the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key, - _KeyValueStoreModel.value_type == ValueTypesEnum.DATETIME).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter( + _KeyValueStoreModel.key == key, + _KeyValueStoreModel.value_type == ValueTypesEnum.DATETIME, + ) + .first() + ) if kv is None or kv.datetime_value is None: return None return kv.datetime_value.replace(tzinfo=timezone.utc) @@ -142,9 +162,14 @@ class KeyValueStore: Get the value for the given key. :param key: Key to get the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key, - _KeyValueStoreModel.value_type == ValueTypesEnum.FLOAT).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter( + _KeyValueStoreModel.key == key, + _KeyValueStoreModel.value_type == ValueTypesEnum.FLOAT, + ) + .first() + ) if kv is None: return None return kv.float_value @@ -155,9 +180,13 @@ class KeyValueStore: Get the value for the given key. :param key: Key to get the value for """ - kv = _KeyValueStoreModel.session.query(_KeyValueStoreModel).filter( - _KeyValueStoreModel.key == key, - _KeyValueStoreModel.value_type == ValueTypesEnum.INT).first() + kv = ( + _KeyValueStoreModel.session.query(_KeyValueStoreModel) + .filter( + _KeyValueStoreModel.key == key, _KeyValueStoreModel.value_type == ValueTypesEnum.INT + ) + .first() + ) if kv is None: return None return kv.int_value @@ -168,12 +197,13 @@ def set_startup_time(): sets bot_start_time to the first trade open date - or "now" on new databases. 
sets startup_time to "now" """ - st = KeyValueStore.get_value('bot_start_time') + st = KeyValueStore.get_value("bot_start_time") if st is None: from freqtrade.persistence import Trade + t = Trade.session.query(Trade).order_by(Trade.open_date.asc()).first() if t is not None: - KeyValueStore.store_value('bot_start_time', t.open_date_utc) + KeyValueStore.store_value("bot_start_time", t.open_date_utc) else: - KeyValueStore.store_value('bot_start_time', datetime.now(timezone.utc)) - KeyValueStore.store_value('startup_time', datetime.now(timezone.utc)) + KeyValueStore.store_value("bot_start_time", datetime.now(timezone.utc)) + KeyValueStore.store_value("startup_time", datetime.now(timezone.utc)) diff --git a/freqtrade/persistence/migrations.py b/freqtrade/persistence/migrations.py index b07a05632..e2e3b2175 100644 --- a/freqtrade/persistence/migrations.py +++ b/freqtrade/persistence/migrations.py @@ -25,8 +25,8 @@ def get_column_def(columns: List, column: str, default: str) -> str: def get_backup_name(tabs: List[str], backup_prefix: str): table_back_name = backup_prefix for i, table_back_name in enumerate(tabs): - table_back_name = f'{backup_prefix}{i}' - logger.debug(f'trying {table_back_name}') + table_back_name = f"{backup_prefix}{i}" + logger.debug(f"trying {table_back_name}") return table_back_name @@ -35,21 +35,22 @@ def get_last_sequence_ids(engine, trade_back_name: str, order_back_name: str): order_id: Optional[int] = None trade_id: Optional[int] = None - if engine.name == 'postgresql': + if engine.name == "postgresql": with engine.begin() as connection: trade_id = connection.execute(text("select nextval('trades_id_seq')")).fetchone()[0] order_id = connection.execute(text("select nextval('orders_id_seq')")).fetchone()[0] with engine.begin() as connection: - connection.execute(text( - f"ALTER SEQUENCE orders_id_seq rename to {order_back_name}_id_seq_bak")) - connection.execute(text( - f"ALTER SEQUENCE trades_id_seq rename to {trade_back_name}_id_seq_bak")) + connection.execute( + text(f"ALTER SEQUENCE orders_id_seq rename to {order_back_name}_id_seq_bak") + ) + connection.execute( + text(f"ALTER SEQUENCE trades_id_seq rename to {trade_back_name}_id_seq_bak") + ) return order_id, trade_id def set_sequence_ids(engine, order_id, trade_id, pairlock_id=None): - - if engine.name == 'postgresql': + if engine.name == "postgresql": with engine.begin() as connection: if order_id: connection.execute(text(f"ALTER SEQUENCE orders_id_seq RESTART WITH {order_id}")) @@ -57,84 +58,95 @@ def set_sequence_ids(engine, order_id, trade_id, pairlock_id=None): connection.execute(text(f"ALTER SEQUENCE trades_id_seq RESTART WITH {trade_id}")) if pairlock_id: connection.execute( - text(f"ALTER SEQUENCE pairlocks_id_seq RESTART WITH {pairlock_id}")) + text(f"ALTER SEQUENCE pairlocks_id_seq RESTART WITH {pairlock_id}") + ) def drop_index_on_table(engine, inspector, table_bak_name): with engine.begin() as connection: # drop indexes on backup table in new session for index in inspector.get_indexes(table_bak_name): - if engine.name == 'mysql': + if engine.name == "mysql": connection.execute(text(f"drop index {index['name']} on {table_bak_name}")) else: connection.execute(text(f"drop index {index['name']}")) def migrate_trades_and_orders_table( - decl_base, inspector, engine, - trade_back_name: str, cols: List, - order_back_name: str, cols_order: List): - base_currency = get_column_def(cols, 'base_currency', 'null') - stake_currency = get_column_def(cols, 'stake_currency', 'null') - fee_open = get_column_def(cols, 
'fee_open', 'fee') - fee_open_cost = get_column_def(cols, 'fee_open_cost', 'null') - fee_open_currency = get_column_def(cols, 'fee_open_currency', 'null') - fee_close = get_column_def(cols, 'fee_close', 'fee') - fee_close_cost = get_column_def(cols, 'fee_close_cost', 'null') - fee_close_currency = get_column_def(cols, 'fee_close_currency', 'null') - open_rate_requested = get_column_def(cols, 'open_rate_requested', 'null') - close_rate_requested = get_column_def(cols, 'close_rate_requested', 'null') - stop_loss = get_column_def(cols, 'stop_loss', '0.0') - stop_loss_pct = get_column_def(cols, 'stop_loss_pct', 'null') - initial_stop_loss = get_column_def(cols, 'initial_stop_loss', '0.0') - initial_stop_loss_pct = get_column_def(cols, 'initial_stop_loss_pct', 'null') + decl_base, + inspector, + engine, + trade_back_name: str, + cols: List, + order_back_name: str, + cols_order: List, +): + base_currency = get_column_def(cols, "base_currency", "null") + stake_currency = get_column_def(cols, "stake_currency", "null") + fee_open = get_column_def(cols, "fee_open", "fee") + fee_open_cost = get_column_def(cols, "fee_open_cost", "null") + fee_open_currency = get_column_def(cols, "fee_open_currency", "null") + fee_close = get_column_def(cols, "fee_close", "fee") + fee_close_cost = get_column_def(cols, "fee_close_cost", "null") + fee_close_currency = get_column_def(cols, "fee_close_currency", "null") + open_rate_requested = get_column_def(cols, "open_rate_requested", "null") + close_rate_requested = get_column_def(cols, "close_rate_requested", "null") + stop_loss = get_column_def(cols, "stop_loss", "0.0") + stop_loss_pct = get_column_def(cols, "stop_loss_pct", "null") + initial_stop_loss = get_column_def(cols, "initial_stop_loss", "0.0") + initial_stop_loss_pct = get_column_def(cols, "initial_stop_loss_pct", "null") is_stop_loss_trailing = get_column_def( - cols, 'is_stop_loss_trailing', - f'coalesce({stop_loss_pct}, 0.0) <> coalesce({initial_stop_loss_pct}, 0.0)') - max_rate = get_column_def(cols, 'max_rate', '0.0') - min_rate = get_column_def(cols, 'min_rate', 'null') - exit_reason = get_column_def(cols, 'sell_reason', get_column_def(cols, 'exit_reason', 'null')) - strategy = get_column_def(cols, 'strategy', 'null') - enter_tag = get_column_def(cols, 'buy_tag', get_column_def(cols, 'enter_tag', 'null')) - realized_profit = get_column_def(cols, 'realized_profit', '0.0') + cols, + "is_stop_loss_trailing", + f"coalesce({stop_loss_pct}, 0.0) <> coalesce({initial_stop_loss_pct}, 0.0)", + ) + max_rate = get_column_def(cols, "max_rate", "0.0") + min_rate = get_column_def(cols, "min_rate", "null") + exit_reason = get_column_def(cols, "sell_reason", get_column_def(cols, "exit_reason", "null")) + strategy = get_column_def(cols, "strategy", "null") + enter_tag = get_column_def(cols, "buy_tag", get_column_def(cols, "enter_tag", "null")) + realized_profit = get_column_def(cols, "realized_profit", "0.0") - trading_mode = get_column_def(cols, 'trading_mode', 'null') + trading_mode = get_column_def(cols, "trading_mode", "null") # Leverage Properties - leverage = get_column_def(cols, 'leverage', '1.0') - liquidation_price = get_column_def(cols, 'liquidation_price', - get_column_def(cols, 'isolated_liq', 'null')) + leverage = get_column_def(cols, "leverage", "1.0") + liquidation_price = get_column_def( + cols, "liquidation_price", get_column_def(cols, "isolated_liq", "null") + ) # sqlite does not support literals for booleans - if engine.name == 'postgresql': - is_short = get_column_def(cols, 'is_short', 'false') + if 
engine.name == "postgresql": + is_short = get_column_def(cols, "is_short", "false") else: - is_short = get_column_def(cols, 'is_short', '0') + is_short = get_column_def(cols, "is_short", "0") # Futures Properties - interest_rate = get_column_def(cols, 'interest_rate', '0.0') - funding_fees = get_column_def(cols, 'funding_fees', '0.0') - funding_fee_running = get_column_def(cols, 'funding_fee_running', 'null') - max_stake_amount = get_column_def(cols, 'max_stake_amount', 'stake_amount') + interest_rate = get_column_def(cols, "interest_rate", "0.0") + funding_fees = get_column_def(cols, "funding_fees", "0.0") + funding_fee_running = get_column_def(cols, "funding_fee_running", "null") + max_stake_amount = get_column_def(cols, "max_stake_amount", "stake_amount") # If ticker-interval existed use that, else null. - if has_column(cols, 'ticker_interval'): - timeframe = get_column_def(cols, 'timeframe', 'ticker_interval') + if has_column(cols, "ticker_interval"): + timeframe = get_column_def(cols, "timeframe", "ticker_interval") else: - timeframe = get_column_def(cols, 'timeframe', 'null') + timeframe = get_column_def(cols, "timeframe", "null") - open_trade_value = get_column_def(cols, 'open_trade_value', - f'amount * open_rate * (1 + {fee_open})') + open_trade_value = get_column_def( + cols, "open_trade_value", f"amount * open_rate * (1 + {fee_open})" + ) close_profit_abs = get_column_def( - cols, 'close_profit_abs', - f"(amount * close_rate * (1 - {fee_close})) - {open_trade_value}") - exit_order_status = get_column_def(cols, 'exit_order_status', - get_column_def(cols, 'sell_order_status', 'null')) - amount_requested = get_column_def(cols, 'amount_requested', 'amount') + cols, "close_profit_abs", f"(amount * close_rate * (1 - {fee_close})) - {open_trade_value}" + ) + exit_order_status = get_column_def( + cols, "exit_order_status", get_column_def(cols, "sell_order_status", "null") + ) + amount_requested = get_column_def(cols, "amount_requested", "amount") - amount_precision = get_column_def(cols, 'amount_precision', 'null') - price_precision = get_column_def(cols, 'price_precision', 'null') - precision_mode = get_column_def(cols, 'precision_mode', 'null') - contract_size = get_column_def(cols, 'contract_size', 'null') + amount_precision = get_column_def(cols, "amount_precision", "null") + price_precision = get_column_def(cols, "price_precision", "null") + precision_mode = get_column_def(cols, "precision_mode", "null") + contract_size = get_column_def(cols, "contract_size", "null") # Schema migration necessary with engine.begin() as connection: @@ -151,7 +163,9 @@ def migrate_trades_and_orders_table( # Copy data back - following the correct schema with engine.begin() as connection: - connection.execute(text(f"""insert into trades + connection.execute( + text( + f"""insert into trades (id, exchange, pair, base_currency, stake_currency, is_open, fee_open, fee_open_cost, fee_open_currency, fee_close, fee_close_cost, fee_close_currency, open_rate, @@ -196,7 +210,9 @@ def migrate_trades_and_orders_table( {precision_mode} precision_mode, {contract_size} contract_size, {max_stake_amount} max_stake_amount from {trade_back_name} - """)) + """ + ) + ) migrate_orders_table(engine, order_back_name, cols_order) set_sequence_ids(engine, order_id, trade_id) @@ -212,19 +228,20 @@ def drop_orders_table(engine, table_back_name: str): def migrate_orders_table(engine, table_back_name: str, cols_order: List): - - ft_fee_base = get_column_def(cols_order, 'ft_fee_base', 'null') - average = get_column_def(cols_order, 
'average', 'null') - stop_price = get_column_def(cols_order, 'stop_price', 'null') - funding_fee = get_column_def(cols_order, 'funding_fee', '0.0') - ft_amount = get_column_def(cols_order, 'ft_amount', 'coalesce(amount, 0.0)') - ft_price = get_column_def(cols_order, 'ft_price', 'coalesce(price, 0.0)') - ft_cancel_reason = get_column_def(cols_order, 'ft_cancel_reason', 'null') - ft_order_tag = get_column_def(cols_order, 'ft_order_tag', 'null') + ft_fee_base = get_column_def(cols_order, "ft_fee_base", "null") + average = get_column_def(cols_order, "average", "null") + stop_price = get_column_def(cols_order, "stop_price", "null") + funding_fee = get_column_def(cols_order, "funding_fee", "0.0") + ft_amount = get_column_def(cols_order, "ft_amount", "coalesce(amount, 0.0)") + ft_price = get_column_def(cols_order, "ft_price", "coalesce(price, 0.0)") + ft_cancel_reason = get_column_def(cols_order, "ft_cancel_reason", "null") + ft_order_tag = get_column_def(cols_order, "ft_order_tag", "null") # sqlite does not support literals for booleans with engine.begin() as connection: - connection.execute(text(f""" + connection.execute( + text( + f""" insert into orders (id, ft_trade_id, ft_order_side, ft_pair, ft_is_open, order_id, status, symbol, order_type, side, price, amount, filled, average, remaining, cost, stop_price, order_date, order_filled_date, order_update_date, ft_fee_base, funding_fee, @@ -237,36 +254,39 @@ def migrate_orders_table(engine, table_back_name: str, cols_order: List): {ft_amount} ft_amount, {ft_price} ft_price, {ft_cancel_reason} ft_cancel_reason, {ft_order_tag} ft_order_tag from {table_back_name} - """)) + """ + ) + ) -def migrate_pairlocks_table( - decl_base, inspector, engine, - pairlock_back_name: str, cols: List): - +def migrate_pairlocks_table(decl_base, inspector, engine, pairlock_back_name: str, cols: List): # Schema migration necessary with engine.begin() as connection: connection.execute(text(f"alter table pairlocks rename to {pairlock_back_name}")) drop_index_on_table(engine, inspector, pairlock_back_name) - side = get_column_def(cols, 'side', "'*'") + side = get_column_def(cols, "side", "'*'") # let SQLAlchemy create the schema as required decl_base.metadata.create_all(engine) # Copy data back - following the correct schema with engine.begin() as connection: - connection.execute(text(f"""insert into pairlocks + connection.execute( + text( + f"""insert into pairlocks (id, pair, side, reason, lock_time, lock_end_time, active) select id, pair, {side} side, reason, lock_time, lock_end_time, active from {pairlock_back_name} - """)) + """ + ) + ) def set_sqlite_to_wal(engine): - if engine.name == 'sqlite' and str(engine.url) != 'sqlite://': + if engine.name == "sqlite" and str(engine.url) != "sqlite://": # Set Mode to with engine.begin() as connection: connection.execute(text("PRAGMA journal_mode=wal")) @@ -274,7 +294,6 @@ def set_sqlite_to_wal(engine): def fix_old_dry_orders(engine): with engine.begin() as connection: - # Update current dry-run Orders where # - stoploss order is Open (will be replaced eventually) # 2nd query: @@ -283,26 +302,28 @@ def fix_old_dry_orders(engine): # - current Order trade_id not equal to current Trade.id # - current Order not stoploss - stmt = update(Order).where( - Order.ft_is_open.is_(True), - Order.ft_order_side == 'stoploss', - Order.order_id.like('dry%'), - - ).values(ft_is_open=False) + stmt = ( + update(Order) + .where( + Order.ft_is_open.is_(True), + Order.ft_order_side == "stoploss", + Order.order_id.like("dry%"), + ) + 
.values(ft_is_open=False) + ) connection.execute(stmt) # Close dry-run orders for closed trades. - stmt = update(Order).where( - Order.ft_is_open.is_(True), - Order.ft_trade_id.not_in( - select( - Trade.id - ).where(Trade.is_open.is_(True)) - ), - Order.ft_order_side != 'stoploss', - Order.order_id.like('dry%') - - ).values(ft_is_open=False) + stmt = ( + update(Order) + .where( + Order.ft_is_open.is_(True), + Order.ft_trade_id.not_in(select(Trade.id).where(Trade.is_open.is_(True))), + Order.ft_order_side != "stoploss", + Order.order_id.like("dry%"), + ) + .values(ft_is_open=False) + ) connection.execute(stmt) @@ -312,15 +333,15 @@ def check_migrate(engine, decl_base, previous_tables) -> None: """ inspector = inspect(engine) - cols_trades = inspector.get_columns('trades') - cols_orders = inspector.get_columns('orders') - cols_pairlocks = inspector.get_columns('pairlocks') - tabs = get_table_names_for_table(inspector, 'trades') - table_back_name = get_backup_name(tabs, 'trades_bak') - order_tabs = get_table_names_for_table(inspector, 'orders') - order_table_bak_name = get_backup_name(order_tabs, 'orders_bak') - pairlock_tabs = get_table_names_for_table(inspector, 'pairlocks') - pairlock_table_bak_name = get_backup_name(pairlock_tabs, 'pairlocks_bak') + cols_trades = inspector.get_columns("trades") + cols_orders = inspector.get_columns("orders") + cols_pairlocks = inspector.get_columns("pairlocks") + tabs = get_table_names_for_table(inspector, "trades") + table_back_name = get_backup_name(tabs, "trades_bak") + order_tabs = get_table_names_for_table(inspector, "orders") + order_table_bak_name = get_backup_name(order_tabs, "orders_bak") + pairlock_tabs = get_table_names_for_table(inspector, "pairlocks") + pairlock_table_bak_name = get_backup_name(pairlock_tabs, "pairlocks_bak") # Check if migration necessary # Migrates both trades and orders table! @@ -328,27 +349,35 @@ def check_migrate(engine, decl_base, previous_tables) -> None: # or not has_column(cols_orders, 'funding_fee')): migrating = False # if not has_column(cols_trades, 'funding_fee_running'): - if not has_column(cols_orders, 'ft_order_tag'): + if not has_column(cols_orders, "ft_order_tag"): migrating = True - logger.info(f"Running database migration for trades - " - f"backup: {table_back_name}, {order_table_bak_name}") + logger.info( + f"Running database migration for trades - " + f"backup: {table_back_name}, {order_table_bak_name}" + ) migrate_trades_and_orders_table( - decl_base, inspector, engine, table_back_name, cols_trades, - order_table_bak_name, cols_orders) + decl_base, + inspector, + engine, + table_back_name, + cols_trades, + order_table_bak_name, + cols_orders, + ) - if not has_column(cols_pairlocks, 'side'): + if not has_column(cols_pairlocks, "side"): migrating = True - logger.info(f"Running database migration for pairlocks - " - f"backup: {pairlock_table_bak_name}") + logger.info(f"Running database migration for pairlocks - backup: {pairlock_table_bak_name}") migrate_pairlocks_table( decl_base, inspector, engine, pairlock_table_bak_name, cols_pairlocks ) - if 'orders' not in previous_tables and 'trades' in previous_tables: + if "orders" not in previous_tables and "trades" in previous_tables: raise OperationalException( "Your database seems to be very old. " "Please update to freqtrade 2022.3 to migrate this database or " - "start with a fresh database.") + "start with a fresh database." 
+ ) set_sqlite_to_wal(engine) fix_old_dry_orders(engine) diff --git a/freqtrade/persistence/models.py b/freqtrade/persistence/models.py index 1a69b271c..261148baa 100644 --- a/freqtrade/persistence/models.py +++ b/freqtrade/persistence/models.py @@ -1,6 +1,7 @@ """ This module contains the class to persist trades into SQLite """ + import logging import threading from contextvars import ContextVar @@ -23,7 +24,7 @@ from freqtrade.persistence.trade_model import Order, Trade logger = logging.getLogger(__name__) -REQUEST_ID_CTX_KEY: Final[str] = 'request_id' +REQUEST_ID_CTX_KEY: Final[str] = "request_id" _request_id_ctx_var: ContextVar[Optional[str]] = ContextVar(REQUEST_ID_CTX_KEY, default=None) @@ -39,7 +40,7 @@ def get_request_or_thread_id() -> Optional[str]: return id -_SQL_DOCS_URL = 'http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls' +_SQL_DOCS_URL = "http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls" def init_db(db_url: str) -> None: @@ -52,35 +53,44 @@ def init_db(db_url: str) -> None: """ kwargs: Dict[str, Any] = {} - if db_url == 'sqlite:///': + if db_url == "sqlite:///": raise OperationalException( - f'Bad db-url {db_url}. For in-memory database, please use `sqlite://`.') - if db_url == 'sqlite://': - kwargs.update({ - 'poolclass': StaticPool, - }) + f"Bad db-url {db_url}. For in-memory database, please use `sqlite://`." + ) + if db_url == "sqlite://": + kwargs.update( + { + "poolclass": StaticPool, + } + ) # Take care of thread ownership - if db_url.startswith('sqlite://'): - kwargs.update({ - 'connect_args': {'check_same_thread': False}, - }) + if db_url.startswith("sqlite://"): + kwargs.update( + { + "connect_args": {"check_same_thread": False}, + } + ) try: engine = create_engine(db_url, future=True, **kwargs) except NoSuchModuleError: - raise OperationalException(f"Given value for db_url: '{db_url}' " - f"is no valid database URL! (See {_SQL_DOCS_URL})") + raise OperationalException( + f"Given value for db_url: '{db_url}' " + f"is no valid database URL! (See {_SQL_DOCS_URL})" + ) # https://docs.sqlalchemy.org/en/13/orm/contextual.html#thread-local-scope # Scoped sessions proxy requests to the appropriate thread-local session. # Since we also use fastAPI, we need to make it aware of the request id, too - Trade.session = scoped_session(sessionmaker( - bind=engine, autoflush=False), scopefunc=get_request_or_thread_id) + Trade.session = scoped_session( + sessionmaker(bind=engine, autoflush=False), scopefunc=get_request_or_thread_id + ) Order.session = Trade.session PairLock.session = Trade.session _KeyValueStoreModel.session = Trade.session - _CustomData.session = scoped_session(sessionmaker(bind=engine, autoflush=True), - scopefunc=get_request_or_thread_id) + _CustomData.session = scoped_session( + sessionmaker(bind=engine, autoflush=True), scopefunc=get_request_or_thread_id + ) previous_tables = inspect(engine).get_table_names() ModelBase.metadata.create_all(engine) diff --git a/freqtrade/persistence/pairlock.py b/freqtrade/persistence/pairlock.py index 1b254c2b2..2ea2991c2 100644 --- a/freqtrade/persistence/pairlock.py +++ b/freqtrade/persistence/pairlock.py @@ -12,7 +12,8 @@ class PairLock(ModelBase): """ Pair Locks database model. 
""" - __tablename__ = 'pairlocks' + + __tablename__ = "pairlocks" session: ClassVar[SessionType] id: Mapped[int] = mapped_column(primary_key=True) @@ -32,43 +33,48 @@ class PairLock(ModelBase): lock_time = self.lock_time.strftime(DATETIME_PRINT_FORMAT) lock_end_time = self.lock_end_time.strftime(DATETIME_PRINT_FORMAT) return ( - f'PairLock(id={self.id}, pair={self.pair}, side={self.side}, lock_time={lock_time}, ' - f'lock_end_time={lock_end_time}, reason={self.reason}, active={self.active})') + f"PairLock(id={self.id}, pair={self.pair}, side={self.side}, lock_time={lock_time}, " + f"lock_end_time={lock_end_time}, reason={self.reason}, active={self.active})" + ) @staticmethod def query_pair_locks( - pair: Optional[str], now: datetime, side: str = '*') -> ScalarResult['PairLock']: + pair: Optional[str], now: datetime, side: str = "*" + ) -> ScalarResult["PairLock"]: """ Get all currently active locks for this pair :param pair: Pair to check for. Returns all current locks if pair is empty :param now: Datetime object (generated via datetime.now(timezone.utc)). """ - filters = [PairLock.lock_end_time > now, - # Only active locks - PairLock.active.is_(True), ] + filters = [ + PairLock.lock_end_time > now, + # Only active locks + PairLock.active.is_(True), + ] if pair: filters.append(PairLock.pair == pair) - if side != '*': - filters.append(or_(PairLock.side == side, PairLock.side == '*')) + if side != "*": + filters.append(or_(PairLock.side == side, PairLock.side == "*")) else: - filters.append(PairLock.side == '*') + filters.append(PairLock.side == "*") return PairLock.session.scalars(select(PairLock).filter(*filters)) @staticmethod - def get_all_locks() -> ScalarResult['PairLock']: + def get_all_locks() -> ScalarResult["PairLock"]: return PairLock.session.scalars(select(PairLock)) def to_json(self) -> Dict[str, Any]: return { - 'id': self.id, - 'pair': self.pair, - 'lock_time': self.lock_time.strftime(DATETIME_PRINT_FORMAT), - 'lock_timestamp': int(self.lock_time.replace(tzinfo=timezone.utc).timestamp() * 1000), - 'lock_end_time': self.lock_end_time.strftime(DATETIME_PRINT_FORMAT), - 'lock_end_timestamp': int(self.lock_end_time.replace(tzinfo=timezone.utc - ).timestamp() * 1000), - 'reason': self.reason, - 'side': self.side, - 'active': self.active, + "id": self.id, + "pair": self.pair, + "lock_time": self.lock_time.strftime(DATETIME_PRINT_FORMAT), + "lock_timestamp": int(self.lock_time.replace(tzinfo=timezone.utc).timestamp() * 1000), + "lock_end_time": self.lock_end_time.strftime(DATETIME_PRINT_FORMAT), + "lock_end_timestamp": int( + self.lock_end_time.replace(tzinfo=timezone.utc).timestamp() * 1000 + ), + "reason": self.reason, + "side": self.side, + "active": self.active, } diff --git a/freqtrade/persistence/pairlock_middleware.py b/freqtrade/persistence/pairlock_middleware.py index dd6bacf3a..616906658 100644 --- a/freqtrade/persistence/pairlock_middleware.py +++ b/freqtrade/persistence/pairlock_middleware.py @@ -21,7 +21,7 @@ class PairLocks: use_db = True locks: List[PairLock] = [] - timeframe: str = '' + timeframe: str = "" @staticmethod def reset_locks() -> None: @@ -32,8 +32,14 @@ class PairLocks: PairLocks.locks = [] @staticmethod - def lock_pair(pair: str, until: datetime, reason: Optional[str] = None, *, - now: Optional[datetime] = None, side: str = '*') -> PairLock: + def lock_pair( + pair: str, + until: datetime, + reason: Optional[str] = None, + *, + now: Optional[datetime] = None, + side: str = "*", + ) -> PairLock: """ Create PairLock from now to "until". 
Uses database by default, unless PairLocks.use_db is set to False, @@ -50,7 +56,7 @@ class PairLocks: lock_end_time=timeframe_to_next_date(PairLocks.timeframe, until), reason=reason, side=side, - active=True + active=True, ) if PairLocks.use_db: PairLock.session.add(lock) @@ -60,8 +66,9 @@ class PairLocks: return lock @staticmethod - def get_pair_locks(pair: Optional[str], now: Optional[datetime] = None, - side: str = '*') -> Sequence[PairLock]: + def get_pair_locks( + pair: Optional[str], now: Optional[datetime] = None, side: str = "*" + ) -> Sequence[PairLock]: """ Get all currently active locks for this pair :param pair: Pair to check for. Returns all current locks if pair is empty @@ -74,17 +81,22 @@ class PairLocks: if PairLocks.use_db: return PairLock.query_pair_locks(pair, now, side).all() else: - locks = [lock for lock in PairLocks.locks if ( - lock.lock_end_time >= now - and lock.active is True - and (pair is None or lock.pair == pair) - and (lock.side == '*' or lock.side == side) - )] + locks = [ + lock + for lock in PairLocks.locks + if ( + lock.lock_end_time >= now + and lock.active is True + and (pair is None or lock.pair == pair) + and (lock.side == "*" or lock.side == side) + ) + ] return locks @staticmethod def get_pair_longest_lock( - pair: str, now: Optional[datetime] = None, side: str = '*') -> Optional[PairLock]: + pair: str, now: Optional[datetime] = None, side: str = "*" + ) -> Optional[PairLock]: """ Get the lock that expires the latest for the pair given. """ @@ -93,7 +105,7 @@ class PairLocks: return locks[0] if locks else None @staticmethod - def unlock_pair(pair: str, now: Optional[datetime] = None, side: str = '*') -> None: + def unlock_pair(pair: str, now: Optional[datetime] = None, side: str = "*") -> None: """ Release all locks for this pair. :param pair: Pair to unlock @@ -124,10 +136,11 @@ class PairLocks: if PairLocks.use_db: # used in live modes logger.info(f"Releasing all locks with reason '{reason}':") - filters = [PairLock.lock_end_time > now, - PairLock.active.is_(True), - PairLock.reason == reason - ] + filters = [ + PairLock.lock_end_time > now, + PairLock.active.is_(True), + PairLock.reason == reason, + ] locks = PairLock.session.scalars(select(PairLock).filter(*filters)).all() for lock in locks: logger.info(f"Releasing lock for {lock.pair} with reason '{reason}'.") @@ -141,7 +154,7 @@ class PairLocks: lock.active = False @staticmethod - def is_global_lock(now: Optional[datetime] = None, side: str = '*') -> bool: + def is_global_lock(now: Optional[datetime] = None, side: str = "*") -> bool: """ :param now: Datetime object (generated via datetime.now(timezone.utc)). defaults to datetime.now(timezone.utc) @@ -149,10 +162,10 @@ class PairLocks: if not now: now = datetime.now(timezone.utc) - return len(PairLocks.get_pair_locks('*', now, side)) > 0 + return len(PairLocks.get_pair_locks("*", now, side)) > 0 @staticmethod - def is_pair_locked(pair: str, now: Optional[datetime] = None, side: str = '*') -> bool: + def is_pair_locked(pair: str, now: Optional[datetime] = None, side: str = "*") -> bool: """ :param pair: Pair to check for :param now: Datetime object (generated via datetime.now(timezone.utc)). 
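A brief aside on the `get_pair_locks` change above: when the database is not in use, locks are filtered purely in memory by expiry, pair, and side, with "*" acting as a wildcard side. The sketch below illustrates that filtering logic with a plain dataclass stand-in; it is not the SQLAlchemy model from this diff, and the names are illustrative only.

from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from typing import List, Optional

@dataclass
class LockStub:
    pair: str
    side: str
    lock_end_time: datetime
    active: bool = True

def active_locks(locks: List[LockStub], pair: Optional[str], now: datetime, side: str = "*"):
    # Mirrors the in-memory branch: keep locks that have not expired, are active,
    # match the pair (or any pair when pair is None), and match the side or "*".
    return [
        lock
        for lock in locks
        if (
            lock.lock_end_time >= now
            and lock.active
            and (pair is None or lock.pair == pair)
            and (lock.side == "*" or lock.side == side)
        )
    ]

now = datetime.now(timezone.utc)
locks = [LockStub("BTC/USDT", "*", now + timedelta(minutes=30))]
print(bool(active_locks(locks, "BTC/USDT", now, side="long")))  # True: "*" matches any side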
@@ -161,9 +174,8 @@ class PairLocks: if not now: now = datetime.now(timezone.utc) - return ( - len(PairLocks.get_pair_locks(pair, now, side)) > 0 - or PairLocks.is_global_lock(now, side) + return len(PairLocks.get_pair_locks(pair, now, side)) > 0 or PairLocks.is_global_lock( + now, side ) @staticmethod diff --git a/freqtrade/persistence/trade_model.py b/freqtrade/persistence/trade_model.py index 1421666ea..7d803ea0f 100644 --- a/freqtrade/persistence/trade_model.py +++ b/freqtrade/persistence/trade_model.py @@ -1,6 +1,7 @@ """ This module contains the class to persist trades into SQLite """ + import logging from collections import defaultdict from dataclasses import dataclass @@ -8,18 +9,39 @@ from datetime import datetime, timedelta, timezone from math import isclose from typing import Any, ClassVar, Dict, List, Optional, Sequence, cast -from sqlalchemy import (Enum, Float, ForeignKey, Integer, ScalarResult, Select, String, - UniqueConstraint, desc, func, select) +from sqlalchemy import ( + Enum, + Float, + ForeignKey, + Integer, + ScalarResult, + Select, + String, + UniqueConstraint, + desc, + func, + select, +) from sqlalchemy.orm import Mapped, lazyload, mapped_column, relationship, validates from typing_extensions import Self -from freqtrade.constants import (CANCELED_EXCHANGE_STATES, CUSTOM_TAG_MAX_LENGTH, - DATETIME_PRINT_FORMAT, MATH_CLOSE_PREC, NON_OPEN_EXCHANGE_STATES, - BuySell, LongShort) +from freqtrade.constants import ( + CANCELED_EXCHANGE_STATES, + CUSTOM_TAG_MAX_LENGTH, + DATETIME_PRINT_FORMAT, + MATH_CLOSE_PREC, + NON_OPEN_EXCHANGE_STATES, + BuySell, + LongShort, +) from freqtrade.enums import ExitType, TradingMode from freqtrade.exceptions import DependencyException, OperationalException -from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision, - price_to_precision) +from freqtrade.exchange import ( + ROUND_DOWN, + ROUND_UP, + amount_to_contract_precision, + price_to_precision, +) from freqtrade.leverage import interest from freqtrade.misc import safe_value_fallback from freqtrade.persistence.base import ModelBase, SessionType @@ -49,16 +71,17 @@ class Order(ModelBase): Mirrors CCXT Order structure """ - __tablename__ = 'orders' + + __tablename__ = "orders" __allow_unmapped__ = True session: ClassVar[SessionType] # Uniqueness should be ensured over pair, order_id # its likely that order_id is unique per Pair on some exchanges. 
- __table_args__ = (UniqueConstraint('ft_pair', 'order_id', name="_order_pair_order_id"),) + __table_args__ = (UniqueConstraint("ft_pair", "order_id", name="_order_pair_order_id"),) id: Mapped[int] = mapped_column(Integer, primary_key=True) - ft_trade_id: Mapped[int] = mapped_column(Integer, ForeignKey('trades.id'), index=True) + ft_trade_id: Mapped[int] = mapped_column(Integer, ForeignKey("trades.id"), index=True) _trade_live: Mapped["Trade"] = relationship("Trade", back_populates="orders", lazy="immediate") _trade_bt: "LocalTrade" = None # type: ignore @@ -89,17 +112,18 @@ class Order(ModelBase): funding_fee: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) ft_fee_base: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) - ft_order_tag: Mapped[Optional[str]] = mapped_column(String(CUSTOM_TAG_MAX_LENGTH), - nullable=True) + ft_order_tag: Mapped[Optional[str]] = mapped_column( + String(CUSTOM_TAG_MAX_LENGTH), nullable=True + ) @property def order_date_utc(self) -> datetime: - """ Order-date with UTC timezoneinfo""" + """Order-date with UTC timezoneinfo""" return self.order_date.replace(tzinfo=timezone.utc) @property def order_filled_utc(self) -> Optional[datetime]: - """ last order-date with UTC timezoneinfo""" + """last order-date with UTC timezoneinfo""" return ( self.order_filled_date.replace(tzinfo=timezone.utc) if self.order_filled_date else None ) @@ -128,8 +152,9 @@ class Order(ModelBase): @property def safe_remaining(self) -> float: return ( - self.remaining if self.remaining is not None else - self.safe_amount - (self.filled or 0.0) + self.remaining + if self.remaining is not None + else self.safe_amount - (self.filled or 0.0) ) @property @@ -146,35 +171,36 @@ class Order(ModelBase): @property def stake_amount(self) -> float: - """ Amount in stake currency used for this order""" + """Amount in stake currency used for this order""" return self.safe_amount * self.safe_price / self.trade.leverage def __repr__(self): - - return (f"Order(id={self.id}, trade={self.ft_trade_id}, order_id={self.order_id}, " - f"side={self.side}, filled={self.safe_filled}, price={self.safe_price}, " - f"status={self.status}, date={self.order_date_utc:{DATETIME_PRINT_FORMAT}})") + return ( + f"Order(id={self.id}, trade={self.ft_trade_id}, order_id={self.order_id}, " + f"side={self.side}, filled={self.safe_filled}, price={self.safe_price}, " + f"status={self.status}, date={self.order_date_utc:{DATETIME_PRINT_FORMAT}})" + ) def update_from_ccxt_object(self, order): """ Update Order from ccxt response Only updates if fields are available from ccxt - """ - if self.order_id != str(order['id']): + if self.order_id != str(order["id"]): raise DependencyException("Order-id's don't match") - self.status = safe_value_fallback(order, 'status', default_value=self.status) - self.symbol = safe_value_fallback(order, 'symbol', default_value=self.symbol) - self.order_type = safe_value_fallback(order, 'type', default_value=self.order_type) - self.side = safe_value_fallback(order, 'side', default_value=self.side) - self.price = safe_value_fallback(order, 'price', default_value=self.price) - self.amount = safe_value_fallback(order, 'amount', default_value=self.amount) - self.filled = safe_value_fallback(order, 'filled', default_value=self.filled) - self.average = safe_value_fallback(order, 'average', default_value=self.average) - self.remaining = safe_value_fallback(order, 'remaining', default_value=self.remaining) - self.cost = safe_value_fallback(order, 'cost', default_value=self.cost) - 
self.stop_price = safe_value_fallback(order, 'stopPrice', default_value=self.stop_price) - order_date = safe_value_fallback(order, 'timestamp') + self.status = safe_value_fallback(order, "status", default_value=self.status) + self.symbol = safe_value_fallback(order, "symbol", default_value=self.symbol) + self.order_type = safe_value_fallback(order, "type", default_value=self.order_type) + self.side = safe_value_fallback(order, "side", default_value=self.side) + self.price = safe_value_fallback(order, "price", default_value=self.price) + self.amount = safe_value_fallback(order, "amount", default_value=self.amount) + self.filled = safe_value_fallback(order, "filled", default_value=self.filled) + self.average = safe_value_fallback(order, "average", default_value=self.average) + self.remaining = safe_value_fallback(order, "remaining", default_value=self.remaining) + self.cost = safe_value_fallback(order, "cost", default_value=self.cost) + self.stop_price = safe_value_fallback(order, "stopPrice", default_value=self.stop_price) + order_date = safe_value_fallback(order, "timestamp") if order_date: self.order_date = datetime.fromtimestamp(order_date / 1000, tz=timezone.utc) elif not self.order_date: @@ -183,35 +209,37 @@ class Order(ModelBase): self.ft_is_open = True if self.status in NON_OPEN_EXCHANGE_STATES: self.ft_is_open = False - if (order.get('filled', 0.0) or 0.0) > 0 and not self.order_filled_date: + if (order.get("filled", 0.0) or 0.0) > 0 and not self.order_filled_date: self.order_filled_date = dt_from_ts( - safe_value_fallback(order, 'lastTradeTimestamp', default_value=dt_ts()) + safe_value_fallback(order, "lastTradeTimestamp", default_value=dt_ts()) ) self.order_update_date = datetime.now(timezone.utc) - def to_ccxt_object(self, stopPriceName: str = 'stopPrice') -> Dict[str, Any]: + def to_ccxt_object(self, stopPriceName: str = "stopPrice") -> Dict[str, Any]: order: Dict[str, Any] = { - 'id': self.order_id, - 'symbol': self.ft_pair, - 'price': self.price, - 'average': self.average, - 'amount': self.amount, - 'cost': self.cost, - 'type': self.order_type, - 'side': self.ft_order_side, - 'filled': self.filled, - 'remaining': self.remaining, - 'datetime': self.order_date_utc.strftime('%Y-%m-%dT%H:%M:%S.%f'), - 'timestamp': int(self.order_date_utc.timestamp() * 1000), - 'status': self.status, - 'fee': None, - 'info': {}, + "id": self.order_id, + "symbol": self.ft_pair, + "price": self.price, + "average": self.average, + "amount": self.amount, + "cost": self.cost, + "type": self.order_type, + "side": self.ft_order_side, + "filled": self.filled, + "remaining": self.remaining, + "datetime": self.order_date_utc.strftime("%Y-%m-%dT%H:%M:%S.%f"), + "timestamp": int(self.order_date_utc.timestamp() * 1000), + "status": self.status, + "fee": None, + "info": {}, } - if self.ft_order_side == 'stoploss': - order.update({ - stopPriceName: self.stop_price, - 'ft_order_type': 'stoploss', - }) + if self.ft_order_side == "stoploss": + order.update( + { + stopPriceName: self.stop_price, + "ft_order_type": "stoploss", + } + ) return order @@ -221,48 +249,57 @@ class Order(ModelBase): Only used for backtesting. 
""" resp = { - 'amount': self.safe_amount, - 'safe_price': self.safe_price, - 'ft_order_side': self.ft_order_side, - 'order_filled_timestamp': dt_ts_none(self.order_filled_utc), - 'ft_is_entry': self.ft_order_side == entry_side, - 'ft_order_tag': self.ft_order_tag, + "amount": self.safe_amount, + "safe_price": self.safe_price, + "ft_order_side": self.ft_order_side, + "order_filled_timestamp": dt_ts_none(self.order_filled_utc), + "ft_is_entry": self.ft_order_side == entry_side, + "ft_order_tag": self.ft_order_tag, } if not minified: - resp.update({ - 'pair': self.ft_pair, - 'order_id': self.order_id, - 'status': self.status, - 'average': round(self.average, 8) if self.average else 0, - 'cost': self.cost if self.cost else 0, - 'filled': self.filled, - 'is_open': self.ft_is_open, - 'order_date': self.order_date.strftime(DATETIME_PRINT_FORMAT) - if self.order_date else None, - 'order_timestamp': int(self.order_date.replace( - tzinfo=timezone.utc).timestamp() * 1000) if self.order_date else None, - 'order_filled_date': self.order_filled_date.strftime(DATETIME_PRINT_FORMAT) - if self.order_filled_date else None, - 'order_type': self.order_type, - 'price': self.price, - 'remaining': self.remaining, - 'ft_fee_base': self.ft_fee_base, - 'funding_fee': self.funding_fee, - }) + resp.update( + { + "pair": self.ft_pair, + "order_id": self.order_id, + "status": self.status, + "average": round(self.average, 8) if self.average else 0, + "cost": self.cost if self.cost else 0, + "filled": self.filled, + "is_open": self.ft_is_open, + "order_date": ( + self.order_date.strftime(DATETIME_PRINT_FORMAT) if self.order_date else None + ), + "order_timestamp": ( + int(self.order_date.replace(tzinfo=timezone.utc).timestamp() * 1000) + if self.order_date + else None + ), + "order_filled_date": ( + self.order_filled_date.strftime(DATETIME_PRINT_FORMAT) + if self.order_filled_date + else None + ), + "order_type": self.order_type, + "price": self.price, + "remaining": self.remaining, + "ft_fee_base": self.ft_fee_base, + "funding_fee": self.funding_fee, + } + ) return resp - def close_bt_order(self, close_date: datetime, trade: 'LocalTrade'): + def close_bt_order(self, close_date: datetime, trade: "LocalTrade"): self.order_filled_date = close_date self.filled = self.amount self.remaining = 0 - self.status = 'closed' + self.status = "closed" self.ft_is_open = False # Assign funding fees to Order. # Assumes backtesting will use date_last_filled_utc to calculate future funding fees. 
self.funding_fee = trade.funding_fee_running trade.funding_fee_running = 0.0 - if (self.ft_order_side == trade.entry_side and self.price): + if self.ft_order_side == trade.entry_side and self.price: trade.open_rate = self.price trade.recalc_trade_from_orders() if trade.nr_of_successful_entries == 1: @@ -271,7 +308,7 @@ class Order(ModelBase): trade.adjust_stop_loss(trade.open_rate, trade.stop_loss_pct) @staticmethod - def update_orders(orders: List['Order'], order: Dict[str, Any]): + def update_orders(orders: List["Order"], order: Dict[str, Any]): """ Get all non-closed orders - useful when trying to batch-update orders """ @@ -279,7 +316,7 @@ class Order(ModelBase): logger.warning(f"{order} is not a valid response object.") return - filtered_orders = [o for o in orders if o.order_id == order.get('id')] + filtered_orders = [o for o in orders if o.order_id == order.get("id")] if filtered_orders: oobj = filtered_orders[0] oobj.update_from_ccxt_object(order) @@ -289,25 +326,30 @@ class Order(ModelBase): @classmethod def parse_from_ccxt_object( - cls, order: Dict[str, Any], pair: str, side: str, - amount: Optional[float] = None, price: Optional[float] = None) -> Self: + cls, + order: Dict[str, Any], + pair: str, + side: str, + amount: Optional[float] = None, + price: Optional[float] = None, + ) -> Self: """ Parse an order from a ccxt object and return a new order Object. Optional support for overriding amount and price is only used for test simplification. """ o = cls( - order_id=str(order['id']), + order_id=str(order["id"]), ft_order_side=side, ft_pair=pair, - ft_amount=amount if amount else order['amount'], - ft_price=price if price else order['price'], - ) + ft_amount=amount if amount else order["amount"], + ft_price=price if price else order["price"], + ) o.update_from_ccxt_object(order) return o @staticmethod - def get_open_orders() -> Sequence['Order']: + def get_open_orders() -> Sequence["Order"]: """ Retrieve open orders from the database :return: List of open orders @@ -315,7 +357,7 @@ class Order(ModelBase): return Order.session.scalars(select(Order).filter(Order.ft_is_open.is_(True))).all() @staticmethod - def order_by_id(order_id: str) -> Optional['Order']: + def order_by_id(order_id: str) -> Optional["Order"]: """ Retrieve order based on order_id :return: Order or None @@ -327,14 +369,14 @@ class LocalTrade: """ Trade database model. Used in backtesting - must be aligned to Trade model! 
- """ + use_db: bool = False # Trades container for backtesting - trades: List['LocalTrade'] = [] - trades_open: List['LocalTrade'] = [] + trades: List["LocalTrade"] = [] + trades_open: List["LocalTrade"] = [] # Copy of trades_open - but indexed by pair - bt_trades_open_pp: Dict[str, List['LocalTrade']] = defaultdict(list) + bt_trades_open_pp: Dict[str, List["LocalTrade"]] = defaultdict(list) bt_open_open_trade_count: int = 0 total_profit: float = 0 realized_profit: float = 0 @@ -343,17 +385,17 @@ class LocalTrade: orders: List[Order] = [] - exchange: str = '' - pair: str = '' - base_currency: Optional[str] = '' - stake_currency: Optional[str] = '' + exchange: str = "" + pair: str = "" + base_currency: Optional[str] = "" + stake_currency: Optional[str] = "" is_open: bool = True fee_open: float = 0.0 fee_open_cost: Optional[float] = None - fee_open_currency: Optional[str] = '' + fee_open_currency: Optional[str] = "" fee_close: Optional[float] = 0.0 fee_close_cost: Optional[float] = None - fee_close_currency: Optional[str] = '' + fee_close_currency: Optional[str] = "" open_rate: float = 0.0 open_rate_requested: Optional[float] = None # open_trade_value - calculated via _calc_open_trade_value @@ -381,9 +423,9 @@ class LocalTrade: max_rate: Optional[float] = None # Lowest price reached min_rate: Optional[float] = None - exit_reason: Optional[str] = '' - exit_order_status: Optional[str] = '' - strategy: Optional[str] = '' + exit_reason: Optional[str] = "" + exit_order_status: Optional[str] = "" + strategy: Optional[str] = "" enter_tag: Optional[str] = None timeframe: Optional[int] = None @@ -428,14 +470,14 @@ class LocalTrade: @property def has_no_leverage(self) -> bool: """Returns true if this is a non-leverage, non-short trade""" - return ((self.leverage == 1.0 or self.leverage is None) and not self.is_short) + return (self.leverage == 1.0 or self.leverage is None) and not self.is_short @property def borrowed(self) -> float: """ - The amount of currency borrowed from the exchange for leverage trades - If a long trade, the amount is in base currency - If a short trade, the amount is in the other currency being traded + The amount of currency borrowed from the exchange for leverage trades + If a long trade, the amount is in base currency + If a short trade, the amount is in the other currency being traded """ if self.has_no_leverage: return 0.0 @@ -446,7 +488,7 @@ class LocalTrade: @property def _date_last_filled_utc(self) -> Optional[datetime]: - """ Date of the last filled order""" + """Date of the last filled order""" orders = self.select_filled_orders() if orders: return max(o.order_filled_utc for o in orders if o.order_filled_utc) @@ -454,7 +496,7 @@ class LocalTrade: @property def date_last_filled_utc(self) -> datetime: - """ Date of the last filled order - or open_date if no orders are filled""" + """Date of the last filled order - or open_date if no orders are filled""" dt_last_filled = self._date_last_filled_utc if not dt_last_filled: return self.open_date_utc @@ -462,11 +504,10 @@ class LocalTrade: @property def date_entry_fill_utc(self) -> Optional[datetime]: - """ Date of the first filled order""" + """Date of the first filled order""" orders = self.select_filled_orders(self.entry_side) - if ( - orders - and len(filled_date := [o.order_filled_utc for o in orders if o.order_filled_utc]) + if orders and len( + filled_date := [o.order_filled_utc for o in orders if o.order_filled_utc] ): return min(filled_date) return None @@ -512,9 +553,9 @@ class LocalTrade: Compatibility layer for 
asset - which can be empty for old trades. """ try: - return self.base_currency or self.pair.split('/')[0] + return self.base_currency or self.pair.split("/")[0] except IndexError: - return '' + return "" @property def safe_quote_currency(self) -> str: @@ -522,16 +563,16 @@ class LocalTrade: Compatibility layer for asset - which can be empty for old trades. """ try: - return self.stake_currency or self.pair.split('/')[1].split(':')[0] + return self.stake_currency or self.pair.split("/")[1].split(":")[0] except IndexError: - return '' + return "" @property def open_orders(self) -> List[Order]: """ All open orders for this trade excluding stoploss orders """ - return [o for o in self.orders if o.ft_is_open and o.ft_order_side != 'stoploss'] + return [o for o in self.orders if o.ft_is_open and o.ft_order_side != "stoploss"] @property def has_open_orders(self) -> bool: @@ -539,8 +580,7 @@ class LocalTrade: True if there are open orders for this trade excluding stoploss orders """ open_orders_wo_sl = [ - o for o in self.orders - if o.ft_order_side not in ['stoploss'] and o.ft_is_open + o for o in self.orders if o.ft_order_side not in ["stoploss"] and o.ft_is_open ] return len(open_orders_wo_sl) > 0 @@ -549,10 +589,7 @@ class LocalTrade: """ All open stoploss orders for this trade """ - return [ - o for o in self.orders - if o.ft_order_side in ['stoploss'] and o.ft_is_open - ] + return [o for o in self.orders if o.ft_order_side in ["stoploss"] and o.ft_is_open] @property def has_open_sl_orders(self) -> bool: @@ -560,8 +597,7 @@ class LocalTrade: True if there are open stoploss orders for this trade """ open_sl_orders = [ - o for o in self.orders - if o.ft_order_side in ['stoploss'] and o.ft_is_open + o for o in self.orders if o.ft_order_side in ["stoploss"] and o.ft_is_open ] return len(open_sl_orders) > 0 @@ -570,16 +606,12 @@ class LocalTrade: """ All stoploss orders for this trade """ - return [ - o for o in self.orders - if o.ft_order_side in ['stoploss'] - ] + return [o for o in self.orders if o.ft_order_side in ["stoploss"]] @property def open_orders_ids(self) -> List[str]: open_orders_ids_wo_sl = [ - oo.order_id for oo in self.open_orders - if oo.ft_order_side not in ['stoploss'] + oo.order_id for oo in self.open_orders if oo.ft_order_side not in ["stoploss"] ] return open_orders_ids_wo_sl @@ -590,17 +622,18 @@ class LocalTrade: self.orders = [] if self.trading_mode == TradingMode.MARGIN and self.interest_rate is None: raise OperationalException( - f"{self.trading_mode.value} trading requires param interest_rate on trades") + f"{self.trading_mode.value} trading requires param interest_rate on trades" + ) def __repr__(self): open_since = ( - self.open_date_utc.strftime(DATETIME_PRINT_FORMAT) if self.is_open else 'closed' + self.open_date_utc.strftime(DATETIME_PRINT_FORMAT) if self.is_open else "closed" ) return ( - f'Trade(id={self.id}, pair={self.pair}, amount={self.amount:.8f}, ' - f'is_short={self.is_short or False}, leverage={self.leverage or 1.0}, ' - f'open_rate={self.open_rate:.8f}, open_since={open_since})' + f"Trade(id={self.id}, pair={self.pair}, amount={self.amount:.8f}, " + f"is_short={self.is_short or False}, leverage={self.leverage or 1.0}, " + f"open_rate={self.open_rate:.8f}, open_since={open_since})" ) def to_json(self, minified: bool = False) -> Dict[str, Any]: @@ -613,85 +646,93 @@ class LocalTrade: orders_json = [order.to_json(self.entry_side, minified) for order in filled_or_open_orders] return { - 'trade_id': self.id, - 'pair': self.pair, - 'base_currency': 
self.safe_base_currency, - 'quote_currency': self.safe_quote_currency, - 'is_open': self.is_open, - 'exchange': self.exchange, - 'amount': round(self.amount, 8), - 'amount_requested': round(self.amount_requested, 8) if self.amount_requested else None, - 'stake_amount': round(self.stake_amount, 8), - 'max_stake_amount': round(self.max_stake_amount, 8) if self.max_stake_amount else None, - 'strategy': self.strategy, - 'enter_tag': self.enter_tag, - 'timeframe': self.timeframe, - - 'fee_open': self.fee_open, - 'fee_open_cost': self.fee_open_cost, - 'fee_open_currency': self.fee_open_currency, - 'fee_close': self.fee_close, - 'fee_close_cost': self.fee_close_cost, - 'fee_close_currency': self.fee_close_currency, - - 'open_date': self.open_date.strftime(DATETIME_PRINT_FORMAT), - 'open_timestamp': dt_ts_none(self.open_date_utc), - 'open_fill_date': (self.date_entry_fill_utc.strftime(DATETIME_PRINT_FORMAT) - if self.date_entry_fill_utc else None), - 'open_fill_timestamp': dt_ts_none(self.date_entry_fill_utc), - 'open_rate': self.open_rate, - 'open_rate_requested': self.open_rate_requested, - 'open_trade_value': round(self.open_trade_value, 8), - - 'close_date': (self.close_date.strftime(DATETIME_PRINT_FORMAT) - if self.close_date else None), - 'close_timestamp': dt_ts_none(self.close_date_utc), - 'realized_profit': self.realized_profit or 0.0, + "trade_id": self.id, + "pair": self.pair, + "base_currency": self.safe_base_currency, + "quote_currency": self.safe_quote_currency, + "is_open": self.is_open, + "exchange": self.exchange, + "amount": round(self.amount, 8), + "amount_requested": round(self.amount_requested, 8) if self.amount_requested else None, + "stake_amount": round(self.stake_amount, 8), + "max_stake_amount": round(self.max_stake_amount, 8) if self.max_stake_amount else None, + "strategy": self.strategy, + "enter_tag": self.enter_tag, + "timeframe": self.timeframe, + "fee_open": self.fee_open, + "fee_open_cost": self.fee_open_cost, + "fee_open_currency": self.fee_open_currency, + "fee_close": self.fee_close, + "fee_close_cost": self.fee_close_cost, + "fee_close_currency": self.fee_close_currency, + "open_date": self.open_date.strftime(DATETIME_PRINT_FORMAT), + "open_timestamp": dt_ts_none(self.open_date_utc), + "open_fill_date": ( + self.date_entry_fill_utc.strftime(DATETIME_PRINT_FORMAT) + if self.date_entry_fill_utc + else None + ), + "open_fill_timestamp": dt_ts_none(self.date_entry_fill_utc), + "open_rate": self.open_rate, + "open_rate_requested": self.open_rate_requested, + "open_trade_value": round(self.open_trade_value, 8), + "close_date": ( + self.close_date.strftime(DATETIME_PRINT_FORMAT) if self.close_date else None + ), + "close_timestamp": dt_ts_none(self.close_date_utc), + "realized_profit": self.realized_profit or 0.0, # Close-profit corresponds to relative realized_profit ratio - 'realized_profit_ratio': self.close_profit or None, - 'close_rate': self.close_rate, - 'close_rate_requested': self.close_rate_requested, - 'close_profit': self.close_profit, # Deprecated - 'close_profit_pct': round(self.close_profit * 100, 2) if self.close_profit else None, - 'close_profit_abs': self.close_profit_abs, # Deprecated - - 'trade_duration_s': (int((self.close_date_utc - self.open_date_utc).total_seconds()) - if self.close_date else None), - 'trade_duration': (int((self.close_date_utc - self.open_date_utc).total_seconds() // 60) - if self.close_date else None), - - 'profit_ratio': self.close_profit, - 'profit_pct': round(self.close_profit * 100, 2) if self.close_profit else None, - 
'profit_abs': self.close_profit_abs, - - 'exit_reason': self.exit_reason, - 'exit_order_status': self.exit_order_status, - 'stop_loss_abs': self.stop_loss, - 'stop_loss_ratio': self.stop_loss_pct if self.stop_loss_pct else None, - 'stop_loss_pct': (self.stop_loss_pct * 100) if self.stop_loss_pct else None, - 'stoploss_last_update': (self.stoploss_last_update_utc.strftime(DATETIME_PRINT_FORMAT) - if self.stoploss_last_update_utc else None), - 'stoploss_last_update_timestamp': dt_ts_none(self.stoploss_last_update_utc), - 'initial_stop_loss_abs': self.initial_stop_loss, - 'initial_stop_loss_ratio': (self.initial_stop_loss_pct - if self.initial_stop_loss_pct else None), - 'initial_stop_loss_pct': (self.initial_stop_loss_pct * 100 - if self.initial_stop_loss_pct else None), - 'min_rate': self.min_rate, - 'max_rate': self.max_rate, - - 'leverage': self.leverage, - 'interest_rate': self.interest_rate, - 'liquidation_price': self.liquidation_price, - 'is_short': self.is_short, - 'trading_mode': self.trading_mode, - 'funding_fees': self.funding_fees, - 'amount_precision': self.amount_precision, - 'price_precision': self.price_precision, - 'precision_mode': self.precision_mode, - 'contract_size': self.contract_size, - 'has_open_orders': self.has_open_orders, - 'orders': orders_json, + "realized_profit_ratio": self.close_profit or None, + "close_rate": self.close_rate, + "close_rate_requested": self.close_rate_requested, + "close_profit": self.close_profit, # Deprecated + "close_profit_pct": round(self.close_profit * 100, 2) if self.close_profit else None, + "close_profit_abs": self.close_profit_abs, # Deprecated + "trade_duration_s": ( + int((self.close_date_utc - self.open_date_utc).total_seconds()) + if self.close_date + else None + ), + "trade_duration": ( + int((self.close_date_utc - self.open_date_utc).total_seconds() // 60) + if self.close_date + else None + ), + "profit_ratio": self.close_profit, + "profit_pct": round(self.close_profit * 100, 2) if self.close_profit else None, + "profit_abs": self.close_profit_abs, + "exit_reason": self.exit_reason, + "exit_order_status": self.exit_order_status, + "stop_loss_abs": self.stop_loss, + "stop_loss_ratio": self.stop_loss_pct if self.stop_loss_pct else None, + "stop_loss_pct": (self.stop_loss_pct * 100) if self.stop_loss_pct else None, + "stoploss_last_update": ( + self.stoploss_last_update_utc.strftime(DATETIME_PRINT_FORMAT) + if self.stoploss_last_update_utc + else None + ), + "stoploss_last_update_timestamp": dt_ts_none(self.stoploss_last_update_utc), + "initial_stop_loss_abs": self.initial_stop_loss, + "initial_stop_loss_ratio": ( + self.initial_stop_loss_pct if self.initial_stop_loss_pct else None + ), + "initial_stop_loss_pct": ( + self.initial_stop_loss_pct * 100 if self.initial_stop_loss_pct else None + ), + "min_rate": self.min_rate, + "max_rate": self.max_rate, + "leverage": self.leverage, + "interest_rate": self.interest_rate, + "liquidation_price": self.liquidation_price, + "is_short": self.is_short, + "trading_mode": self.trading_mode, + "funding_fees": self.funding_fees, + "amount_precision": self.amount_precision, + "price_precision": self.price_precision, + "precision_mode": self.precision_mode, + "contract_size": self.contract_size, + "has_open_orders": self.has_open_orders, + "orders": orders_json, } @staticmethod @@ -741,8 +782,13 @@ class LocalTrade: self.stop_loss_pct = -1 * abs(percent) - def adjust_stop_loss(self, current_price: float, stoploss: Optional[float], - initial: bool = False, allow_refresh: bool = False) -> None: + 
def adjust_stop_loss( + self, + current_price: float, + stoploss: Optional[float], + initial: bool = False, + allow_refresh: bool = False, + ) -> None: """ This adjusts the stop loss to it's most recently observed setting :param current_price: Current rate the asset is traded @@ -761,14 +807,21 @@ class LocalTrade: else: new_loss = float(current_price * (1 - abs(stoploss / leverage))) - stop_loss_norm = price_to_precision(new_loss, self.price_precision, self.precision_mode, - rounding_mode=ROUND_DOWN if self.is_short else ROUND_UP) + stop_loss_norm = price_to_precision( + new_loss, + self.price_precision, + self.precision_mode, + rounding_mode=ROUND_DOWN if self.is_short else ROUND_UP, + ) # no stop loss assigned yet if self.initial_stop_loss_pct is None: self.__set_stop_loss(stop_loss_norm, stoploss) self.initial_stop_loss = price_to_precision( - stop_loss_norm, self.price_precision, self.precision_mode, - rounding_mode=ROUND_DOWN if self.is_short else ROUND_UP) + stop_loss_norm, + self.price_precision, + self.precision_mode, + rounding_mode=ROUND_DOWN if self.is_short else ROUND_UP, + ) self.initial_stop_loss_pct = -1 * abs(stoploss) # evaluate if the stop loss needs to be updated @@ -797,7 +850,8 @@ class LocalTrade: f"initial_stop_loss={self.initial_stop_loss:.8f}, " f"stop_loss={self.stop_loss:.8f}. " f"Trailing stoploss saved us: " - f"{float(self.stop_loss) - float(self.initial_stop_loss or 0.0):.8f}.") + f"{float(self.stop_loss) - float(self.initial_stop_loss or 0.0):.8f}." + ) def update_trade(self, order: Order, recalculating: bool = False) -> None: """ @@ -807,11 +861,11 @@ class LocalTrade: """ # Ignore open and cancelled orders - if order.status == 'open' or order.safe_price is None: + if order.status == "open" or order.safe_price is None: return - logger.info(f'Updating trade (id={self.id}) ...') - if order.ft_order_side != 'stoploss': + logger.info(f"Updating trade (id={self.id}) ...") + if order.ft_order_side != "stoploss": order.funding_fee = self.funding_fee_running # Reset running funding fees self.funding_fee_running = 0.0 @@ -823,29 +877,29 @@ class LocalTrade: self.amount = order.safe_amount_after_fee if self.is_open: payment = "SELL" if self.is_short else "BUY" - logger.info(f'{order_type}_{payment} has been fulfilled for {self}.') + logger.info(f"{order_type}_{payment} has been fulfilled for {self}.") self.recalc_trade_from_orders() elif order.ft_order_side == self.exit_side: if self.is_open: payment = "BUY" if self.is_short else "SELL" # * On margin shorts, you buy a little bit more than the amount (amount + interest) - logger.info(f'{order_type}_{payment} has been fulfilled for {self}.') + logger.info(f"{order_type}_{payment} has been fulfilled for {self}.") - elif order.ft_order_side == 'stoploss' and order.status not in ('open', ): + elif order.ft_order_side == "stoploss" and order.status not in ("open",): self.close_rate_requested = self.stop_loss self.exit_reason = ExitType.STOPLOSS_ON_EXCHANGE.value if self.is_open and order.safe_filled > 0: - logger.info(f'{order_type} is hit for {self}.') + logger.info(f"{order_type} is hit for {self}.") else: - raise ValueError(f'Unknown order type: {order.order_type}') + raise ValueError(f"Unknown order type: {order.order_type}") if order.ft_order_side != self.entry_side: - amount_tr = amount_to_contract_precision(self.amount, self.amount_precision, - self.precision_mode, self.contract_size) - if ( - isclose(order.safe_amount_after_fee, amount_tr, abs_tol=MATH_CLOSE_PREC) - or (not recalculating and 
order.safe_amount_after_fee > amount_tr) + amount_tr = amount_to_contract_precision( + self.amount, self.amount_precision, self.precision_mode, self.contract_size + ) + if isclose(order.safe_amount_after_fee, amount_tr, abs_tol=MATH_CLOSE_PREC) or ( + not recalculating and order.safe_amount_after_fee > amount_tr ): # When recalculating a trade, only coming out to 0 can force a close self.close(order.safe_price) @@ -862,14 +916,17 @@ class LocalTrade: self.close_rate = rate self.close_date = self.close_date or self._date_last_filled_utc or dt_now() self.is_open = False - self.exit_order_status = 'closed' + self.exit_order_status = "closed" self.recalc_trade_from_orders(is_closing=True) if show_msg: - logger.info(f"Marking {self} as closed as the trade is fulfilled " - "and found no open orders for it.") + logger.info( + f"Marking {self} as closed as the trade is fulfilled " + "and found no open orders for it." + ) - def update_fee(self, fee_cost: float, fee_currency: Optional[str], fee_rate: Optional[float], - side: str) -> None: + def update_fee( + self, fee_cost: float, fee_currency: Optional[str], fee_rate: Optional[float], side: str + ) -> None: """ Update Fee parameters. Only acts once per side """ @@ -900,13 +957,42 @@ class LocalTrade: def update_order(self, order: Dict) -> None: Order.update_orders(self.orders, order) + @property + def fully_canceled_entry_order_count(self) -> int: + """ + Get amount of failed exiting orders + assumes full exits. + """ + return len( + [ + o + for o in self.orders + if o.ft_order_side == self.entry_side + and o.status in CANCELED_EXCHANGE_STATES + and o.filled == 0 + ] + ) + + @property + def canceled_exit_order_count(self) -> int: + """ + Get amount of failed exiting orders + assumes full exits. + """ + return len( + [ + o + for o in self.orders + if o.ft_order_side == self.exit_side and o.status in CANCELED_EXCHANGE_STATES + ] + ) + def get_canceled_exit_order_count(self) -> int: """ Get amount of failed exiting orders assumes full exits. """ - return len([o for o in self.orders if o.ft_order_side == self.exit_side - and o.status in CANCELED_EXCHANGE_STATES]) + return self.canceled_exit_order_count def _calc_open_trade_value(self, amount: float, open_rate: float) -> float: """ @@ -948,7 +1034,6 @@ class LocalTrade: return interest(exchange_name=self.exchange, borrowed=borrowed, rate=rate, hours=hours) def _calc_base_close(self, amount: FtPrecise, rate: float, fee: Optional[float]) -> FtPrecise: - close_trade = amount * FtPrecise(rate) fees = close_trade * FtPrecise(fee or 0.0) @@ -972,8 +1057,7 @@ class LocalTrade: if trading_mode == TradingMode.SPOT: return float(self._calc_base_close(amount1, rate, self.fee_close)) - elif (trading_mode == TradingMode.MARGIN): - + elif trading_mode == TradingMode.MARGIN: total_interest = self.calculate_interest() if self.is_short: @@ -983,7 +1067,7 @@ class LocalTrade: # Currency already owned for longs, no need to purchase return float(self._calc_base_close(amount1, rate, self.fee_close) - total_interest) - elif (trading_mode == TradingMode.FUTURES): + elif trading_mode == TradingMode.FUTURES: funding_fees = self.funding_fees or 0.0 # Positive funding_fees -> Trade has gained from fees. # Negative funding_fees -> Trade had to pay the fees. 
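# --- Illustrative sketch, not part of the diff -----------------------------
# The hunk above only re-wraps calc_close_trade_value(); its arithmetic is
# unchanged. For orientation, the spot and futures *long* branches reduce to
# the standalone calculation below. Assumption: _calc_base_close() returns
# amount * rate minus the exit fee for longs - its return statement is not
# shown in this hunk, so treat this as a sketch, not the real method.
def close_trade_value_sketch(
    amount: float,
    rate: float,
    fee_close: float,
    funding_fees: float = 0.0,
    futures: bool = False,
) -> float:
    close_trade = amount * rate      # gross exit value
    fees = close_trade * fee_close   # exchange fee charged on the exit
    value = close_trade - fees       # spot long: net exit value
    if futures:
        # Positive funding_fees raise the value, negative ones lower it,
        # matching the futures-branch comments above.
        value += funding_fees
    return value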
@@ -993,10 +1077,12 @@ class LocalTrade: return float(self._calc_base_close(amount1, rate, self.fee_close)) + funding_fees else: raise OperationalException( - f"{self.trading_mode.value} trading is not yet available using freqtrade") + f"{self.trading_mode.value} trading is not yet available using freqtrade" + ) - def calc_profit(self, rate: float, amount: Optional[float] = None, - open_rate: Optional[float] = None) -> float: + def calc_profit( + self, rate: float, amount: Optional[float] = None, open_rate: Optional[float] = None + ) -> float: """ Calculate the absolute profit in stake currency between Close and Open trade Deprecated - only available for backwards compatibility @@ -1008,8 +1094,9 @@ class LocalTrade: prof = self.calculate_profit(rate, amount, open_rate) return prof.profit_abs - def calculate_profit(self, rate: float, amount: Optional[float] = None, - open_rate: Optional[float] = None) -> ProfitStruct: + def calculate_profit( + self, rate: float, amount: Optional[float] = None, open_rate: Optional[float] = None + ) -> ProfitStruct: """ Calculate profit metrics (absolute, ratio, total, total ratio). All calculations include fees. @@ -1042,7 +1129,8 @@ class LocalTrade: total_profit_abs = profit_abs + self.realized_profit total_profit_ratio = ( (total_profit_abs / self.max_stake_amount) * self.leverage - if self.max_stake_amount else 0.0 + if self.max_stake_amount + else 0.0 ) total_profit_ratio = float(f"{total_profit_ratio:.8f}") profit_abs = float(f"{profit_abs:.8f}") @@ -1055,8 +1143,8 @@ class LocalTrade: ) def calc_profit_ratio( - self, rate: float, amount: Optional[float] = None, - open_rate: Optional[float] = None) -> float: + self, rate: float, amount: Optional[float] = None, open_rate: Optional[float] = None + ) -> float: """ Calculates the profit as ratio (including fee). :param rate: rate to compare with. @@ -1071,10 +1159,10 @@ class LocalTrade: else: open_trade_value = self._calc_open_trade_value(amount, open_rate) - short_close_zero = (self.is_short and close_trade_value == 0.0) - long_close_zero = (not self.is_short and open_trade_value == 0.0) + short_close_zero = self.is_short and close_trade_value == 0.0 + long_close_zero = not self.is_short and open_trade_value == 0.0 - if (short_close_zero or long_close_zero): + if short_close_zero or long_close_zero: return 0.0 else: if self.is_short: @@ -1100,7 +1188,7 @@ class LocalTrade: for i, o in enumerate(self.orders): if o.ft_is_open or not o.filled: continue - funding_fees += (o.funding_fee or 0.0) + funding_fees += o.funding_fee or 0.0 tmp_amount = FtPrecise(o.safe_amount_after_fee) tmp_price = FtPrecise(o.safe_price) @@ -1130,7 +1218,7 @@ class LocalTrade: close_profit = (close_profit_abs / total_stake) * self.leverage else: total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price) - max_stake_amount += (tmp_amount * price) + max_stake_amount += tmp_amount * price self.funding_fees = funding_fees self.max_stake_amount = float(max_stake_amount) @@ -1140,7 +1228,8 @@ class LocalTrade: self.close_profit_abs = prof.profit_abs current_amount_tr = amount_to_contract_precision( - float(current_amount), self.amount_precision, self.precision_mode, self.contract_size) + float(current_amount), self.amount_precision, self.precision_mode, self.contract_size + ) if current_amount_tr > 0.0: # Trade is still open # Leverage not updated, as we don't allow changing leverage through DCA at the moment. 
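# --- Illustrative sketch, not part of the diff -----------------------------
# The ratio calculations reformatted above all follow one pattern: absolute
# profit divided by the stake, scaled by leverage (see total_profit_ratio and
# close_profit in the hunks above). A worked example with hypothetical
# numbers, purely for orientation:
stake_amount = 100.0  # hypothetical stake committed to the position
leverage = 3.0        # hypothetical leverage
profit_abs = 4.5      # hypothetical absolute profit, fees already included

# Mirrors: close_profit = (close_profit_abs / total_stake) * self.leverage
profit_ratio = (profit_abs / stake_amount) * leverage if stake_amount else 0.0
print(f"profit ratio: {profit_ratio:.3f}")  # 0.135, i.e. 13.5 %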
@@ -1167,8 +1256,12 @@ class LocalTrade: return o return None - def select_order(self, order_side: Optional[str] = None, - is_open: Optional[bool] = None, only_filled: bool = False) -> Optional[Order]: + def select_order( + self, + order_side: Optional[str] = None, + is_open: Optional[bool] = None, + only_filled: bool = False, + ) -> Optional[Order]: """ Finds latest order for this orderside and status :param order_side: ft_order_side of the order (either 'buy', 'sell' or 'stoploss') @@ -1188,32 +1281,38 @@ class LocalTrade: else: return None - def select_filled_orders(self, order_side: Optional[str] = None) -> List['Order']: + def select_filled_orders(self, order_side: Optional[str] = None) -> List["Order"]: """ Finds filled orders for this order side. Will not return open orders which already partially filled. :param order_side: Side of the order (either 'buy', 'sell', or None) :return: array of Order objects """ - return [o for o in self.orders if ((o.ft_order_side == order_side) or (order_side is None)) - and o.ft_is_open is False - and o.filled - and o.status in NON_OPEN_EXCHANGE_STATES] + return [ + o + for o in self.orders + if ((o.ft_order_side == order_side) or (order_side is None)) + and o.ft_is_open is False + and o.filled + and o.status in NON_OPEN_EXCHANGE_STATES + ] - def select_filled_or_open_orders(self) -> List['Order']: + def select_filled_or_open_orders(self) -> List["Order"]: """ Finds filled or open orders :param order_side: Side of the order (either 'buy', 'sell', or None) :return: array of Order objects """ - return [o for o in self.orders if - ( - o.ft_is_open is False - and (o.filled or 0) > 0 - and o.status in NON_OPEN_EXCHANGE_STATES - ) - or (o.ft_is_open is True and o.status is not None) - ] + return [ + o + for o in self.orders + if ( + o.ft_is_open is False + and (o.filled or 0) > 0 + and o.status in NON_OPEN_EXCHANGE_STATES + ) + or (o.ft_is_open is True and o.status is not None) + ] def set_custom_data(self, key: str, value: Any) -> None: """ @@ -1274,7 +1373,7 @@ class LocalTrade: :return: int count of buy orders that have been filled for this trade. """ - return len(self.select_filled_orders('buy')) + return len(self.select_filled_orders("buy")) @property def nr_of_successful_sells(self) -> int: @@ -1283,11 +1382,11 @@ class LocalTrade: WARNING: Please use nr_of_successful_exits for short support. :return: int count of sell orders that have been filled for this trade. """ - return len(self.select_filled_orders('sell')) + return len(self.select_filled_orders("sell")) @property def sell_reason(self) -> Optional[str]: - """ DEPRECATED! Please use exit_reason instead.""" + """DEPRECATED! Please use exit_reason instead.""" return self.exit_reason @property @@ -1295,10 +1394,13 @@ class LocalTrade: return self.close_rate or self.close_rate_requested or 0.0 @staticmethod - def get_trades_proxy(*, pair: Optional[str] = None, is_open: Optional[bool] = None, - open_date: Optional[datetime] = None, - close_date: Optional[datetime] = None, - ) -> List['LocalTrade']: + def get_trades_proxy( + *, + pair: Optional[str] = None, + is_open: Optional[bool] = None, + open_date: Optional[datetime] = None, + close_date: Optional[datetime] = None, + ) -> List["LocalTrade"]: """ Helper function to query Trades. Returns a List of trades, filtered on the parameters given. 
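# --- Illustrative usage sketch, not part of the diff ------------------------
# get_trades_proxy() above is keyword-only; Trade (database mode, further down
# in this diff) exposes the same signature. The pair name and the one-day
# window below are hypothetical example values, not taken from the diff.
from datetime import datetime, timedelta, timezone

from freqtrade.persistence import Trade

open_btc_trades = Trade.get_trades_proxy(pair="BTC/USDT", is_open=True)
closed_last_day = Trade.get_trades_proxy(
    is_open=False,
    close_date=datetime.now(timezone.utc) - timedelta(days=1),
)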
@@ -1329,8 +1431,9 @@ class LocalTrade: if open_date: sel_trades = [trade for trade in sel_trades if trade.open_date > open_date] if close_date: - sel_trades = [trade for trade in sel_trades if trade.close_date - and trade.close_date > close_date] + sel_trades = [ + trade for trade in sel_trades if trade.close_date and trade.close_date > close_date + ] return sel_trades @@ -1386,8 +1489,7 @@ class LocalTrade: logger.info(f"Found open trade: {trade}") # skip case if trailing-stop changed the stoploss already. - if (not trade.is_stop_loss_trailing - and trade.initial_stop_loss_pct != desired_stoploss): + if not trade.is_stop_loss_trailing and trade.initial_stop_loss_pct != desired_stoploss: # Stoploss value got changed logger.info(f"Stoploss for {trade} needs adjustment...") @@ -1407,6 +1509,7 @@ class LocalTrade: :return: Trade instance """ import rapidjson + data = rapidjson.loads(json_str) trade = cls( __FROM_JSON=True, @@ -1432,8 +1535,11 @@ class LocalTrade: open_rate=data["open_rate"], open_rate_requested=data["open_rate_requested"], open_trade_value=data["open_trade_value"], - close_date=(datetime.fromtimestamp(data["close_timestamp"] // 1000, tz=timezone.utc) - if data["close_timestamp"] else None), + close_date=( + datetime.fromtimestamp(data["close_timestamp"] // 1000, tz=timezone.utc) + if data["close_timestamp"] + else None + ), realized_profit=data["realized_profit"], close_rate=data["close_rate"], close_rate_requested=data["close_rate_requested"], @@ -1453,13 +1559,12 @@ class LocalTrade: is_short=data["is_short"], trading_mode=data["trading_mode"], funding_fees=data["funding_fees"], - amount_precision=data.get('amount_precision', None), - price_precision=data.get('price_precision', None), - precision_mode=data.get('precision_mode', None), - contract_size=data.get('contract_size', None), + amount_precision=data.get("amount_precision", None), + price_precision=data.get("price_precision", None), + precision_mode=data.get("precision_mode", None), + contract_size=data.get("contract_size", None), ) for order in data["orders"]: - order_obj = Order( amount=order["amount"], ft_amount=order["amount"], @@ -1472,9 +1577,11 @@ class LocalTrade: cost=order["cost"], filled=order["filled"], order_date=datetime.strptime(order["order_date"], DATETIME_PRINT_FORMAT), - order_filled_date=(datetime.fromtimestamp( - order["order_filled_timestamp"] // 1000, tz=timezone.utc) - if order["order_filled_timestamp"] else None), + order_filled_date=( + datetime.fromtimestamp(order["order_filled_timestamp"] // 1000, tz=timezone.utc) + if order["order_filled_timestamp"] + else None + ), order_type=order["order_type"], price=order["price"], ft_price=order["price"], @@ -1494,7 +1601,8 @@ class Trade(ModelBase, LocalTrade): Note: Fields must be aligned with LocalTrade class """ - __tablename__ = 'trades' + + __tablename__ = "trades" session: ClassVar[SessionType] use_db: bool = True @@ -1502,11 +1610,11 @@ class Trade(ModelBase, LocalTrade): id: Mapped[int] = mapped_column(Integer, primary_key=True) # type: ignore orders: Mapped[List[Order]] = relationship( - "Order", order_by="Order.id", cascade="all, delete-orphan", lazy="selectin", - innerjoin=True) # type: ignore + "Order", order_by="Order.id", cascade="all, delete-orphan", lazy="selectin", innerjoin=True + ) # type: ignore custom_data: Mapped[List[_CustomData]] = relationship( - "_CustomData", cascade="all, delete-orphan", - lazy="raise") + "_CustomData", cascade="all, delete-orphan", lazy="raise" + ) exchange: Mapped[str] = mapped_column(String(25), 
nullable=False) # type: ignore pair: Mapped[str] = mapped_column(String(25), nullable=False, index=True) # type: ignore @@ -1515,61 +1623,76 @@ class Trade(ModelBase, LocalTrade): is_open: Mapped[bool] = mapped_column(nullable=False, default=True, index=True) # type: ignore fee_open: Mapped[float] = mapped_column(Float(), nullable=False, default=0.0) # type: ignore fee_open_cost: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore - fee_open_currency: Mapped[Optional[str]] = mapped_column( - String(25), nullable=True) # type: ignore - fee_close: Mapped[Optional[float]] = mapped_column( - Float(), nullable=False, default=0.0) # type: ignore + fee_open_currency: Mapped[Optional[str]] = mapped_column( # type: ignore + String(25), nullable=True + ) + fee_close: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=False, default=0.0 + ) fee_close_cost: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore - fee_close_currency: Mapped[Optional[str]] = mapped_column( - String(25), nullable=True) # type: ignore + fee_close_currency: Mapped[Optional[str]] = mapped_column( # type: ignore + String(25), nullable=True + ) open_rate: Mapped[float] = mapped_column(Float()) # type: ignore - open_rate_requested: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True) # type: ignore + open_rate_requested: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True + ) # open_trade_value - calculated via _calc_open_trade_value open_trade_value: Mapped[float] = mapped_column(Float(), nullable=True) # type: ignore close_rate: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore close_rate_requested: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore - realized_profit: Mapped[float] = mapped_column( - Float(), default=0.0, nullable=True) # type: ignore + realized_profit: Mapped[float] = mapped_column( # type: ignore + Float(), default=0.0, nullable=True + ) close_profit: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore close_profit_abs: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore stake_amount: Mapped[float] = mapped_column(Float(), nullable=False) # type: ignore max_stake_amount: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore amount: Mapped[float] = mapped_column(Float()) # type: ignore amount_requested: Mapped[Optional[float]] = mapped_column(Float()) # type: ignore - open_date: Mapped[datetime] = mapped_column( - nullable=False, default=datetime.now) # type: ignore + open_date: Mapped[datetime] = mapped_column( # type: ignore + nullable=False, default=datetime.now + ) close_date: Mapped[Optional[datetime]] = mapped_column() # type: ignore # absolute value of the stop loss stop_loss: Mapped[float] = mapped_column(Float(), nullable=True, default=0.0) # type: ignore # percentage value of the stop loss stop_loss_pct: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore # absolute value of the initial stop loss - initial_stop_loss: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True, default=0.0) # type: ignore + initial_stop_loss: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True, default=0.0 + ) # percentage value of the initial stop loss - initial_stop_loss_pct: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True) # type: ignore - is_stop_loss_trailing: Mapped[bool] = mapped_column( - nullable=False, default=False) # type: ignore + 
initial_stop_loss_pct: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True + ) + is_stop_loss_trailing: Mapped[bool] = mapped_column( # type: ignore + nullable=False, default=False + ) # absolute value of the highest reached price - max_rate: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True, default=0.0) # type: ignore + max_rate: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True, default=0.0 + ) # Lowest price reached min_rate: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore - exit_reason: Mapped[Optional[str]] = mapped_column( - String(CUSTOM_TAG_MAX_LENGTH), nullable=True) # type: ignore - exit_order_status: Mapped[Optional[str]] = mapped_column( - String(100), nullable=True) # type: ignore + exit_reason: Mapped[Optional[str]] = mapped_column( # type: ignore + String(CUSTOM_TAG_MAX_LENGTH), nullable=True + ) + exit_order_status: Mapped[Optional[str]] = mapped_column( # type: ignore + String(100), nullable=True + ) strategy: Mapped[Optional[str]] = mapped_column(String(100), nullable=True) # type: ignore - enter_tag: Mapped[Optional[str]] = mapped_column( - String(CUSTOM_TAG_MAX_LENGTH), nullable=True) # type: ignore + enter_tag: Mapped[Optional[str]] = mapped_column( # type: ignore + String(CUSTOM_TAG_MAX_LENGTH), nullable=True + ) timeframe: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) # type: ignore - trading_mode: Mapped[TradingMode] = mapped_column( - Enum(TradingMode), nullable=True) # type: ignore - amount_precision: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True) # type: ignore + trading_mode: Mapped[TradingMode] = mapped_column( # type: ignore + Enum(TradingMode), nullable=True + ) + amount_precision: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True + ) price_precision: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore precision_mode: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) # type: ignore contract_size: Mapped[Optional[float]] = mapped_column(Float(), nullable=True) # type: ignore @@ -1577,28 +1700,32 @@ class Trade(ModelBase, LocalTrade): # Leverage trading properties leverage: Mapped[float] = mapped_column(Float(), nullable=True, default=1.0) # type: ignore is_short: Mapped[bool] = mapped_column(nullable=False, default=False) # type: ignore - liquidation_price: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True) # type: ignore + liquidation_price: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True + ) # Margin Trading Properties - interest_rate: Mapped[float] = mapped_column( - Float(), nullable=False, default=0.0) # type: ignore + interest_rate: Mapped[float] = mapped_column( # type: ignore + Float(), nullable=False, default=0.0 + ) # Futures properties - funding_fees: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True, default=None) # type: ignore - funding_fee_running: Mapped[Optional[float]] = mapped_column( - Float(), nullable=True, default=None) # type: ignore + funding_fees: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True, default=None + ) + funding_fee_running: Mapped[Optional[float]] = mapped_column( # type: ignore + Float(), nullable=True, default=None + ) def __init__(self, **kwargs): - from_json = kwargs.pop('__FROM_JSON', None) + from_json = kwargs.pop("__FROM_JSON", None) super().__init__(**kwargs) if not from_json: # Skip recalculation when loading 
from json self.realized_profit = 0 self.recalc_open_trade_value() - @validates('enter_tag', 'exit_reason') + @validates("enter_tag", "exit_reason") def validate_string_len(self, key, value): max_len = getattr(self.__class__, key).prop.columns[0].type.length if value and len(value) > max_len: @@ -1606,7 +1733,6 @@ class Trade(ModelBase, LocalTrade): return value def delete(self) -> None: - for order in self.orders: Order.session.delete(order) @@ -1624,10 +1750,13 @@ class Trade(ModelBase, LocalTrade): Trade.session.rollback() @staticmethod - def get_trades_proxy(*, pair: Optional[str] = None, is_open: Optional[bool] = None, - open_date: Optional[datetime] = None, - close_date: Optional[datetime] = None, - ) -> List['LocalTrade']: + def get_trades_proxy( + *, + pair: Optional[str] = None, + is_open: Optional[bool] = None, + open_date: Optional[datetime] = None, + close_date: Optional[datetime] = None, + ) -> List["LocalTrade"]: """ Helper function to query Trades.j Returns a List of trades, filtered on the parameters given. @@ -1649,9 +1778,7 @@ class Trade(ModelBase, LocalTrade): return cast(List[LocalTrade], Trade.get_trades(trade_filter).all()) else: return LocalTrade.get_trades_proxy( - pair=pair, is_open=is_open, - open_date=open_date, - close_date=close_date + pair=pair, is_open=is_open, open_date=open_date, close_date=close_date ) @staticmethod @@ -1666,7 +1793,7 @@ class Trade(ModelBase, LocalTrade): :return: unsorted query object """ if not Trade.use_db: - raise NotImplementedError('`Trade.get_trades()` not supported in backtesting mode.') + raise NotImplementedError("`Trade.get_trades()` not supported in backtesting mode.") if trade_filter is not None: if not isinstance(trade_filter, list): trade_filter = [trade_filter] @@ -1680,7 +1807,7 @@ class Trade(ModelBase, LocalTrade): return this_query @staticmethod - def get_trades(trade_filter=None, include_orders: bool = True) -> ScalarResult['Trade']: + def get_trades(trade_filter=None, include_orders: bool = True) -> ScalarResult["Trade"]: """ Helper function to query Trades using filters. NOTE: Not supported in Backtesting. @@ -1701,10 +1828,13 @@ class Trade(ModelBase, LocalTrade): Returns all open trades which don't have open fees set correctly NOTE: Not supported in Backtesting. """ - return Trade.get_trades([Trade.fee_open_currency.is_(None), - Trade.orders.any(), - Trade.is_open.is_(True), - ]).all() + return Trade.get_trades( + [ + Trade.fee_open_currency.is_(None), + Trade.orders.any(), + Trade.is_open.is_(True), + ] + ).all() @staticmethod def get_closed_trades_without_assigned_fees(): @@ -1712,10 +1842,13 @@ class Trade(ModelBase, LocalTrade): Returns all closed trades which don't have fees set correctly NOTE: Not supported in Backtesting. 
""" - return Trade.get_trades([Trade.fee_close_currency.is_(None), - Trade.orders.any(), - Trade.is_open.is_(False), - ]).all() + return Trade.get_trades( + [ + Trade.fee_close_currency.is_(None), + Trade.orders.any(), + Trade.is_open.is_(False), + ] + ).all() @staticmethod def get_total_closed_profit() -> float: @@ -1727,8 +1860,10 @@ class Trade(ModelBase, LocalTrade): select(func.sum(Trade.close_profit_abs)).filter(Trade.is_open.is_(False)) ).scalar_one() else: - total_profit = sum(t.close_profit_abs # type: ignore - for t in LocalTrade.get_trades_proxy(is_open=False)) + total_profit = sum( + t.close_profit_abs # type: ignore + for t in LocalTrade.get_trades_proxy(is_open=False) + ) return total_profit or 0 @staticmethod @@ -1743,7 +1878,8 @@ class Trade(ModelBase, LocalTrade): ) else: total_open_stake_amount = sum( - t.stake_amount for t in LocalTrade.get_trades_proxy(is_open=True)) + t.stake_amount for t in LocalTrade.get_trades_proxy(is_open=True) + ) return total_open_stake_amount or 0 @staticmethod @@ -1760,22 +1896,23 @@ class Trade(ModelBase, LocalTrade): pair_rates = Trade.session.execute( select( Trade.pair, - func.sum(Trade.close_profit).label('profit_sum'), - func.sum(Trade.close_profit_abs).label('profit_sum_abs'), - func.count(Trade.pair).label('count') - ).filter(*filters) + func.sum(Trade.close_profit).label("profit_sum"), + func.sum(Trade.close_profit_abs).label("profit_sum_abs"), + func.count(Trade.pair).label("count"), + ) + .filter(*filters) .group_by(Trade.pair) - .order_by(desc('profit_sum_abs')) - ).all() + .order_by(desc("profit_sum_abs")) + ).all() return [ { - 'pair': pair, - 'profit_ratio': profit, - 'profit': round(profit * 100, 2), # Compatibility mode - 'profit_pct': round(profit * 100, 2), - 'profit_abs': profit_abs, - 'count': count + "pair": pair, + "profit_ratio": profit, + "profit": round(profit * 100, 2), # Compatibility mode + "profit_pct": round(profit * 100, 2), + "profit_abs": profit_abs, + "count": count, } for pair, profit, profit_abs, count in pair_rates ] @@ -1789,27 +1926,28 @@ class Trade(ModelBase, LocalTrade): """ filters: List = [Trade.is_open.is_(False)] - if (pair is not None): + if pair is not None: filters.append(Trade.pair == pair) enter_tag_perf = Trade.session.execute( select( Trade.enter_tag, - func.sum(Trade.close_profit).label('profit_sum'), - func.sum(Trade.close_profit_abs).label('profit_sum_abs'), - func.count(Trade.pair).label('count') - ).filter(*filters) + func.sum(Trade.close_profit).label("profit_sum"), + func.sum(Trade.close_profit_abs).label("profit_sum_abs"), + func.count(Trade.pair).label("count"), + ) + .filter(*filters) .group_by(Trade.enter_tag) - .order_by(desc('profit_sum_abs')) + .order_by(desc("profit_sum_abs")) ).all() return [ { - 'enter_tag': enter_tag if enter_tag is not None else "Other", - 'profit_ratio': profit, - 'profit_pct': round(profit * 100, 2), - 'profit_abs': profit_abs, - 'count': count + "enter_tag": enter_tag if enter_tag is not None else "Other", + "profit_ratio": profit, + "profit_pct": round(profit * 100, 2), + "profit_abs": profit_abs, + "count": count, } for enter_tag, profit, profit_abs, count in enter_tag_perf ] @@ -1823,26 +1961,27 @@ class Trade(ModelBase, LocalTrade): """ filters: List = [Trade.is_open.is_(False)] - if (pair is not None): + if pair is not None: filters.append(Trade.pair == pair) sell_tag_perf = Trade.session.execute( select( Trade.exit_reason, - func.sum(Trade.close_profit).label('profit_sum'), - func.sum(Trade.close_profit_abs).label('profit_sum_abs'), - 
func.count(Trade.pair).label('count') - ).filter(*filters) + func.sum(Trade.close_profit).label("profit_sum"), + func.sum(Trade.close_profit_abs).label("profit_sum_abs"), + func.count(Trade.pair).label("count"), + ) + .filter(*filters) .group_by(Trade.exit_reason) - .order_by(desc('profit_sum_abs')) + .order_by(desc("profit_sum_abs")) ).all() return [ { - 'exit_reason': exit_reason if exit_reason is not None else "Other", - 'profit_ratio': profit, - 'profit_pct': round(profit * 100, 2), - 'profit_abs': profit_abs, - 'count': count + "exit_reason": exit_reason if exit_reason is not None else "Other", + "profit_ratio": profit, + "profit_pct": round(profit * 100, 2), + "profit_abs": profit_abs, + "count": count, } for exit_reason, profit, profit_abs, count in sell_tag_perf ] @@ -1856,19 +1995,20 @@ class Trade(ModelBase, LocalTrade): """ filters: List = [Trade.is_open.is_(False)] - if (pair is not None): + if pair is not None: filters.append(Trade.pair == pair) mix_tag_perf = Trade.session.execute( select( Trade.id, Trade.enter_tag, Trade.exit_reason, - func.sum(Trade.close_profit).label('profit_sum'), - func.sum(Trade.close_profit_abs).label('profit_sum_abs'), - func.count(Trade.pair).label('count') - ).filter(*filters) + func.sum(Trade.close_profit).label("profit_sum"), + func.sum(Trade.close_profit_abs).label("profit_sum_abs"), + func.count(Trade.pair).label("count"), + ) + .filter(*filters) .group_by(Trade.id) - .order_by(desc('profit_sum_abs')) + .order_by(desc("profit_sum_abs")) ).all() resp: List[Dict] = [] @@ -1876,59 +2016,64 @@ class Trade(ModelBase, LocalTrade): enter_tag = enter_tag if enter_tag is not None else "Other" exit_reason = exit_reason if exit_reason is not None else "Other" - if (exit_reason is not None and enter_tag is not None): + if exit_reason is not None and enter_tag is not None: mix_tag = enter_tag + " " + exit_reason i = 0 if not any(item["mix_tag"] == mix_tag for item in resp): - resp.append({'mix_tag': mix_tag, - 'profit_ratio': profit, - 'profit_pct': round(profit * 100, 2), - 'profit_abs': profit_abs, - 'count': count}) + resp.append( + { + "mix_tag": mix_tag, + "profit_ratio": profit, + "profit_pct": round(profit * 100, 2), + "profit_abs": profit_abs, + "count": count, + } + ) else: while i < len(resp): if resp[i]["mix_tag"] == mix_tag: resp[i] = { - 'mix_tag': mix_tag, - 'profit_ratio': profit + resp[i]["profit_ratio"], - 'profit_pct': round(profit + resp[i]["profit_ratio"] * 100, 2), - 'profit_abs': profit_abs + resp[i]["profit_abs"], - 'count': 1 + resp[i]["count"] + "mix_tag": mix_tag, + "profit_ratio": profit + resp[i]["profit_ratio"], + "profit_pct": round(profit + resp[i]["profit_ratio"] * 100, 2), + "profit_abs": profit_abs + resp[i]["profit_abs"], + "count": 1 + resp[i]["count"], } i += 1 return resp @staticmethod - def get_best_pair(start_date: datetime = datetime.fromtimestamp(0)): + def get_best_pair(start_date: Optional[datetime] = None): """ Get best pair with closed trade. NOTE: Not supported in Backtesting. 
         :returns: Tuple containing (pair, profit_sum)
         """
+        filters: List = [Trade.is_open.is_(False)]
+        if start_date:
+            filters.append(Trade.close_date >= start_date)
+
         best_pair = Trade.session.execute(
-            select(
-                Trade.pair,
-                func.sum(Trade.close_profit).label('profit_sum')
-            ).filter(Trade.is_open.is_(False) & (Trade.close_date >= start_date))
+            select(Trade.pair, func.sum(Trade.close_profit).label("profit_sum"))
+            .filter(*filters)
             .group_by(Trade.pair)
-            .order_by(desc('profit_sum'))
+            .order_by(desc("profit_sum"))
         ).first()
         return best_pair

     @staticmethod
-    def get_trading_volume(start_date: datetime = datetime.fromtimestamp(0)) -> float:
+    def get_trading_volume(start_date: Optional[datetime] = None) -> float:
         """
         Get Trade volume based on Orders
         NOTE: Not supported in Backtesting.
         :returns: Tuple containing (pair, profit_sum)
         """
+        filters = [Order.status == "closed"]
+        if start_date:
+            filters.append(Order.order_filled_date >= start_date)
         trading_volume = Trade.session.execute(
-            select(
-                func.sum(Order.cost).label('volume')
-            ).filter(
-                Order.order_filled_date >= start_date,
-                Order.status == 'closed'
-            )).scalar_one()
+            select(func.sum(Order.cost).label("volume")).filter(*filters)
+        ).scalar_one()
         return trading_volume or 0.0
diff --git a/freqtrade/persistence/usedb_context.py b/freqtrade/persistence/usedb_context.py
index 732f0b0f8..3266ca157 100644
--- a/freqtrade/persistence/usedb_context.py
+++ b/freqtrade/persistence/usedb_context.py
@@ -1,4 +1,3 @@
-
 from freqtrade.persistence.custom_data import CustomDataWrapper
 from freqtrade.persistence.pairlock_middleware import PairLocks
 from freqtrade.persistence.trade_model import Trade
@@ -20,13 +19,13 @@ def enable_database_use() -> None:
     Cleanup function to restore database usage.
     """
     PairLocks.use_db = True
-    PairLocks.timeframe = ''
+    PairLocks.timeframe = ""
     Trade.use_db = True
     CustomDataWrapper.use_db = True


 class FtNoDBContext:
-    def __init__(self, timeframe: str = ''):
+    def __init__(self, timeframe: str = ""):
         self.timeframe = timeframe

     def __enter__(self):
diff --git a/freqtrade/plot/plotting.py b/freqtrade/plot/plotting.py
index 4d29337a7..de0910732 100644
--- a/freqtrade/plot/plotting.py
+++ b/freqtrade/plot/plotting.py
@@ -7,13 +7,20 @@ import pandas as pd

 from freqtrade.configuration import TimeRange
 from freqtrade.constants import Config
-from freqtrade.data.btanalysis import (analyze_trade_parallelism, extract_trades_of_period,
-                                       load_trades)
+from freqtrade.data.btanalysis import (
+    analyze_trade_parallelism,
+    extract_trades_of_period,
+    load_trades,
+)
 from freqtrade.data.converter import trim_dataframe
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.data.history import get_timerange, load_data
-from freqtrade.data.metrics import (calculate_max_drawdown, calculate_underwater,
-                                    combine_dataframes_with_mean, create_cum_profit)
+from freqtrade.data.metrics import (
+    calculate_max_drawdown,
+    calculate_underwater,
+    combine_dataframes_with_mean,
+    create_cum_profit,
+)
 from freqtrade.enums import CandleType
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_prev_date, timeframe_to_seconds
@@ -43,55 +50,57 @@ def init_plotscript(config, markets: List, startup_candles: int = 0):
     """

     if "pairs" in config:
-        pairs = expand_pairlist(config['pairs'], markets)
+        pairs = expand_pairlist(config["pairs"], markets)
     else:
-        pairs = expand_pairlist(config['exchange']['pair_whitelist'], markets)
+        pairs = expand_pairlist(config["exchange"]["pair_whitelist"],
markets) # Set timerange to use - timerange = TimeRange.parse_timerange(config.get('timerange')) + timerange = TimeRange.parse_timerange(config.get("timerange")) data = load_data( - datadir=config.get('datadir'), + datadir=config.get("datadir"), pairs=pairs, - timeframe=config['timeframe'], + timeframe=config["timeframe"], timerange=timerange, startup_candles=startup_candles, - data_format=config['dataformat_ohlcv'], - candle_type=config.get('candle_type_def', CandleType.SPOT) + data_format=config["dataformat_ohlcv"], + candle_type=config.get("candle_type_def", CandleType.SPOT), ) if startup_candles and data: min_date, max_date = get_timerange(data) logger.info(f"Loading data from {min_date} to {max_date}") - timerange.adjust_start_if_necessary(timeframe_to_seconds(config['timeframe']), - startup_candles, min_date) + timerange.adjust_start_if_necessary( + timeframe_to_seconds(config["timeframe"]), startup_candles, min_date + ) no_trades = False filename = config.get("exportfilename") if config.get("no_trades", False): no_trades = True - elif config['trade_source'] == 'file': + elif config["trade_source"] == "file": if not filename.is_dir() and not filename.is_file(): logger.warning("Backtest file is missing skipping trades.") no_trades = True try: trades = load_trades( - config['trade_source'], - db_url=config.get('db_url'), + config["trade_source"], + db_url=config.get("db_url"), exportfilename=filename, no_trades=no_trades, - strategy=config.get('strategy'), + strategy=config.get("strategy"), ) except ValueError as e: raise OperationalException(e) from e if not trades.empty: - trades = trim_dataframe(trades, timerange, df_date_col='open_date') + trades = trim_dataframe(trades, timerange, df_date_col="open_date") - return {"ohlcv": data, - "trades": trades, - "pairs": pairs, - "timerange": timerange, - } + return { + "ohlcv": data, + "trades": trades, + "pairs": pairs, + "timerange": timerange, + } def add_indicators(fig, row, indicators: Dict[str, Dict], data: pd.DataFrame) -> make_subplots: @@ -104,38 +113,40 @@ def add_indicators(fig, row, indicators: Dict[str, Dict], data: pd.DataFrame) -> :param data: candlestick DataFrame """ plot_kinds = { - 'scatter': go.Scatter, - 'bar': go.Bar, + "scatter": go.Scatter, + "bar": go.Bar, } for indicator, conf in indicators.items(): logger.debug(f"indicator {indicator} with config {conf}") if indicator in data: - kwargs = {'x': data['date'], - 'y': data[indicator].values, - 'name': indicator - } + kwargs = {"x": data["date"], "y": data[indicator].values, "name": indicator} - plot_type = conf.get('type', 'scatter') - color = conf.get('color') - if plot_type == 'bar': - kwargs.update({'marker_color': color or 'DarkSlateGrey', - 'marker_line_color': color or 'DarkSlateGrey'}) + plot_type = conf.get("type", "scatter") + color = conf.get("color") + if plot_type == "bar": + kwargs.update( + { + "marker_color": color or "DarkSlateGrey", + "marker_line_color": color or "DarkSlateGrey", + } + ) else: if color: - kwargs.update({'line': {'color': color}}) - kwargs['mode'] = 'lines' - if plot_type != 'scatter': - logger.warning(f'Indicator {indicator} has unknown plot trace kind {plot_type}' - f', assuming "scatter".') + kwargs.update({"line": {"color": color}}) + kwargs["mode"] = "lines" + if plot_type != "scatter": + logger.warning( + f"Indicator {indicator} has unknown plot trace kind {plot_type}" + f', assuming "scatter".' 
+ ) - kwargs.update(conf.get('plotly', {})) + kwargs.update(conf.get("plotly", {})) trace = plot_kinds[plot_type](**kwargs) fig.add_trace(trace, row, 1) else: logger.info( - 'Indicator "%s" ignored. Reason: This indicator is not found ' - 'in your strategy.', - indicator + 'Indicator "%s" ignored. Reason: This indicator is not found ' "in your strategy.", + indicator, ) return fig @@ -161,33 +172,25 @@ def add_profit(fig, row, data: pd.DataFrame, column: str, name: str) -> make_sub return fig -def add_max_drawdown(fig, row, trades: pd.DataFrame, df_comb: pd.DataFrame, - timeframe: str, starting_balance: float) -> make_subplots: +def add_max_drawdown( + fig, row, trades: pd.DataFrame, df_comb: pd.DataFrame, timeframe: str, starting_balance: float +) -> make_subplots: """ Add scatter points indicating max drawdown """ try: - _, highdate, lowdate, _, _, max_drawdown = calculate_max_drawdown( - trades, - starting_balance=starting_balance - ) + drawdown = calculate_max_drawdown(trades, starting_balance=starting_balance) drawdown = go.Scatter( - x=[highdate, lowdate], + x=[drawdown.high_date, drawdown.low_date], y=[ - df_comb.loc[timeframe_to_prev_date(timeframe, highdate), 'cum_profit'], - df_comb.loc[timeframe_to_prev_date(timeframe, lowdate), 'cum_profit'], + df_comb.loc[timeframe_to_prev_date(timeframe, drawdown.high_date), "cum_profit"], + df_comb.loc[timeframe_to_prev_date(timeframe, drawdown.low_date), "cum_profit"], ], - mode='markers', - name=f"Max drawdown {max_drawdown:.2%}", - text=f"Max drawdown {max_drawdown:.2%}", - marker=dict( - symbol='square-open', - size=9, - line=dict(width=2), - color='green' - - ) + mode="markers", + name=f"Max drawdown {drawdown.relative_account_drawdown:.2%}", + text=f"Max drawdown {drawdown.relative_account_drawdown:.2%}", + marker=dict(symbol="square-open", size=9, line=dict(width=2), color="green"), ) fig.add_trace(drawdown, row, 1) except ValueError: @@ -201,27 +204,25 @@ def add_underwater(fig, row, trades: pd.DataFrame, starting_balance: float) -> m """ try: underwater = calculate_underwater( - trades, - value_col="profit_abs", - starting_balance=starting_balance + trades, value_col="profit_abs", starting_balance=starting_balance ) underwater_plot = go.Scatter( - x=underwater['date'], - y=underwater['drawdown'], + x=underwater["date"], + y=underwater["drawdown"], name="Underwater Plot", - fill='tozeroy', - fillcolor='#cc362b', - line={'color': '#cc362b'} + fill="tozeroy", + fillcolor="#cc362b", + line={"color": "#cc362b"}, ) underwater_plot_relative = go.Scatter( - x=underwater['date'], - y=(-underwater['drawdown_relative']), + x=underwater["date"], + y=(-underwater["drawdown_relative"]), name="Underwater Plot (%)", - fill='tozeroy', - fillcolor='green', - line={'color': 'green'} + fill="tozeroy", + fillcolor="green", + line={"color": "green"}, ) fig.add_trace(underwater_plot, row, 1) @@ -240,11 +241,11 @@ def add_parallelism(fig, row, trades: pd.DataFrame, timeframe: str) -> make_subp drawdown = go.Scatter( x=result.index, - y=result['open_trades'], + y=result["open_trades"], name="Parallel trades", - fill='tozeroy', - fillcolor='#242222', - line={'color': '#242222'}, + fill="tozeroy", + fillcolor="#242222", + line={"color": "#242222"}, ) fig.add_trace(drawdown, row, 1) except ValueError: @@ -259,52 +260,37 @@ def plot_trades(fig, trades: pd.DataFrame) -> make_subplots: # Trades can be empty if trades is not None and len(trades) > 0: # Create description for exit summarizing the trade - trades['desc'] = trades.apply( - lambda row: 
f"{row['profit_ratio']:.2%}, " + - (f"{row['enter_tag']}, " if row['enter_tag'] is not None else "") + - f"{row['exit_reason']}, " + - f"{row['trade_duration']} min", - axis=1) + trades["desc"] = trades.apply( + lambda row: f"{row['profit_ratio']:.2%}, " + + (f"{row['enter_tag']}, " if row["enter_tag"] is not None else "") + + f"{row['exit_reason']}, " + + f"{row['trade_duration']} min", + axis=1, + ) trade_entries = go.Scatter( x=trades["open_date"], y=trades["open_rate"], - mode='markers', - name='Trade entry', + mode="markers", + name="Trade entry", text=trades["desc"], - marker=dict( - symbol='circle-open', - size=11, - line=dict(width=2), - color='cyan' - - ) + marker=dict(symbol="circle-open", size=11, line=dict(width=2), color="cyan"), ) trade_exits = go.Scatter( - x=trades.loc[trades['profit_ratio'] > 0, "close_date"], - y=trades.loc[trades['profit_ratio'] > 0, "close_rate"], - text=trades.loc[trades['profit_ratio'] > 0, "desc"], - mode='markers', - name='Exit - Profit', - marker=dict( - symbol='square-open', - size=11, - line=dict(width=2), - color='green' - ) + x=trades.loc[trades["profit_ratio"] > 0, "close_date"], + y=trades.loc[trades["profit_ratio"] > 0, "close_rate"], + text=trades.loc[trades["profit_ratio"] > 0, "desc"], + mode="markers", + name="Exit - Profit", + marker=dict(symbol="square-open", size=11, line=dict(width=2), color="green"), ) trade_exits_loss = go.Scatter( - x=trades.loc[trades['profit_ratio'] <= 0, "close_date"], - y=trades.loc[trades['profit_ratio'] <= 0, "close_rate"], - text=trades.loc[trades['profit_ratio'] <= 0, "desc"], - mode='markers', - name='Exit - Loss', - marker=dict( - symbol='square-open', - size=11, - line=dict(width=2), - color='red' - ) + x=trades.loc[trades["profit_ratio"] <= 0, "close_date"], + y=trades.loc[trades["profit_ratio"] <= 0, "close_rate"], + text=trades.loc[trades["profit_ratio"] <= 0, "desc"], + mode="markers", + name="Exit - Loss", + marker=dict(symbol="square-open", size=11, line=dict(width=2), color="red"), ) fig.add_trace(trade_entries, 1, 1) fig.add_trace(trade_exits, 1, 1) @@ -314,8 +300,9 @@ def plot_trades(fig, trades: pd.DataFrame) -> make_subplots: return fig -def create_plotconfig(indicators1: List[str], indicators2: List[str], - plot_config: Dict[str, Dict]) -> Dict[str, Dict]: +def create_plotconfig( + indicators1: List[str], indicators2: List[str], plot_config: Dict[str, Dict] +) -> Dict[str, Dict]: """ Combines indicators 1 and indicators 2 into plot_config if necessary :param indicators1: List containing Main plot indicators @@ -326,34 +313,40 @@ def create_plotconfig(indicators1: List[str], indicators2: List[str], if plot_config: if indicators1: - plot_config['main_plot'] = {ind: {} for ind in indicators1} + plot_config["main_plot"] = {ind: {} for ind in indicators1} if indicators2: - plot_config['subplots'] = {'Other': {ind: {} for ind in indicators2}} + plot_config["subplots"] = {"Other": {ind: {} for ind in indicators2}} if not plot_config: # If no indicators and no plot-config given, use defaults. if not indicators1: - indicators1 = ['sma', 'ema3', 'ema5'] + indicators1 = ["sma", "ema3", "ema5"] if not indicators2: - indicators2 = ['macd', 'macdsignal'] + indicators2 = ["macd", "macdsignal"] # Create subplot configuration if plot_config is not available. 
plot_config = { - 'main_plot': {ind: {} for ind in indicators1}, - 'subplots': {'Other': {ind: {} for ind in indicators2}}, + "main_plot": {ind: {} for ind in indicators1}, + "subplots": {"Other": {ind: {} for ind in indicators2}}, } - if 'main_plot' not in plot_config: - plot_config['main_plot'] = {} + if "main_plot" not in plot_config: + plot_config["main_plot"] = {} - if 'subplots' not in plot_config: - plot_config['subplots'] = {} + if "subplots" not in plot_config: + plot_config["subplots"] = {} return plot_config -def plot_area(fig, row: int, data: pd.DataFrame, indicator_a: str, - indicator_b: str, label: str = "", - fill_color: str = "rgba(0,176,246,0.2)") -> make_subplots: - """ Creates a plot for the area between two traces and adds it to fig. +def plot_area( + fig, + row: int, + data: pd.DataFrame, + indicator_a: str, + indicator_b: str, + label: str = "", + fill_color: str = "rgba(0,176,246,0.2)", +) -> make_subplots: + """Creates a plot for the area between two traces and adds it to fig. :param fig: Plot figure to append to :param row: row number for this plot :param data: candlestick DataFrame @@ -365,21 +358,24 @@ def plot_area(fig, row: int, data: pd.DataFrame, indicator_a: str, """ if indicator_a in data and indicator_b in data: # make lines invisible to get the area plotted, only. - line = {'color': 'rgba(255,255,255,0)'} + line = {"color": "rgba(255,255,255,0)"} # TODO: Figure out why scattergl causes problems plotly/plotly.js#2284 - trace_a = go.Scatter(x=data.date, y=data[indicator_a], - showlegend=False, - line=line) - trace_b = go.Scatter(x=data.date, y=data[indicator_b], name=label, - fill="tonexty", fillcolor=fill_color, - line=line) + trace_a = go.Scatter(x=data.date, y=data[indicator_a], showlegend=False, line=line) + trace_b = go.Scatter( + x=data.date, + y=data[indicator_b], + name=label, + fill="tonexty", + fillcolor=fill_color, + line=line, + ) fig.add_trace(trace_a, row, 1) fig.add_trace(trace_b, row, 1) return fig def add_areas(fig, row: int, data: pd.DataFrame, indicators) -> make_subplots: - """ Adds all area plots (specified in plot_config) to fig. + """Adds all area plots (specified in plot_config) to fig. :param fig: Plot figure to append to :param row: row number for this plot :param data: candlestick DataFrame @@ -388,48 +384,43 @@ def add_areas(fig, row: int, data: pd.DataFrame, indicators) -> make_subplots: :return: fig with added filled_traces plot """ for indicator, ind_conf in indicators.items(): - if 'fill_to' in ind_conf: - indicator_b = ind_conf['fill_to'] + if "fill_to" in ind_conf: + indicator_b = ind_conf["fill_to"] if indicator in data and indicator_b in data: - label = ind_conf.get('fill_label', - f'{indicator}<>{indicator_b}') - fill_color = ind_conf.get('fill_color', 'rgba(0,176,246,0.2)') - fig = plot_area(fig, row, data, indicator, indicator_b, - label=label, fill_color=fill_color) + label = ind_conf.get("fill_label", f"{indicator}<>{indicator_b}") + fill_color = ind_conf.get("fill_color", "rgba(0,176,246,0.2)") + fig = plot_area( + fig, row, data, indicator, indicator_b, label=label, fill_color=fill_color + ) elif indicator not in data: logger.info( 'Indicator "%s" ignored. Reason: This indicator is not ' - 'found in your strategy.', indicator + "found in your strategy.", + indicator, ) elif indicator_b not in data: logger.info( - 'fill_to: "%s" ignored. Reason: This indicator is not ' - 'in your strategy.', indicator_b + 'fill_to: "%s" ignored. 
Reason: This indicator is not ' "in your strategy.", + indicator_b, ) return fig -def create_scatter( - data, - column_name, - color, - direction -) -> Optional[go.Scatter]: - +def create_scatter(data, column_name, color, direction) -> Optional[go.Scatter]: if column_name in data.columns: df_short = data[data[column_name] == 1] if len(df_short) > 0: shorts = go.Scatter( x=df_short.date, y=df_short.close, - mode='markers', + mode="markers", name=column_name, marker=dict( symbol=f"triangle-{direction}-dot", size=9, line=dict(width=1), color=color, - ) + ), ) return shorts else: @@ -439,10 +430,14 @@ def create_scatter( def generate_candlestick_graph( - pair: str, data: pd.DataFrame, trades: Optional[pd.DataFrame] = None, *, - indicators1: Optional[List[str]] = None, indicators2: Optional[List[str]] = None, - plot_config: Optional[Dict[str, Dict]] = None, - ) -> go.Figure: + pair: str, + data: pd.DataFrame, + trades: Optional[pd.DataFrame] = None, + *, + indicators1: Optional[List[str]] = None, + indicators2: Optional[List[str]] = None, + plot_config: Optional[Dict[str, Dict]] = None, +) -> go.Figure: """ Generate the graph from the data generated by Backtesting or from DB Volume will always be plotted in row2, so Row 1 and 3 are to our disposal for custom indicators @@ -459,8 +454,8 @@ def generate_candlestick_graph( indicators2 or [], plot_config or {}, ) - rows = 2 + len(plot_config['subplots']) - row_widths = [1 for _ in plot_config['subplots']] + rows = 2 + len(plot_config["subplots"]) + row_widths = [1 for _ in plot_config["subplots"]] # Define the graph fig = make_subplots( rows=rows, @@ -469,127 +464,131 @@ def generate_candlestick_graph( row_width=row_widths + [1, 4], vertical_spacing=0.0001, ) - fig['layout'].update(title=pair) - fig['layout']['yaxis1'].update(title='Price') - fig['layout']['yaxis2'].update(title='Volume') - for i, name in enumerate(plot_config['subplots']): - fig['layout'][f'yaxis{3 + i}'].update(title=name) - fig['layout']['xaxis']['rangeslider'].update(visible=False) + fig["layout"].update(title=pair) + fig["layout"]["yaxis1"].update(title="Price") + fig["layout"]["yaxis2"].update(title="Volume") + for i, name in enumerate(plot_config["subplots"]): + fig["layout"][f"yaxis{3 + i}"].update(title=name) + fig["layout"]["xaxis"]["rangeslider"].update(visible=False) fig.update_layout(modebar_add=["v1hovermode", "toggleSpikeLines"]) # Common information candles = go.Candlestick( - x=data.date, - open=data.open, - high=data.high, - low=data.low, - close=data.close, - name='Price' + x=data.date, open=data.open, high=data.high, low=data.low, close=data.close, name="Price" ) fig.add_trace(candles, 1, 1) - longs = create_scatter(data, 'enter_long', 'green', 'up') - exit_longs = create_scatter(data, 'exit_long', 'red', 'down') - shorts = create_scatter(data, 'enter_short', 'blue', 'down') - exit_shorts = create_scatter(data, 'exit_short', 'violet', 'up') + longs = create_scatter(data, "enter_long", "green", "up") + exit_longs = create_scatter(data, "exit_long", "red", "down") + shorts = create_scatter(data, "enter_short", "blue", "down") + exit_shorts = create_scatter(data, "exit_short", "violet", "up") for scatter in [longs, exit_longs, shorts, exit_shorts]: if scatter: fig.add_trace(scatter, 1, 1) # Add Bollinger Bands - fig = plot_area(fig, 1, data, 'bb_lowerband', 'bb_upperband', - label="Bollinger Band") + fig = plot_area(fig, 1, data, "bb_lowerband", "bb_upperband", label="Bollinger Band") # prevent bb_lower and bb_upper from plotting try: - del 
plot_config['main_plot']['bb_lowerband'] - del plot_config['main_plot']['bb_upperband'] + del plot_config["main_plot"]["bb_lowerband"] + del plot_config["main_plot"]["bb_upperband"] except KeyError: pass # main plot goes to row 1 - fig = add_indicators(fig=fig, row=1, indicators=plot_config['main_plot'], data=data) - fig = add_areas(fig, 1, data, plot_config['main_plot']) + fig = add_indicators(fig=fig, row=1, indicators=plot_config["main_plot"], data=data) + fig = add_areas(fig, 1, data, plot_config["main_plot"]) fig = plot_trades(fig, trades) # sub plot: Volume goes to row 2 volume = go.Bar( - x=data['date'], - y=data['volume'], - name='Volume', - marker_color='DarkSlateGrey', - marker_line_color='DarkSlateGrey' + x=data["date"], + y=data["volume"], + name="Volume", + marker_color="DarkSlateGrey", + marker_line_color="DarkSlateGrey", ) fig.add_trace(volume, 2, 1) # add each sub plot to a separate row - for i, label in enumerate(plot_config['subplots']): - sub_config = plot_config['subplots'][label] + for i, label in enumerate(plot_config["subplots"]): + sub_config = plot_config["subplots"][label] row = 3 + i - fig = add_indicators(fig=fig, row=row, indicators=sub_config, - data=data) + fig = add_indicators(fig=fig, row=row, indicators=sub_config, data=data) # fill area between indicators ( 'fill_to': 'other_indicator') fig = add_areas(fig, row, data, sub_config) return fig -def generate_profit_graph(pairs: str, data: Dict[str, pd.DataFrame], - trades: pd.DataFrame, timeframe: str, stake_currency: str, - starting_balance: float) -> go.Figure: +def generate_profit_graph( + pairs: str, + data: Dict[str, pd.DataFrame], + trades: pd.DataFrame, + timeframe: str, + stake_currency: str, + starting_balance: float, +) -> go.Figure: # Combine close-values for all pairs, rename columns to "pair" try: df_comb = combine_dataframes_with_mean(data, "close") except ValueError: raise OperationalException( "No data found. Please make sure that data is available for " - "the timerange and pairs selected.") + "the timerange and pairs selected." 
+ ) # Trim trades to available OHLCV data trades = extract_trades_of_period(df_comb, trades, date_index=True) if len(trades) == 0: - raise OperationalException('No trades found in selected timerange.') + raise OperationalException("No trades found in selected timerange.") # Add combined cumulative profit - df_comb = create_cum_profit(df_comb, trades, 'cum_profit', timeframe) + df_comb = create_cum_profit(df_comb, trades, "cum_profit", timeframe) # Plot the pairs average close prices, and total profit growth avgclose = go.Scatter( x=df_comb.index, - y=df_comb['mean'], - name='Avg close price', + y=df_comb["mean"], + name="Avg close price", ) - fig = make_subplots(rows=6, cols=1, shared_xaxes=True, - row_heights=[1, 1, 1, 0.5, 0.75, 0.75], - vertical_spacing=0.05, - subplot_titles=[ - "AVG Close Price", - "Combined Profit", - "Profit per pair", - "Parallelism", - "Underwater", - "Relative Drawdown", - ]) - fig['layout'].update(title="Freqtrade Profit plot") - fig['layout']['yaxis1'].update(title='Price') - fig['layout']['yaxis2'].update(title=f'Profit {stake_currency}') - fig['layout']['yaxis3'].update(title=f'Profit {stake_currency}') - fig['layout']['yaxis4'].update(title='Trade count') - fig['layout']['yaxis5'].update(title='Underwater Plot') - fig['layout']['yaxis6'].update(title='Underwater Plot Relative (%)', tickformat=',.2%') - fig['layout']['xaxis']['rangeslider'].update(visible=False) + fig = make_subplots( + rows=6, + cols=1, + shared_xaxes=True, + row_heights=[1, 1, 1, 0.5, 0.75, 0.75], + vertical_spacing=0.05, + subplot_titles=[ + "AVG Close Price", + "Combined Profit", + "Profit per pair", + "Parallelism", + "Underwater", + "Relative Drawdown", + ], + ) + fig["layout"].update(title="Freqtrade Profit plot") + fig["layout"]["yaxis1"].update(title="Price") + fig["layout"]["yaxis2"].update(title=f"Profit {stake_currency}") + fig["layout"]["yaxis3"].update(title=f"Profit {stake_currency}") + fig["layout"]["yaxis4"].update(title="Trade count") + fig["layout"]["yaxis5"].update(title="Underwater Plot") + fig["layout"]["yaxis6"].update(title="Underwater Plot Relative (%)", tickformat=",.2%") + fig["layout"]["xaxis"]["rangeslider"].update(visible=False) fig.update_layout(modebar_add=["v1hovermode", "toggleSpikeLines"]) fig.add_trace(avgclose, 1, 1) - fig = add_profit(fig, 2, df_comb, 'cum_profit', 'Profit') + fig = add_profit(fig, 2, df_comb, "cum_profit", "Profit") fig = add_max_drawdown(fig, 2, trades, df_comb, timeframe, starting_balance) fig = add_parallelism(fig, 4, trades, timeframe) # Two rows consumed fig = add_underwater(fig, 5, trades, starting_balance) for pair in pairs: - profit_col = f'cum_profit_{pair}' + profit_col = f"cum_profit_{pair}" try: - df_comb = create_cum_profit(df_comb, trades[trades['pair'] == pair], profit_col, - timeframe) + df_comb = create_cum_profit( + df_comb, trades[trades["pair"] == pair], profit_col, timeframe + ) fig = add_profit(fig, 3, df_comb, profit_col, f"Profit {pair}") except ValueError: pass @@ -601,9 +600,9 @@ def generate_plot_filename(pair: str, timeframe: str) -> str: Generate filenames per pair/timeframe to be used for storing plots """ pair_s = pair_to_filename(pair) - file_name = 'freqtrade-plot-' + pair_s + '-' + timeframe + '.html' + file_name = "freqtrade-plot-" + pair_s + "-" + timeframe + ".html" - logger.info('Generate plot file for %s', pair) + logger.info("Generate plot file for %s", pair) return file_name @@ -620,8 +619,7 @@ def store_plot_file(fig, filename: str, directory: Path, auto_open: bool = False 
     directory.mkdir(parents=True, exist_ok=True)

     _filename = directory.joinpath(filename)
-    plot(fig, filename=str(_filename),
-         auto_open=auto_open)
+    plot(fig, filename=str(_filename), auto_open=auto_open)
     logger.info(f"Stored plot as {_filename}")

@@ -643,17 +641,17 @@ def load_and_plot_trades(config: Config):
     strategy.ft_bot_start()
     strategy_safe_wrapper(strategy.bot_loop_start)(current_time=datetime.now(timezone.utc))
     plot_elements = init_plotscript(config, list(exchange.markets), strategy.startup_candle_count)
-    timerange = plot_elements['timerange']
-    trades = plot_elements['trades']
+    timerange = plot_elements["timerange"]
+    trades = plot_elements["trades"]
     pair_counter = 0
     for pair, data in plot_elements["ohlcv"].items():
         pair_counter += 1
         logger.info("analyse pair %s", pair)
-        df_analyzed = strategy.analyze_ticker(data, {'pair': pair})
+        df_analyzed = strategy.analyze_ticker(data, {"pair": pair})
         df_analyzed = trim_dataframe(df_analyzed, timerange)
         if not trades.empty:
-            trades_pair = trades.loc[trades['pair'] == pair]
+            trades_pair = trades.loc[trades["pair"] == pair]
             trades_pair = extract_trades_of_period(df_analyzed, trades_pair)
         else:
             trades_pair = trades
@@ -662,15 +660,18 @@ def load_and_plot_trades(config: Config):
             pair=pair,
             data=df_analyzed,
             trades=trades_pair,
-            indicators1=config.get('indicators1', []),
-            indicators2=config.get('indicators2', []),
-            plot_config=strategy.plot_config if hasattr(strategy, 'plot_config') else {}
+            indicators1=config.get("indicators1", []),
+            indicators2=config.get("indicators2", []),
+            plot_config=strategy.plot_config if hasattr(strategy, "plot_config") else {},
         )
-        store_plot_file(fig, filename=generate_plot_filename(pair, config['timeframe']),
-                        directory=config['user_data_dir'] / 'plot')
+        store_plot_file(
+            fig,
+            filename=generate_plot_filename(pair, config["timeframe"]),
+            directory=config["user_data_dir"] / "plot",
+        )

-    logger.info('End of plotting process. %s plots generated', pair_counter)
+    logger.info("End of plotting process. %s plots generated", pair_counter)


 def plot_profit(config: Config) -> None:
     """
@@ -680,28 +681,37 @@ def plot_profit(config: Config) -> None:
     But should be somewhat proportional, and therefore useful
     in helping out to find a good algorithm.
     """
-    if 'timeframe' not in config:
-        raise OperationalException('Timeframe must be set in either config or via --timeframe.')
+    if "timeframe" not in config:
+        raise OperationalException("Timeframe must be set in either config or via --timeframe.")

     exchange = ExchangeResolver.load_exchange(config)
     plot_elements = init_plotscript(config, list(exchange.markets))
-    trades = plot_elements['trades']
+    trades = plot_elements["trades"]
     # Filter trades to relevant pairs
     # Remove open pairs - we don't know the profit yet so can't calculate profit for these.
     # Also, If only one open pair is left, then the profit-generation would fail.
-    trades = trades[(trades['pair'].isin(plot_elements['pairs']))
-                    & (~trades['close_date'].isnull())
-                    ]
+    trades = trades[
+        (trades["pair"].isin(plot_elements["pairs"])) & (~trades["close_date"].isnull())
+    ]
     if len(trades) == 0:
-        raise OperationalException("No trades found, cannot generate Profit-plot without "
-                                   "trades from either Backtest result or database.")
+        raise OperationalException(
+            "No trades found, cannot generate Profit-plot without "
+            "trades from either Backtest result or database."
+        )

     # Create an average close price of all the pairs that were involved.
# this could be useful to gauge the overall market trend - fig = generate_profit_graph(plot_elements['pairs'], plot_elements['ohlcv'], - trades, config['timeframe'], - config.get('stake_currency', ''), - config.get('available_capital', config['dry_run_wallet'])) - store_plot_file(fig, filename='freqtrade-profit-plot.html', - directory=config['user_data_dir'] / 'plot', - auto_open=config.get('plot_auto_open', False)) + fig = generate_profit_graph( + plot_elements["pairs"], + plot_elements["ohlcv"], + trades, + config["timeframe"], + config.get("stake_currency", ""), + config.get("available_capital", config["dry_run_wallet"]), + ) + store_plot_file( + fig, + filename="freqtrade-profit-plot.html", + directory=config["user_data_dir"] / "plot", + auto_open=config.get("plot_auto_open", False), + ) diff --git a/freqtrade/plugins/pairlist/AgeFilter.py b/freqtrade/plugins/pairlist/AgeFilter.py index bce789446..0be04d7b8 100644 --- a/freqtrade/plugins/pairlist/AgeFilter.py +++ b/freqtrade/plugins/pairlist/AgeFilter.py @@ -1,6 +1,7 @@ """ Minimum age (days listed) pair list filter """ + import logging from copy import deepcopy from datetime import timedelta @@ -20,32 +21,40 @@ logger = logging.getLogger(__name__) class AgeFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) # Checked symbols cache (dictionary of ticker symbol => timestamp) self._symbolsChecked: Dict[str, int] = {} self._symbolsCheckFailed = PeriodicCache(maxsize=1000, ttl=86_400) - self._min_days_listed = pairlistconfig.get('min_days_listed', 10) - self._max_days_listed = pairlistconfig.get('max_days_listed') + self._min_days_listed = pairlistconfig.get("min_days_listed", 10) + self._max_days_listed = pairlistconfig.get("max_days_listed") - candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def']) + candle_limit = exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"]) if self._min_days_listed < 1: raise OperationalException("AgeFilter requires min_days_listed to be >= 1") if self._min_days_listed > candle_limit: - raise OperationalException("AgeFilter requires min_days_listed to not exceed " - "exchange max request size " - f"({candle_limit})") + raise OperationalException( + "AgeFilter requires min_days_listed to not exceed " + "exchange max request size " + f"({candle_limit})" + ) if self._max_days_listed and self._max_days_listed <= self._min_days_listed: raise OperationalException("AgeFilter max_days_listed <= min_days_listed not permitted") if self._max_days_listed and self._max_days_listed > candle_limit: - raise OperationalException("AgeFilter requires max_days_listed to not exceed " - "exchange max request size " - f"({candle_limit})") + raise OperationalException( + "AgeFilter requires max_days_listed to not exceed " + "exchange max request size " + f"({candle_limit})" + ) @property def needstickers(self) -> bool: @@ -63,10 +72,11 @@ class AgeFilter(IPairList): return ( f"{self.name} - Filtering pairs with age less than " f"{self._min_days_listed} {plural(self._min_days_listed, 'day')}" - ) + (( - " or more than " - f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}" - ) if self._max_days_listed else '') + ) + ( + (" or more than {self._max_days_listed} 
{plural(self._max_days_listed, 'day')}") + if self._max_days_listed + else "" + ) @staticmethod def description() -> str: @@ -96,21 +106,26 @@ class AgeFilter(IPairList): :return: new allowlist """ needed_pairs: ListPairsWithTimeframes = [ - (p, '1d', self._config['candle_type_def']) for p in pairlist - if p not in self._symbolsChecked and p not in self._symbolsCheckFailed] + (p, "1d", self._config["candle_type_def"]) + for p in pairlist + if p not in self._symbolsChecked and p not in self._symbolsCheckFailed + ] if not needed_pairs: # Remove pairs that have been removed before return [p for p in pairlist if p not in self._symbolsCheckFailed] - since_days = -( - self._max_days_listed if self._max_days_listed else self._min_days_listed - ) - 1 + since_days = ( + -(self._max_days_listed if self._max_days_listed else self._min_days_listed) - 1 + ) since_ms = dt_ts(dt_floor_day(dt_now()) + timedelta(days=since_days)) candles = self._exchange.refresh_latest_ohlcv(needed_pairs, since_ms=since_ms, cache=False) if self._enabled: for p in deepcopy(pairlist): - daily_candles = candles[(p, '1d', self._config['candle_type_def'])] if ( - p, '1d', self._config['candle_type_def']) in candles else None + daily_candles = ( + candles[(p, "1d", self._config["candle_type_def"])] + if (p, "1d", self._config["candle_type_def"]) in candles + else None + ) if not self._validate_pair_loc(p, daily_candles): pairlist.remove(p) self.log_once(f"Validated {len(pairlist)} pairs.", logger.info) @@ -128,23 +143,30 @@ class AgeFilter(IPairList): return True if daily_candles is not None: - if ( - len(daily_candles) >= self._min_days_listed - and (not self._max_days_listed or len(daily_candles) <= self._max_days_listed) + if len(daily_candles) >= self._min_days_listed and ( + not self._max_days_listed or len(daily_candles) <= self._max_days_listed ): # We have fetched at least the minimum required number of daily candles # Add to cache, store the time we last checked this symbol self._symbolsChecked[pair] = dt_ts() return True else: - self.log_once(( - f"Removed {pair} from whitelist, because age " - f"{len(daily_candles)} is less than {self._min_days_listed} " - f"{plural(self._min_days_listed, 'day')}" - ) + (( - " or more than " - f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}" - ) if self._max_days_listed else ''), logger.info) + self.log_once( + ( + f"Removed {pair} from whitelist, because age " + f"{len(daily_candles)} is less than {self._min_days_listed} " + f"{plural(self._min_days_listed, 'day')}" + ) + + ( + ( + " or more than " + f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}" + ) + if self._max_days_listed + else "" + ), + logger.info, + ) self._symbolsCheckFailed[pair] = dt_ts() return False return False diff --git a/freqtrade/plugins/pairlist/FullTradesFilter.py b/freqtrade/plugins/pairlist/FullTradesFilter.py index 69779d896..11d98abc5 100644 --- a/freqtrade/plugins/pairlist/FullTradesFilter.py +++ b/freqtrade/plugins/pairlist/FullTradesFilter.py @@ -1,6 +1,7 @@ """ Full trade slots pair list filter """ + import logging from typing import Any, Dict, List @@ -14,10 +15,14 @@ logger = logging.getLogger(__name__) class FullTradesFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, 
pairlistconfig, pairlist_pos) @property @@ -49,7 +54,7 @@ class FullTradesFilter(IPairList): """ # Get the number of open trades and max open trades config num_open = Trade.get_open_trade_count() - max_trades = self._config['max_open_trades'] + max_trades = self._config["max_open_trades"] if (num_open >= max_trades) and (max_trades > 0): return [] diff --git a/freqtrade/plugins/pairlist/IPairList.py b/freqtrade/plugins/pairlist/IPairList.py index d09b447d4..0db38ff2f 100644 --- a/freqtrade/plugins/pairlist/IPairList.py +++ b/freqtrade/plugins/pairlist/IPairList.py @@ -1,6 +1,7 @@ """ PairList Handler base class """ + import logging from abc import ABC, abstractmethod, abstractproperty from copy import deepcopy @@ -46,17 +47,21 @@ PairlistParameter = Union[ __NumberPairlistParameter, __StringPairlistParameter, __OptionPairlistParameter, - __BoolPairlistParameter - ] + __BoolPairlistParameter, +] class IPairList(LoggingMixin, ABC): - is_pairlist_generator = False - def __init__(self, exchange: Exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange: Exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: """ :param exchange: Exchange instance :param pairlistmanager: Instantiated Pairlist manager @@ -71,7 +76,7 @@ class IPairList(LoggingMixin, ABC): self._config = config self._pairlistconfig = pairlistconfig self._pairlist_pos = pairlist_pos - self.refresh_period = self._pairlistconfig.get('refresh_period', 1800) + self.refresh_period = self._pairlistconfig.get("refresh_period", 1800) LoggingMixin.__init__(self, logger, self.refresh_period) @property @@ -155,8 +160,10 @@ class IPairList(LoggingMixin, ABC): :param tickers: Tickers (from exchange.get_tickers). May be cached. :return: List of pairs """ - raise OperationalException("This Pairlist Handler should not be used " - "at the first position in the list of Pairlist Handlers.") + raise OperationalException( + "This Pairlist Handler should not be used " + "at the first position in the list of Pairlist Handlers." + ) def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ @@ -191,8 +198,9 @@ class IPairList(LoggingMixin, ABC): """ return self._pairlistmanager.verify_blacklist(pairlist, logmethod) - def verify_whitelist(self, pairlist: List[str], logmethod, - keep_invalid: bool = False) -> List[str]: + def verify_whitelist( + self, pairlist: List[str], logmethod, keep_invalid: bool = False + ) -> List[str]: """ Proxy method to verify_whitelist for easy access for child classes. :param pairlist: Pairlist to validate @@ -212,26 +220,33 @@ class IPairList(LoggingMixin, ABC): markets = self._exchange.markets if not markets: raise OperationalException( - 'Markets not loaded. Make sure that exchange is initialized correctly.') + "Markets not loaded. Make sure that exchange is initialized correctly." + ) sanitized_whitelist: List[str] = [] for pair in pairlist: # pair is not in the generated dynamic market or has the wrong stake currency if pair not in markets: - self.log_once(f"Pair {pair} is not compatible with exchange " - f"{self._exchange.name}. Removing it from whitelist..", - logger.warning) + self.log_once( + f"Pair {pair} is not compatible with exchange " + f"{self._exchange.name}. Removing it from whitelist..", + logger.warning, + ) continue if not self._exchange.market_is_tradable(markets[pair]): - self.log_once(f"Pair {pair} is not tradable with Freqtrade." 
- "Removing it from whitelist..", logger.warning) + self.log_once( + f"Pair {pair} is not tradable with Freqtrade. Removing it from whitelist..", + logger.warning, + ) continue - if self._exchange.get_pair_quote_currency(pair) != self._config['stake_currency']: - self.log_once(f"Pair {pair} is not compatible with your stake currency " - f"{self._config['stake_currency']}. Removing it from whitelist..", - logger.warning) + if self._exchange.get_pair_quote_currency(pair) != self._config["stake_currency"]: + self.log_once( + f"Pair {pair} is not compatible with your stake currency " + f"{self._config['stake_currency']}. Removing it from whitelist..", + logger.warning, + ) continue # Check if market is active diff --git a/freqtrade/plugins/pairlist/MarketCapPairList.py b/freqtrade/plugins/pairlist/MarketCapPairList.py index 0c968f988..709b6100b 100644 --- a/freqtrade/plugins/pairlist/MarketCapPairList.py +++ b/freqtrade/plugins/pairlist/MarketCapPairList.py @@ -3,47 +3,57 @@ Market Cap PairList provider Provides dynamic pair list based on Market Cap """ + import logging from typing import Any, Dict, List from cachetools import TTLCache -from pycoingecko import CoinGeckoAPI from freqtrade.constants import Config from freqtrade.exceptions import OperationalException from freqtrade.exchange.types import Tickers from freqtrade.plugins.pairlist.IPairList import IPairList, PairlistParameter +from freqtrade.util.coin_gecko import FtCoinGeckoApi logger = logging.getLogger(__name__) class MarketCapPairList(IPairList): - is_pairlist_generator = True - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - if 'number_assets' not in self._pairlistconfig: + if "number_assets" not in self._pairlistconfig: raise OperationalException( - '`number_assets` not specified. Please check your configuration ' - 'for "pairlist.config.number_assets"') + "`number_assets` not specified. Please check your configuration " + 'for "pairlist.config.number_assets"' + ) - self._stake_currency = config['stake_currency'] - self._number_assets = self._pairlistconfig['number_assets'] - self._max_rank = self._pairlistconfig.get('max_rank', 30) - self._refresh_period = self._pairlistconfig.get('refresh_period', 86400) + self._stake_currency = config["stake_currency"] + self._number_assets = self._pairlistconfig["number_assets"] + self._max_rank = self._pairlistconfig.get("max_rank", 30) + self._refresh_period = self._pairlistconfig.get("refresh_period", 86400) self._marketcap_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) - self._def_candletype = self._config['candle_type_def'] - self._coingecko: CoinGeckoAPI = CoinGeckoAPI() + self._def_candletype = self._config["candle_type_def"] + + _coingecko_config = config.get("coingecko", {}) + + self._coingecko: FtCoinGeckoApi = FtCoinGeckoApi( + api_key=_coingecko_config.get("api_key", ""), + is_demo=_coingecko_config.get("is_demo", True), + ) if self._max_rank > 250: - raise OperationalException( - "This filter only support marketcap rank up to 250." 
- ) + raise OperationalException("This filter only support marketcap rank up to 250.") @property def needstickers(self) -> bool: @@ -87,7 +97,7 @@ class MarketCapPairList(IPairList): "default": 86400, "description": "Refresh period", "help": "Refresh period in seconds", - } + }, } def gen_pairlist(self, tickers: Tickers) -> List[str]: @@ -98,21 +108,24 @@ class MarketCapPairList(IPairList): """ # Generate dynamic whitelist # Must always run if this pairlist is the first in the list. - pairlist = self._marketcap_cache.get('pairlist_mc') + pairlist = self._marketcap_cache.get("pairlist_mc") if pairlist: # Item found - no refresh necessary return pairlist.copy() else: # Use fresh pairlist # Check if pair quote currency equals to the stake currency. - _pairlist = [k for k in self._exchange.get_markets( - quote_currencies=[self._stake_currency], - tradable_only=True, active_only=True).keys()] + _pairlist = [ + k + for k in self._exchange.get_markets( + quote_currencies=[self._stake_currency], tradable_only=True, active_only=True + ).keys() + ] # No point in testing for blacklisted pairs... _pairlist = self.verify_blacklist(_pairlist, logger.info) pairlist = self.filter_pairlist(_pairlist, tickers) - self._marketcap_cache['pairlist_mc'] = pairlist.copy() + self._marketcap_cache["pairlist_mc"] = pairlist.copy() return pairlist @@ -124,25 +137,30 @@ class MarketCapPairList(IPairList): :param tickers: Tickers (from exchange.get_tickers). May be cached. :return: new whitelist """ - marketcap_list = self._marketcap_cache.get('marketcap') + marketcap_list = self._marketcap_cache.get("marketcap") if marketcap_list is None: - data = self._coingecko.get_coins_markets(vs_currency='usd', order='market_cap_desc', - per_page='250', page='1', sparkline='false', - locale='en') + data = self._coingecko.get_coins_markets( + vs_currency="usd", + order="market_cap_desc", + per_page="250", + page="1", + sparkline="false", + locale="en", + ) if data: - marketcap_list = [row['symbol'] for row in data] - self._marketcap_cache['marketcap'] = marketcap_list + marketcap_list = [row["symbol"] for row in data] + self._marketcap_cache["marketcap"] = marketcap_list if marketcap_list: filtered_pairlist = [] - market = self._config['trading_mode'] + market = self._config["trading_mode"] pair_format = f"{self._stake_currency.upper()}" - if (market == 'futures'): + if market == "futures": pair_format += f":{self._stake_currency.upper()}" - top_marketcap = marketcap_list[:self._max_rank:] + top_marketcap = marketcap_list[: self._max_rank :] for mc_pair in top_marketcap: test_pair = f"{mc_pair.upper()}/{pair_format}" diff --git a/freqtrade/plugins/pairlist/OffsetFilter.py b/freqtrade/plugins/pairlist/OffsetFilter.py index af152c7bc..1fa9e1bd0 100644 --- a/freqtrade/plugins/pairlist/OffsetFilter.py +++ b/freqtrade/plugins/pairlist/OffsetFilter.py @@ -1,6 +1,7 @@ """ Offset pair list filter """ + import logging from typing import Any, Dict, List @@ -14,14 +15,18 @@ logger = logging.getLogger(__name__) class OffsetFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._offset = pairlistconfig.get('offset', 0) - self._number_pairs = pairlistconfig.get('number_assets', 0) + self._offset = pairlistconfig.get("offset", 
0) + self._number_pairs = pairlistconfig.get("number_assets", 0) if self._offset < 0: raise OperationalException("OffsetFilter requires offset to be >= 0") @@ -73,11 +78,13 @@ class OffsetFilter(IPairList): :return: new whitelist """ if self._offset > len(pairlist): - self.log_once(f"Offset of {self._offset} is larger than " + - f"pair count of {len(pairlist)}", logger.warning) - pairs = pairlist[self._offset:] + self.log_once( + f"Offset of {self._offset} is larger than " + f"pair count of {len(pairlist)}", + logger.warning, + ) + pairs = pairlist[self._offset :] if self._number_pairs: - pairs = pairs[:self._number_pairs] + pairs = pairs[: self._number_pairs] self.log_once(f"Searching {len(pairs)} pairs: {pairs}", logger.info) diff --git a/freqtrade/plugins/pairlist/PerformanceFilter.py b/freqtrade/plugins/pairlist/PerformanceFilter.py index b45259605..930c78334 100644 --- a/freqtrade/plugins/pairlist/PerformanceFilter.py +++ b/freqtrade/plugins/pairlist/PerformanceFilter.py @@ -1,6 +1,7 @@ """ Performance pair list filter """ + import logging from typing import Any, Dict, List @@ -16,14 +17,18 @@ logger = logging.getLogger(__name__) class PerformanceFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._minutes = pairlistconfig.get('minutes', 0) - self._min_profit = pairlistconfig.get('min_profit') + self._minutes = pairlistconfig.get("minutes", 0) + self._min_profit = pairlistconfig.get("min_profit") @property def needstickers(self) -> bool: @@ -82,25 +87,29 @@ class PerformanceFilter(IPairList): return pairlist # Get pairlist from performance dataframe values - list_df = pd.DataFrame({'pair': pairlist}) - list_df['prior_idx'] = list_df.index + list_df = pd.DataFrame({"pair": pairlist}) + list_df["prior_idx"] = list_df.index # Set initial value for pairs with no trades to 0 # Sort the list using: # - primarily performance (high to low) # - then count (low to high, so as to favor same performance with fewer trades) # - then by prior index, keeping original sorting order - sorted_df = list_df.merge(performance, on='pair', how='left')\ - .fillna(0).sort_values(by=['profit_ratio', 'count', 'prior_idx'], - ascending=[False, True, True]) + sorted_df = ( + list_df.merge(performance, on="pair", how="left") + .fillna(0) + .sort_values(by=["profit_ratio", "count", "prior_idx"], ascending=[False, True, True]) + ) if self._min_profit is not None: - removed = sorted_df[sorted_df['profit_ratio'] < self._min_profit] + removed = sorted_df[sorted_df["profit_ratio"] < self._min_profit] for _, row in removed.iterrows(): self.log_once( f"Removing pair {row['pair']} since {row['profit_ratio']} is " - f"below {self._min_profit}", logger.info) - sorted_df = sorted_df[sorted_df['profit_ratio'] >= self._min_profit] + f"below {self._min_profit}", + logger.info, + ) + sorted_df = sorted_df[sorted_df["profit_ratio"] >= self._min_profit] - pairlist = sorted_df['pair'].tolist() + pairlist = sorted_df["pair"].tolist() return pairlist diff --git a/freqtrade/plugins/pairlist/PrecisionFilter.py b/freqtrade/plugins/pairlist/PrecisionFilter.py index d354eaf63..0e8c50849 100644 --- a/freqtrade/plugins/pairlist/PrecisionFilter.py +++ b/freqtrade/plugins/pairlist/PrecisionFilter.py @@ -1,6 +1,7 @@ """ 
Precision pair list filter """ + import logging from typing import Any, Dict, Optional @@ -15,17 +16,22 @@ logger = logging.getLogger(__name__) class PrecisionFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - if 'stoploss' not in self._config: + if "stoploss" not in self._config: raise OperationalException( - 'PrecisionFilter can only work with stoploss defined. Please add the ' - 'stoploss key to your configuration (overwrites eventual strategy settings).') - self._stoploss = self._config['stoploss'] + "PrecisionFilter can only work with stoploss defined. Please add the " + "stoploss key to your configuration (overwrites eventual strategy settings)." + ) + self._stoploss = self._config["stoploss"] self._enabled = self._stoploss != 0 # Precalculate sanitized stoploss value to avoid recalculation for every pair @@ -58,23 +64,29 @@ class PrecisionFilter(IPairList): :param ticker: ticker dict as returned from ccxt.fetch_ticker :return: True if the pair can stay, false if it should be removed """ - if not ticker or ticker.get('last', None) is None: - self.log_once(f"Removed {pair} from whitelist, because " - "ticker['last'] is empty (Usually no trade in the last 24h).", - logger.info) + if not ticker or ticker.get("last", None) is None: + self.log_once( + f"Removed {pair} from whitelist, because " + "ticker['last'] is empty (Usually no trade in the last 24h).", + logger.info, + ) return False - stop_price = ticker['last'] * self._stoploss + stop_price = ticker["last"] * self._stoploss # Adjust stop-prices to precision sp = self._exchange.price_to_precision(pair, stop_price, rounding_mode=ROUND_UP) - stop_gap_price = self._exchange.price_to_precision(pair, stop_price * 0.99, - rounding_mode=ROUND_UP) + stop_gap_price = self._exchange.price_to_precision( + pair, stop_price * 0.99, rounding_mode=ROUND_UP + ) logger.debug(f"{pair} - {sp} : {stop_gap_price}") if sp <= stop_gap_price: - self.log_once(f"Removed {pair} from whitelist, because " - f"stop price {sp} would be <= stop limit {stop_gap_price}", logger.info) + self.log_once( + f"Removed {pair} from whitelist, because " + f"stop price {sp} would be <= stop limit {stop_gap_price}", + logger.info, + ) return False return True diff --git a/freqtrade/plugins/pairlist/PriceFilter.py b/freqtrade/plugins/pairlist/PriceFilter.py index f27fe035a..81dbdfc33 100644 --- a/freqtrade/plugins/pairlist/PriceFilter.py +++ b/freqtrade/plugins/pairlist/PriceFilter.py @@ -1,6 +1,7 @@ """ Price pair list filter """ + import logging from typing import Any, Dict, Optional @@ -14,28 +15,34 @@ logger = logging.getLogger(__name__) class PriceFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._low_price_ratio = pairlistconfig.get('low_price_ratio', 0) + self._low_price_ratio = pairlistconfig.get("low_price_ratio", 0) if self._low_price_ratio < 0: raise OperationalException("PriceFilter requires low_price_ratio to be >= 0") - 
self._min_price = pairlistconfig.get('min_price', 0) + self._min_price = pairlistconfig.get("min_price", 0) if self._min_price < 0: raise OperationalException("PriceFilter requires min_price to be >= 0") - self._max_price = pairlistconfig.get('max_price', 0) + self._max_price = pairlistconfig.get("max_price", 0) if self._max_price < 0: raise OperationalException("PriceFilter requires max_price to be >= 0") - self._max_value = pairlistconfig.get('max_value', 0) + self._max_value = pairlistconfig.get("max_value", 0) if self._max_value < 0: raise OperationalException("PriceFilter requires max_value to be >= 0") - self._enabled = ((self._low_price_ratio > 0) or - (self._min_price > 0) or - (self._max_price > 0) or - (self._max_value > 0)) + self._enabled = ( + (self._low_price_ratio > 0) + or (self._min_price > 0) + or (self._max_price > 0) + or (self._max_value > 0) + ) @property def needstickers(self) -> bool: @@ -76,8 +83,9 @@ class PriceFilter(IPairList): "type": "number", "default": 0, "description": "Low price ratio", - "help": ("Remove pairs where a price move of 1 price unit (pip) " - "is above this ratio."), + "help": ( + "Remove pairs where a price move of 1 price unit (pip) is above this ratio." + ), }, "min_price": { "type": "number", @@ -106,12 +114,14 @@ class PriceFilter(IPairList): :param ticker: ticker dict as returned from ccxt.fetch_ticker :return: True if the pair can stay, false if it should be removed """ - if ticker and 'last' in ticker and ticker['last'] is not None and ticker.get('last') != 0: - price: float = ticker['last'] + if ticker and "last" in ticker and ticker["last"] is not None and ticker.get("last") != 0: + price: float = ticker["last"] else: - self.log_once(f"Removed {pair} from whitelist, because " - "ticker['last'] is empty (Usually no trade in the last 24h).", - logger.info) + self.log_once( + f"Removed {pair} from whitelist, because " + "ticker['last'] is empty (Usually no trade in the last 24h).", + logger.info, + ) return False # Perform low_price_ratio check. @@ -119,17 +129,19 @@ class PriceFilter(IPairList): compare = self._exchange.price_get_one_pip(pair, price) changeperc = compare / price if changeperc > self._low_price_ratio: - self.log_once(f"Removed {pair} from whitelist, " - f"because 1 unit is {changeperc:.3%}", logger.info) + self.log_once( + f"Removed {pair} from whitelist, because 1 unit is {changeperc:.3%}", + logger.info, + ) return False # Perform low_amount check if self._max_value != 0: market = self._exchange.markets[pair] - limits = market['limits'] - if (limits['amount']['min'] is not None): - min_amount = limits['amount']['min'] - min_precision = market['precision']['amount'] + limits = market["limits"] + if limits["amount"]["min"] is not None: + min_amount = limits["amount"]["min"] + min_precision = market["precision"]["amount"] min_value = min_amount * price if self._exchange.precisionMode == 4: @@ -142,23 +154,31 @@ class PriceFilter(IPairList): diff = next_value - min_value if diff > self._max_value: - self.log_once(f"Removed {pair} from whitelist, " - f"because min value change of {diff} > {self._max_value}.", - logger.info) + self.log_once( + f"Removed {pair} from whitelist, " + f"because min value change of {diff} > {self._max_value}.", + logger.info, + ) return False # Perform min_price check. 
if self._min_price != 0: if price < self._min_price: - self.log_once(f"Removed {pair} from whitelist, " - f"because last price < {self._min_price:.8f}", logger.info) + self.log_once( + f"Removed {pair} from whitelist, " + f"because last price < {self._min_price:.8f}", + logger.info, + ) return False # Perform max_price check. if self._max_price != 0: if price > self._max_price: - self.log_once(f"Removed {pair} from whitelist, " - f"because last price > {self._max_price:.8f}", logger.info) + self.log_once( + f"Removed {pair} from whitelist, " + f"because last price > {self._max_price:.8f}", + logger.info, + ) return False return True diff --git a/freqtrade/plugins/pairlist/ProducerPairList.py b/freqtrade/plugins/pairlist/ProducerPairList.py index 826f05913..771f87380 100644 --- a/freqtrade/plugins/pairlist/ProducerPairList.py +++ b/freqtrade/plugins/pairlist/ProducerPairList.py @@ -3,6 +3,7 @@ External Pair List provider Provides pair list from Leader data """ + import logging from typing import Any, Dict, List, Optional @@ -28,18 +29,25 @@ class ProducerPairList(IPairList): } ], """ + is_pairlist_generator = True - def __init__(self, exchange, pairlistmanager, - config: Dict[str, Any], pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Dict[str, Any], + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._num_assets: int = self._pairlistconfig.get('number_assets', 0) - self._producer_name = self._pairlistconfig.get('producer_name', 'default') - if not config.get('external_message_consumer', {}).get('enabled'): + self._num_assets: int = self._pairlistconfig.get("number_assets", 0) + self._producer_name = self._pairlistconfig.get("producer_name", "default") + if not config.get("external_message_consumer", {}).get("enabled"): raise OperationalException( - "ProducerPairList requires external_message_consumer to be enabled.") + "ProducerPairList requires external_message_consumer to be enabled." + ) @property def needstickers(self) -> bool: @@ -74,21 +82,24 @@ class ProducerPairList(IPairList): "type": "string", "default": "default", "description": "Producer name", - "help": ("Name of the producer to use. Requires additional " - "external_message_consumer configuration.") + "help": ( + "Name of the producer to use. Requires additional " + "external_message_consumer configuration." 
+ ), }, } def _filter_pairlist(self, pairlist: Optional[List[str]]): upstream_pairlist = self._pairlistmanager._dataprovider.get_producer_pairs( - self._producer_name) + self._producer_name + ) if pairlist is None: pairlist = self._pairlistmanager._dataprovider.get_producer_pairs(self._producer_name) pairs = list(dict.fromkeys(pairlist + upstream_pairlist)) if self._num_assets: - pairs = pairs[:self._num_assets] + pairs = pairs[: self._num_assets] return pairs diff --git a/freqtrade/plugins/pairlist/RemotePairList.py b/freqtrade/plugins/pairlist/RemotePairList.py index 0fe67968f..b15cfa96e 100644 --- a/freqtrade/plugins/pairlist/RemotePairList.py +++ b/freqtrade/plugins/pairlist/RemotePairList.py @@ -3,6 +3,7 @@ Remote PairList provider Provides pair list fetched from a remote source """ + import logging from pathlib import Path from typing import Any, Dict, List, Tuple @@ -24,51 +25,59 @@ logger = logging.getLogger(__name__) class RemotePairList(IPairList): - is_pairlist_generator = True - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - if 'number_assets' not in self._pairlistconfig: + if "number_assets" not in self._pairlistconfig: raise OperationalException( - '`number_assets` not specified. Please check your configuration ' - 'for "pairlist.config.number_assets"') + "`number_assets` not specified. Please check your configuration " + 'for "pairlist.config.number_assets"' + ) - if 'pairlist_url' not in self._pairlistconfig: + if "pairlist_url" not in self._pairlistconfig: raise OperationalException( - '`pairlist_url` not specified. Please check your configuration ' - 'for "pairlist.config.pairlist_url"') + "`pairlist_url` not specified. 
Please check your configuration " + 'for "pairlist.config.pairlist_url"' + ) - self._mode = self._pairlistconfig.get('mode', 'whitelist') - self._processing_mode = self._pairlistconfig.get('processing_mode', 'filter') - self._number_pairs = self._pairlistconfig['number_assets'] - self._refresh_period: int = self._pairlistconfig.get('refresh_period', 1800) - self._keep_pairlist_on_failure = self._pairlistconfig.get('keep_pairlist_on_failure', True) + self._mode = self._pairlistconfig.get("mode", "whitelist") + self._processing_mode = self._pairlistconfig.get("processing_mode", "filter") + self._number_pairs = self._pairlistconfig["number_assets"] + self._refresh_period: int = self._pairlistconfig.get("refresh_period", 1800) + self._keep_pairlist_on_failure = self._pairlistconfig.get("keep_pairlist_on_failure", True) self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) - self._pairlist_url = self._pairlistconfig.get('pairlist_url', '') - self._read_timeout = self._pairlistconfig.get('read_timeout', 60) - self._bearer_token = self._pairlistconfig.get('bearer_token', '') + self._pairlist_url = self._pairlistconfig.get("pairlist_url", "") + self._read_timeout = self._pairlistconfig.get("read_timeout", 60) + self._bearer_token = self._pairlistconfig.get("bearer_token", "") self._init_done = False - self._save_to_file = self._pairlistconfig.get('save_to_file', None) + self._save_to_file = self._pairlistconfig.get("save_to_file", None) self._last_pairlist: List[Any] = list() - if self._mode not in ['whitelist', 'blacklist']: + if self._mode not in ["whitelist", "blacklist"]: raise OperationalException( - '`mode` not configured correctly. Supported Modes ' - 'are "whitelist","blacklist"') + "`mode` not configured correctly. Supported Modes " 'are "whitelist","blacklist"' + ) - if self._processing_mode not in ['filter', 'append']: + if self._processing_mode not in ["filter", "append"]: raise OperationalException( - '`processing_mode` not configured correctly. Supported Modes ' - 'are "filter","append"') + "`processing_mode` not configured correctly. Supported Modes " + 'are "filter","append"' + ) - if self._pairlist_pos == 0 and self._mode == 'blacklist': + if self._pairlist_pos == 0 and self._mode == "blacklist": raise OperationalException( - 'A `blacklist` mode RemotePairList can not be on the first ' - 'position of your pairlist.') + "A `blacklist` mode RemotePairList can not be on the first " + "position of your pairlist." 
+ ) @property def needstickers(self) -> bool: @@ -146,13 +155,15 @@ class RemotePairList(IPairList): } def process_json(self, jsonparse) -> List[str]: - - pairlist = jsonparse.get('pairs', []) - remote_refresh_period = int(jsonparse.get('refresh_period', self._refresh_period)) + pairlist = jsonparse.get("pairs", []) + remote_refresh_period = int(jsonparse.get("refresh_period", self._refresh_period)) if self._refresh_period < remote_refresh_period: - self.log_once(f'Refresh Period has been increased from {self._refresh_period}' - f' to minimum allowed: {remote_refresh_period} from Remote.', logger.info) + self.log_once( + f"Refresh Period has been increased from {self._refresh_period}" + f" to minimum allowed: {remote_refresh_period} from Remote.", + logger.info, + ) self._refresh_period = remote_refresh_period self._pair_cache = TTLCache(maxsize=1, ttl=remote_refresh_period) @@ -164,25 +175,21 @@ class RemotePairList(IPairList): def return_last_pairlist(self) -> List[str]: if self._keep_pairlist_on_failure: pairlist = self._last_pairlist - self.log_once('Keeping last fetched pairlist', logger.info) + self.log_once("Keeping last fetched pairlist", logger.info) else: pairlist = [] return pairlist def fetch_pairlist(self) -> Tuple[List[str], float]: - - headers = { - 'User-Agent': 'Freqtrade/' + __version__ + ' Remotepairlist' - } + headers = {"User-Agent": "Freqtrade/" + __version__ + " Remotepairlist"} if self._bearer_token: - headers['Authorization'] = f'Bearer {self._bearer_token}' + headers["Authorization"] = f"Bearer {self._bearer_token}" try: - response = requests.get(self._pairlist_url, headers=headers, - timeout=self._read_timeout) - content_type = response.headers.get('content-type') + response = requests.get(self._pairlist_url, headers=headers, timeout=self._read_timeout) + content_type = response.headers.get("content-type") time_elapsed = response.elapsed.total_seconds() if "application/json" in str(content_type): @@ -191,14 +198,16 @@ class RemotePairList(IPairList): try: pairlist = self.process_json(jsonparse) except Exception as e: - pairlist = self._handle_error(f'Failed processing JSON data: {type(e)}') + pairlist = self._handle_error(f"Failed processing JSON data: {type(e)}") else: - pairlist = self._handle_error(f'RemotePairList is not of type JSON.' - f' {self._pairlist_url}') + pairlist = self._handle_error( + f"RemotePairList is not of type JSON. {self._pairlist_url}" + ) except requests.exceptions.RequestException: - pairlist = self._handle_error(f'Was not able to fetch pairlist from:' - f' {self._pairlist_url}') + pairlist = self._handle_error( + f"Was not able to fetch pairlist from: {self._pairlist_url}" + ) time_elapsed = 0 @@ -219,7 +228,7 @@ class RemotePairList(IPairList): """ if self._init_done: - pairlist = self._pair_cache.get('pairlist') + pairlist = self._pair_cache.get("pairlist") if pairlist == [None]: # Valid but empty pairlist. 
return [] @@ -243,7 +252,7 @@ class RemotePairList(IPairList): jsonparse = rapidjson.load(json_file, parse_mode=CONFIG_PARSE_MODE) pairlist = self.process_json(jsonparse) except Exception as e: - pairlist = self._handle_error(f'processing JSON data: {type(e)}') + pairlist = self._handle_error(f"processing JSON data: {type(e)}") else: pairlist = self._handle_error(f"{self._pairlist_url} does not exist.") @@ -255,18 +264,18 @@ class RemotePairList(IPairList): pairlist = expand_pairlist(pairlist, list(self._exchange.get_markets().keys())) pairlist = self._whitelist_for_active_markets(pairlist) - pairlist = pairlist[:self._number_pairs] + pairlist = pairlist[: self._number_pairs] if pairlist: - self._pair_cache['pairlist'] = pairlist.copy() + self._pair_cache["pairlist"] = pairlist.copy() else: # If pairlist is empty, set a dummy value to avoid fetching again - self._pair_cache['pairlist'] = [None] + self._pair_cache["pairlist"] = [None] if time_elapsed != 0.0: - self.log_once(f'Pairlist Fetched in {time_elapsed} seconds.', logger.info) + self.log_once(f"Pairlist Fetched in {time_elapsed} seconds.", logger.info) else: - self.log_once('Fetched Pairlist.', logger.info) + self.log_once("Fetched Pairlist.", logger.info) self._last_pairlist = list(pairlist) @@ -276,12 +285,10 @@ class RemotePairList(IPairList): return pairlist def save_pairlist(self, pairlist: List[str], filename: str) -> None: - pairlist_data = { - "pairs": pairlist - } + pairlist_data = {"pairs": pairlist} try: file_path = Path(filename) - with file_path.open('w') as json_file: + with file_path.open("w") as json_file: rapidjson.dump(pairlist_data, json_file) logger.info(f"Processed pairlist saved to {filename}") except Exception as e: @@ -314,5 +321,5 @@ class RemotePairList(IPairList): if filtered: self.log_once(f"Blacklist - Filtered out pairs: {filtered}", logger.info) - merged_list = merged_list[:self._number_pairs] + merged_list = merged_list[: self._number_pairs] return merged_list diff --git a/freqtrade/plugins/pairlist/ShuffleFilter.py b/freqtrade/plugins/pairlist/ShuffleFilter.py index ce37dd8b5..d7f8a60bc 100644 --- a/freqtrade/plugins/pairlist/ShuffleFilter.py +++ b/freqtrade/plugins/pairlist/ShuffleFilter.py @@ -1,6 +1,7 @@ """ Shuffle pair list filter """ + import logging import random from typing import Any, Dict, List, Literal @@ -15,29 +16,34 @@ from freqtrade.util.periodic_cache import PeriodicCache logger = logging.getLogger(__name__) -ShuffleValues = Literal['candle', 'iteration'] +ShuffleValues = Literal["candle", "iteration"] class ShuffleFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) # Apply seed in backtesting mode to get comparable results, # but not in live modes to get a non-repeating order of pairs during live modes. 
- if config.get('runmode') in (RunMode.LIVE, RunMode.DRY_RUN): + if config.get("runmode") in (RunMode.LIVE, RunMode.DRY_RUN): self._seed = None logger.info("Live mode detected, not applying seed.") else: - self._seed = pairlistconfig.get('seed') + self._seed = pairlistconfig.get("seed") logger.info(f"Backtesting mode detected, applying seed value: {self._seed}") self._random = random.Random(self._seed) - self._shuffle_freq: ShuffleValues = pairlistconfig.get('shuffle_frequency', 'candle') + self._shuffle_freq: ShuffleValues = pairlistconfig.get("shuffle_frequency", "candle") self.__pairlist_cache = PeriodicCache( - maxsize=1000, ttl=timeframe_to_seconds(self._config['timeframe'])) + maxsize=1000, ttl=timeframe_to_seconds(self._config["timeframe"]) + ) @property def needstickers(self) -> bool: @@ -52,8 +58,9 @@ class ShuffleFilter(IPairList): """ Short whitelist method description - used for startup-messages """ - return (f"{self.name} - Shuffling pairs every {self._shuffle_freq}" + - (f", seed = {self._seed}." if self._seed is not None else ".")) + return f"{self.name} - Shuffling pairs every {self._shuffle_freq}" + ( + f", seed = {self._seed}." if self._seed is not None else "." + ) @staticmethod def description() -> str: @@ -87,7 +94,7 @@ class ShuffleFilter(IPairList): """ pairlist_bef = tuple(pairlist) pairlist_new = self.__pairlist_cache.get(pairlist_bef) - if pairlist_new and self._shuffle_freq == 'candle': + if pairlist_new and self._shuffle_freq == "candle": # Use cached pairlist. return pairlist_new # Shuffle is done inplace diff --git a/freqtrade/plugins/pairlist/SpreadFilter.py b/freqtrade/plugins/pairlist/SpreadFilter.py index ee41cbe66..4aca98f3e 100644 --- a/freqtrade/plugins/pairlist/SpreadFilter.py +++ b/freqtrade/plugins/pairlist/SpreadFilter.py @@ -1,6 +1,7 @@ """ Spread pair list filter """ + import logging from typing import Any, Dict, Optional @@ -14,16 +15,20 @@ logger = logging.getLogger(__name__) class SpreadFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._max_spread_ratio = pairlistconfig.get('max_spread_ratio', 0.005) + self._max_spread_ratio = pairlistconfig.get("max_spread_ratio", 0.005) self._enabled = self._max_spread_ratio != 0 - if not self._exchange.get_option('tickers_have_bid_ask'): + if not self._exchange.get_option("tickers_have_bid_ask"): raise OperationalException( f"{self.name} requires exchange to have bid/ask data for tickers, " "which is not available for the selected exchange / trading mode." @@ -42,8 +47,10 @@ class SpreadFilter(IPairList): """ Short whitelist method description - used for startup-messages """ - return (f"{self.name} - Filtering pairs with ask/bid diff above " - f"{self._max_spread_ratio:.2%}.") + return ( + f"{self.name} - Filtering pairs with ask/bid diff above " + f"{self._max_spread_ratio:.2%}." 
+ ) @staticmethod def description() -> str: @@ -67,15 +74,18 @@ class SpreadFilter(IPairList): :param ticker: ticker dict as returned from ccxt.fetch_ticker :return: True if the pair can stay, false if it should be removed """ - if ticker and 'bid' in ticker and 'ask' in ticker and ticker['ask'] and ticker['bid']: - spread = 1 - ticker['bid'] / ticker['ask'] + if ticker and "bid" in ticker and "ask" in ticker and ticker["ask"] and ticker["bid"]: + spread = 1 - ticker["bid"] / ticker["ask"] if spread > self._max_spread_ratio: - self.log_once(f"Removed {pair} from whitelist, because spread " - f"{spread:.3%} > {self._max_spread_ratio:.3%}", - logger.info) + self.log_once( + f"Removed {pair} from whitelist, because spread " + f"{spread:.3%} > {self._max_spread_ratio:.3%}", + logger.info, + ) return False else: return True - self.log_once(f"Removed {pair} from whitelist due to invalid ticker data: {ticker}", - logger.info) + self.log_once( + f"Removed {pair} from whitelist due to invalid ticker data: {ticker}", logger.info + ) return False diff --git a/freqtrade/plugins/pairlist/StaticPairList.py b/freqtrade/plugins/pairlist/StaticPairList.py index 16fb97adb..ac1201ca3 100644 --- a/freqtrade/plugins/pairlist/StaticPairList.py +++ b/freqtrade/plugins/pairlist/StaticPairList.py @@ -3,6 +3,7 @@ Static Pair List provider Provides pair white list as it configured in config """ + import logging from copy import deepcopy from typing import Any, Dict, List @@ -16,15 +17,19 @@ logger = logging.getLogger(__name__) class StaticPairList(IPairList): - is_pairlist_generator = True - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._allow_inactive = self._pairlistconfig.get('allow_inactive', False) + self._allow_inactive = self._pairlistconfig.get("allow_inactive", False) @property def needstickers(self) -> bool: @@ -65,11 +70,12 @@ class StaticPairList(IPairList): """ if self._allow_inactive: return self.verify_whitelist( - self._config['exchange']['pair_whitelist'], logger.info, keep_invalid=True + self._config["exchange"]["pair_whitelist"], logger.info, keep_invalid=True ) else: return self._whitelist_for_active_markets( - self.verify_whitelist(self._config['exchange']['pair_whitelist'], logger.info)) + self.verify_whitelist(self._config["exchange"]["pair_whitelist"], logger.info) + ) def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: """ @@ -80,7 +86,7 @@ class StaticPairList(IPairList): :return: new whitelist """ pairlist_ = deepcopy(pairlist) - for pair in self._config['exchange']['pair_whitelist']: + for pair in self._config["exchange"]["pair_whitelist"]: if pair not in pairlist_: pairlist_.append(pair) return pairlist_ diff --git a/freqtrade/plugins/pairlist/VolatilityFilter.py b/freqtrade/plugins/pairlist/VolatilityFilter.py index cdd171e91..c4088196d 100644 --- a/freqtrade/plugins/pairlist/VolatilityFilter.py +++ b/freqtrade/plugins/pairlist/VolatilityFilter.py @@ -1,6 +1,7 @@ """ Volatility pairlist filter """ + import logging import sys from datetime import timedelta @@ -26,29 +27,38 @@ class VolatilityFilter(IPairList): Filters pairs by volatility """ - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: 
int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._days = pairlistconfig.get('lookback_days', 10) - self._min_volatility = pairlistconfig.get('min_volatility', 0) - self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize) - self._refresh_period = pairlistconfig.get('refresh_period', 1440) - self._def_candletype = self._config['candle_type_def'] - self._sort_direction: Optional[str] = pairlistconfig.get('sort_direction', None) + self._days = pairlistconfig.get("lookback_days", 10) + self._min_volatility = pairlistconfig.get("min_volatility", 0) + self._max_volatility = pairlistconfig.get("max_volatility", sys.maxsize) + self._refresh_period = pairlistconfig.get("refresh_period", 1440) + self._def_candletype = self._config["candle_type_def"] + self._sort_direction: Optional[str] = pairlistconfig.get("sort_direction", None) self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period) - candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def']) + candle_limit = exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"]) if self._days < 1: raise OperationalException("VolatilityFilter requires lookback_days to be >= 1") if self._days > candle_limit: - raise OperationalException("VolatilityFilter requires lookback_days to not " - f"exceed exchange max request size ({candle_limit})") - if self._sort_direction not in [None, 'asc', 'desc']: - raise OperationalException("VolatilityFilter requires sort_direction to be " - "either None (undefined), 'asc' or 'desc'") + raise OperationalException( + "VolatilityFilter requires lookback_days to not " + f"exceed exchange max request size ({candle_limit})" + ) + if self._sort_direction not in [None, "asc", "desc"]: + raise OperationalException( + "VolatilityFilter requires sort_direction to be " + "either None (undefined), 'asc' or 'desc'" + ) @property def needstickers(self) -> bool: @@ -63,9 +73,11 @@ class VolatilityFilter(IPairList): """ Short whitelist method description - used for startup-messages """ - return (f"{self.name} - Filtering pairs with volatility range " - f"{self._min_volatility}-{self._max_volatility} " - f" the last {self._days} {plural(self._days, 'day')}.") + return ( + f"{self.name} - Filtering pairs with volatility range " + f"{self._min_volatility}-{self._max_volatility} " + f" the last {self._days} {plural(self._days, 'day')}." 
+ ) @staticmethod def description() -> str: @@ -99,7 +111,7 @@ class VolatilityFilter(IPairList): "description": "Sort pairlist", "help": "Sort Pairlist ascending or descending by volatility.", }, - **IPairList.refresh_period_parameter() + **IPairList.refresh_period_parameter(), } def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: @@ -110,7 +122,8 @@ class VolatilityFilter(IPairList): :return: new allowlist """ needed_pairs: ListPairsWithTimeframes = [ - (p, '1d', self._def_candletype) for p in pairlist if p not in self._pair_cache] + (p, "1d", self._def_candletype) for p in pairlist if p not in self._pair_cache + ] since_ms = dt_ts(dt_floor_day(dt_now()) - timedelta(days=self._days)) candles = self._exchange.refresh_ohlcv_with_cache(needed_pairs, since_ms=since_ms) @@ -118,7 +131,7 @@ class VolatilityFilter(IPairList): resulting_pairlist: List[str] = [] volatilitys: Dict[str, float] = {} for p in pairlist: - daily_candles = candles.get((p, '1d', self._def_candletype), None) + daily_candles = candles.get((p, "1d", self._def_candletype), None) volatility_avg = self._calculate_volatility(p, daily_candles) @@ -132,18 +145,20 @@ class VolatilityFilter(IPairList): self.log_once(f"Removed {p} from whitelist, no candles found.", logger.info) if self._sort_direction: - resulting_pairlist = sorted(resulting_pairlist, - key=lambda p: volatilitys[p], - reverse=self._sort_direction == 'desc') + resulting_pairlist = sorted( + resulting_pairlist, + key=lambda p: volatilitys[p], + reverse=self._sort_direction == "desc", + ) return resulting_pairlist - def _calculate_volatility(self, pair: str, daily_candles: DataFrame) -> Optional[float]: + def _calculate_volatility(self, pair: str, daily_candles: DataFrame) -> Optional[float]: # Check symbol in cache if (volatility_avg := self._pair_cache.get(pair, None)) is not None: return volatility_avg if daily_candles is not None and not daily_candles.empty: - returns = (np.log(daily_candles["close"].shift(1) / daily_candles["close"])) + returns = np.log(daily_candles["close"].shift(1) / daily_candles["close"]) returns.fillna(0, inplace=True) volatility_series = returns.rolling(window=self._days).std() * np.sqrt(self._days) @@ -165,11 +180,13 @@ class VolatilityFilter(IPairList): if self._min_volatility <= volatility_avg <= self._max_volatility: result = True else: - self.log_once(f"Removed {pair} from whitelist, because volatility " - f"over {self._days} {plural(self._days, 'day')} " - f"is: {volatility_avg:.3f} " - f"which is not in the configured range of " - f"{self._min_volatility}-{self._max_volatility}.", - logger.info) + self.log_once( + f"Removed {pair} from whitelist, because volatility " + f"over {self._days} {plural(self._days, 'day')} " + f"is: {volatility_avg:.3f} " + f"which is not in the configured range of " + f"{self._min_volatility}-{self._max_volatility}.", + logger.info, + ) result = False return result diff --git a/freqtrade/plugins/pairlist/VolumePairList.py b/freqtrade/plugins/pairlist/VolumePairList.py index acc6ad7e1..f9a0dd6b1 100644 --- a/freqtrade/plugins/pairlist/VolumePairList.py +++ b/freqtrade/plugins/pairlist/VolumePairList.py @@ -3,6 +3,7 @@ Volume PairList provider Provides dynamic pair list based on trade volumes """ + import logging from datetime import timedelta from typing import Any, Dict, List, Literal @@ -20,45 +21,50 @@ from freqtrade.util import dt_now, format_ms_time logger = logging.getLogger(__name__) -SORT_VALUES = ['quoteVolume'] +SORT_VALUES = ["quoteVolume"] class 
VolumePairList(IPairList): - is_pairlist_generator = True - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - if 'number_assets' not in self._pairlistconfig: + if "number_assets" not in self._pairlistconfig: raise OperationalException( - '`number_assets` not specified. Please check your configuration ' - 'for "pairlist.config.number_assets"') + "`number_assets` not specified. Please check your configuration " + 'for "pairlist.config.number_assets"' + ) - self._stake_currency = config['stake_currency'] - self._number_pairs = self._pairlistconfig['number_assets'] - self._sort_key: Literal['quoteVolume'] = self._pairlistconfig.get('sort_key', 'quoteVolume') - self._min_value = self._pairlistconfig.get('min_value', 0) + self._stake_currency = config["stake_currency"] + self._number_pairs = self._pairlistconfig["number_assets"] + self._sort_key: Literal["quoteVolume"] = self._pairlistconfig.get("sort_key", "quoteVolume") + self._min_value = self._pairlistconfig.get("min_value", 0) self._max_value = self._pairlistconfig.get("max_value", None) - self._refresh_period = self._pairlistconfig.get('refresh_period', 1800) + self._refresh_period = self._pairlistconfig.get("refresh_period", 1800) self._pair_cache: TTLCache = TTLCache(maxsize=1, ttl=self._refresh_period) - self._lookback_days = self._pairlistconfig.get('lookback_days', 0) - self._lookback_timeframe = self._pairlistconfig.get('lookback_timeframe', '1d') - self._lookback_period = self._pairlistconfig.get('lookback_period', 0) - self._def_candletype = self._config['candle_type_def'] + self._lookback_days = self._pairlistconfig.get("lookback_days", 0) + self._lookback_timeframe = self._pairlistconfig.get("lookback_timeframe", "1d") + self._lookback_period = self._pairlistconfig.get("lookback_period", 0) + self._def_candletype = self._config["candle_type_def"] if (self._lookback_days > 0) & (self._lookback_period > 0): raise OperationalException( - 'Ambigous configuration: lookback_days and lookback_period both set in pairlist ' - 'config. Please set lookback_days only or lookback_period and lookback_timeframe ' - 'and restart the bot.' + "Ambiguous configuration: lookback_days and lookback_period both set in pairlist " + "config. Please set lookback_days only or lookback_period and lookback_timeframe " + "and restart the bot." ) # overwrite lookback timeframe and days when lookback_days is set if self._lookback_days > 0: - self._lookback_timeframe = '1d' + self._lookback_timeframe = "1d" self._lookback_period = self._lookback_days # get timeframe in minutes and seconds @@ -70,14 +76,15 @@ class VolumePairList(IPairList): if self._use_range & (self._refresh_period < _tf_in_sec): raise OperationalException( - f'Refresh period of {self._refresh_period} seconds is smaller than one ' - f'timeframe of {self._lookback_timeframe}. Please adjust refresh_period ' - f'to at least {_tf_in_sec} and restart the bot.' + f"Refresh period of {self._refresh_period} seconds is smaller than one " + f"timeframe of {self._lookback_timeframe}. Please adjust refresh_period " + f"to at least {_tf_in_sec} and restart the bot." 
) - if (not self._use_range and not ( - self._exchange.exchange_has('fetchTickers') - and self._exchange.get_option("tickers_have_quoteVolume"))): + if not self._use_range and not ( + self._exchange.exchange_has("fetchTickers") + and self._exchange.get_option("tickers_have_quoteVolume") + ): raise OperationalException( "Exchange does not support dynamic whitelist in this configuration. " "Please edit your config and either remove Volumepairlist, " @@ -85,16 +92,18 @@ class VolumePairList(IPairList): ) if not self._validate_keys(self._sort_key): - raise OperationalException( - f'key {self._sort_key} not in {SORT_VALUES}') + raise OperationalException(f"key {self._sort_key} not in {SORT_VALUES}") candle_limit = exchange.ohlcv_candle_limit( - self._lookback_timeframe, self._config['candle_type_def']) + self._lookback_timeframe, self._config["candle_type_def"] + ) if self._lookback_period < 0: raise OperationalException("VolumeFilter requires lookback_period to be >= 0") if self._lookback_period > candle_limit: - raise OperationalException("VolumeFilter requires lookback_period to not " - f"exceed exchange max request size ({candle_limit})") + raise OperationalException( + "VolumeFilter requires lookback_period to not " + f"exceed exchange max request size ({candle_limit})" + ) @property def needstickers(self) -> bool: @@ -175,30 +184,37 @@ class VolumePairList(IPairList): """ # Generate dynamic whitelist # Must always run if this pairlist is not the first in the list. - pairlist = self._pair_cache.get('pairlist') + pairlist = self._pair_cache.get("pairlist") if pairlist: # Item found - no refresh necessary return pairlist.copy() else: # Use fresh pairlist # Check if pair quote currency equals to the stake currency. - _pairlist = [k for k in self._exchange.get_markets( - quote_currencies=[self._stake_currency], - tradable_only=True, active_only=True).keys()] + _pairlist = [ + k + for k in self._exchange.get_markets( + quote_currencies=[self._stake_currency], tradable_only=True, active_only=True + ).keys() + ] # No point in testing for blacklisted pairs... _pairlist = self.verify_blacklist(_pairlist, logger.info) if not self._use_range: filtered_tickers = [ - v for k, v in tickers.items() - if (self._exchange.get_pair_quote_currency(k) == self._stake_currency + v + for k, v in tickers.items() + if ( + self._exchange.get_pair_quote_currency(k) == self._stake_currency and (self._use_range or v.get(self._sort_key) is not None) - and v['symbol'] in _pairlist)] - pairlist = [s['symbol'] for s in filtered_tickers] + and v["symbol"] in _pairlist + ) + ] + pairlist = [s["symbol"] for s in filtered_tickers] else: pairlist = _pairlist pairlist = self.filter_pairlist(pairlist, tickers) - self._pair_cache['pairlist'] = pairlist.copy() + self._pair_cache["pairlist"] = pairlist.copy() return pairlist @@ -212,81 +228,95 @@ class VolumePairList(IPairList): """ if self._use_range: # Create bare minimum from tickers structure. 
- filtered_tickers: List[Dict[str, Any]] = [{'symbol': k} for k in pairlist] + filtered_tickers: List[Dict[str, Any]] = [{"symbol": k} for k in pairlist] # get lookback period in ms, for exchange ohlcv fetch - since_ms = int(timeframe_to_prev_date( - self._lookback_timeframe, - dt_now() + timedelta( - minutes=-(self._lookback_period * self._tf_in_min) - self._tf_in_min) - ).timestamp()) * 1000 + since_ms = ( + int( + timeframe_to_prev_date( + self._lookback_timeframe, + dt_now() + + timedelta( + minutes=-(self._lookback_period * self._tf_in_min) - self._tf_in_min + ), + ).timestamp() + ) + * 1000 + ) - to_ms = int(timeframe_to_prev_date( - self._lookback_timeframe, - dt_now() - timedelta(minutes=self._tf_in_min) - ).timestamp()) * 1000 + to_ms = ( + int( + timeframe_to_prev_date( + self._lookback_timeframe, dt_now() - timedelta(minutes=self._tf_in_min) + ).timestamp() + ) + * 1000 + ) # todo: utc date output for starting date - self.log_once(f"Using volume range of {self._lookback_period} candles, timeframe: " - f"{self._lookback_timeframe}, starting from {format_ms_time(since_ms)} " - f"till {format_ms_time(to_ms)}", logger.info) + self.log_once( + f"Using volume range of {self._lookback_period} candles, timeframe: " + f"{self._lookback_timeframe}, starting from {format_ms_time(since_ms)} " + f"till {format_ms_time(to_ms)}", + logger.info, + ) needed_pairs: ListPairsWithTimeframes = [ - (p, self._lookback_timeframe, self._def_candletype) for p in - [s['symbol'] for s in filtered_tickers] + (p, self._lookback_timeframe, self._def_candletype) + for p in [s["symbol"] for s in filtered_tickers] if p not in self._pair_cache ] candles = self._exchange.refresh_ohlcv_with_cache(needed_pairs, since_ms) for i, p in enumerate(filtered_tickers): - contract_size = self._exchange.markets[p['symbol']].get('contractSize', 1.0) or 1.0 - pair_candles = candles[ - (p['symbol'], self._lookback_timeframe, self._def_candletype) - ] if ( - p['symbol'], self._lookback_timeframe, self._def_candletype - ) in candles else None + contract_size = self._exchange.markets[p["symbol"]].get("contractSize", 1.0) or 1.0 + pair_candles = ( + candles[(p["symbol"], self._lookback_timeframe, self._def_candletype)] + if (p["symbol"], self._lookback_timeframe, self._def_candletype) in candles + else None + ) # in case of candle data calculate typical price and quoteVolume for candle if pair_candles is not None and not pair_candles.empty: if self._exchange.get_option("ohlcv_volume_currency") == "base": - pair_candles['typical_price'] = (pair_candles['high'] + pair_candles['low'] - + pair_candles['close']) / 3 + pair_candles["typical_price"] = ( + pair_candles["high"] + pair_candles["low"] + pair_candles["close"] + ) / 3 - pair_candles['quoteVolume'] = ( - pair_candles['volume'] * pair_candles['typical_price'] - * contract_size + pair_candles["quoteVolume"] = ( + pair_candles["volume"] * pair_candles["typical_price"] * contract_size ) else: # Exchange ohlcv data is in quote volume already. 
- pair_candles['quoteVolume'] = pair_candles['volume'] + pair_candles["quoteVolume"] = pair_candles["volume"] # ensure that a rolling sum over the lookback_period is built # if pair_candles contains more candles than lookback_period - quoteVolume = (pair_candles['quoteVolume'] - .rolling(self._lookback_period) - .sum() - .fillna(0) - .iloc[-1]) + quoteVolume = ( + pair_candles["quoteVolume"] + .rolling(self._lookback_period) + .sum() + .fillna(0) + .iloc[-1] + ) # replace quoteVolume with range quoteVolume sum calculated above - filtered_tickers[i]['quoteVolume'] = quoteVolume + filtered_tickers[i]["quoteVolume"] = quoteVolume else: - filtered_tickers[i]['quoteVolume'] = 0 + filtered_tickers[i]["quoteVolume"] = 0 else: # Tickers mode - filter based on incoming pairlist. filtered_tickers = [v for k, v in tickers.items() if k in pairlist] if self._min_value > 0: - filtered_tickers = [ - v for v in filtered_tickers if v[self._sort_key] > self._min_value] + filtered_tickers = [v for v in filtered_tickers if v[self._sort_key] > self._min_value] if self._max_value is not None: - filtered_tickers = [ - v for v in filtered_tickers if v[self._sort_key] < self._max_value] + filtered_tickers = [v for v in filtered_tickers if v[self._sort_key] < self._max_value] sorted_tickers = sorted(filtered_tickers, reverse=True, key=lambda t: t[self._sort_key]) # Validate whitelist to only have active market pairs - pairs = self._whitelist_for_active_markets([s['symbol'] for s in sorted_tickers]) + pairs = self._whitelist_for_active_markets([s["symbol"] for s in sorted_tickers]) pairs = self.verify_blacklist(pairs, logmethod=logger.info) # Limit pairlist to the requested number of pairs - pairs = pairs[:self._number_pairs] + pairs = pairs[: self._number_pairs] return pairs diff --git a/freqtrade/plugins/pairlist/pairlist_helpers.py b/freqtrade/plugins/pairlist/pairlist_helpers.py index ca8fdc200..9bbd85182 100644 --- a/freqtrade/plugins/pairlist/pairlist_helpers.py +++ b/freqtrade/plugins/pairlist/pairlist_helpers.py @@ -4,8 +4,9 @@ from typing import List from freqtrade.constants import Config -def expand_pairlist(wildcardpl: List[str], available_pairs: List[str], - keep_invalid: bool = False) -> List[str]: +def expand_pairlist( + wildcardpl: List[str], available_pairs: List[str], keep_invalid: bool = False +) -> List[str]: """ Expand pairlist potentially containing wildcards based on available markets. This will implicitly filter all pairs in the wildcard-list which are not in available_pairs. @@ -20,34 +21,29 @@ def expand_pairlist(wildcardpl: List[str], available_pairs: List[str], for pair_wc in wildcardpl: try: comp = re.compile(pair_wc, re.IGNORECASE) - result_partial = [ - pair for pair in available_pairs if re.fullmatch(comp, pair) - ] + result_partial = [pair for pair in available_pairs if re.fullmatch(comp, pair)] # Add all matching pairs. # If there are no matching pairs (Pair not on exchange) keep it. 
result += result_partial or [pair_wc] except re.error as err: raise ValueError(f"Wildcard error in {pair_wc}, {err}") - result = [element for element in result if re.fullmatch(r'^[A-Za-z0-9:/-]+$', element)] + result = [element for element in result if re.fullmatch(r"^[A-Za-z0-9:/-]+$", element)] else: for pair_wc in wildcardpl: try: comp = re.compile(pair_wc, re.IGNORECASE) - result += [ - pair for pair in available_pairs if re.fullmatch(comp, pair) - ] + result += [pair for pair in available_pairs if re.fullmatch(comp, pair)] except re.error as err: raise ValueError(f"Wildcard error in {pair_wc}, {err}") return result def dynamic_expand_pairlist(config: Config, markets: List[str]) -> List[str]: - expanded_pairs = expand_pairlist(config['pairs'], markets) - if config.get('freqai', {}).get('enabled', False): - corr_pairlist = config['freqai']['feature_parameters']['include_corr_pairlist'] - expanded_pairs += [pair for pair in corr_pairlist - if pair not in config['pairs']] + expanded_pairs = expand_pairlist(config["pairs"], markets) + if config.get("freqai", {}).get("enabled", False): + corr_pairlist = config["freqai"]["feature_parameters"]["include_corr_pairlist"] + expanded_pairs += [pair for pair in corr_pairlist if pair not in config["pairs"]] return expanded_pairs diff --git a/freqtrade/plugins/pairlist/rangestabilityfilter.py b/freqtrade/plugins/pairlist/rangestabilityfilter.py index 0480f60d0..54c6a536e 100644 --- a/freqtrade/plugins/pairlist/rangestabilityfilter.py +++ b/freqtrade/plugins/pairlist/rangestabilityfilter.py @@ -1,6 +1,7 @@ """ Rate of change pairlist filter """ + import logging from datetime import timedelta from typing import Any, Dict, List, Optional @@ -20,30 +21,38 @@ logger = logging.getLogger(__name__) class RangeStabilityFilter(IPairList): - - def __init__(self, exchange, pairlistmanager, - config: Config, pairlistconfig: Dict[str, Any], - pairlist_pos: int) -> None: + def __init__( + self, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: Dict[str, Any], + pairlist_pos: int, + ) -> None: super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos) - self._days = pairlistconfig.get('lookback_days', 10) - self._min_rate_of_change = pairlistconfig.get('min_rate_of_change', 0.01) - self._max_rate_of_change = pairlistconfig.get('max_rate_of_change') - self._refresh_period = pairlistconfig.get('refresh_period', 86400) - self._def_candletype = self._config['candle_type_def'] - self._sort_direction: Optional[str] = pairlistconfig.get('sort_direction', None) + self._days = pairlistconfig.get("lookback_days", 10) + self._min_rate_of_change = pairlistconfig.get("min_rate_of_change", 0.01) + self._max_rate_of_change = pairlistconfig.get("max_rate_of_change") + self._refresh_period = pairlistconfig.get("refresh_period", 86400) + self._def_candletype = self._config["candle_type_def"] + self._sort_direction: Optional[str] = pairlistconfig.get("sort_direction", None) self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period) - candle_limit = exchange.ohlcv_candle_limit('1d', self._config['candle_type_def']) + candle_limit = exchange.ohlcv_candle_limit("1d", self._config["candle_type_def"]) if self._days < 1: raise OperationalException("RangeStabilityFilter requires lookback_days to be >= 1") if self._days > candle_limit: - raise OperationalException("RangeStabilityFilter requires lookback_days to not " - f"exceed exchange max request size ({candle_limit})") - if self._sort_direction not in [None, 'asc', 'desc']: - raise 
OperationalException("RangeStabilityFilter requires sort_direction to be " - "either None (undefined), 'asc' or 'desc'") + raise OperationalException( + "RangeStabilityFilter requires lookback_days to not " + f"exceed exchange max request size ({candle_limit})" + ) + if self._sort_direction not in [None, "asc", "desc"]: + raise OperationalException( + "RangeStabilityFilter requires sort_direction to be " + "either None (undefined), 'asc' or 'desc'" + ) @property def needstickers(self) -> bool: @@ -60,10 +69,12 @@ class RangeStabilityFilter(IPairList): """ max_rate_desc = "" if self._max_rate_of_change: - max_rate_desc = (f" and above {self._max_rate_of_change}") - return (f"{self.name} - Filtering pairs with rate of change below " - f"{self._min_rate_of_change}{max_rate_desc} over the " - f"last {plural(self._days, 'day')}.") + max_rate_desc = f" and above {self._max_rate_of_change}" + return ( + f"{self.name} - Filtering pairs with rate of change below " + f"{self._min_rate_of_change}{max_rate_desc} over the " + f"last {plural(self._days, 'day')}." + ) @staticmethod def description() -> str: @@ -97,7 +108,7 @@ class RangeStabilityFilter(IPairList): "description": "Sort pairlist", "help": "Sort Pairlist ascending or descending by rate of change.", }, - **IPairList.refresh_period_parameter() + **IPairList.refresh_period_parameter(), } def filter_pairlist(self, pairlist: List[str], tickers: Tickers) -> List[str]: @@ -108,7 +119,8 @@ class RangeStabilityFilter(IPairList): :return: new allowlist """ needed_pairs: ListPairsWithTimeframes = [ - (p, '1d', self._def_candletype) for p in pairlist if p not in self._pair_cache] + (p, "1d", self._def_candletype) for p in pairlist if p not in self._pair_cache + ] since_ms = dt_ts(dt_floor_day(dt_now()) - timedelta(days=self._days + 1)) candles = self._exchange.refresh_ohlcv_with_cache(needed_pairs, since_ms=since_ms) @@ -117,7 +129,7 @@ class RangeStabilityFilter(IPairList): pct_changes: Dict[str, float] = {} for p in pairlist: - daily_candles = candles.get((p, '1d', self._def_candletype), None) + daily_candles = candles.get((p, "1d", self._def_candletype), None) pct_change = self._calculate_rate_of_change(p, daily_candles) @@ -129,9 +141,11 @@ class RangeStabilityFilter(IPairList): self.log_once(f"Removed {p} from whitelist, no candles found.", logger.info) if self._sort_direction: - resulting_pairlist = sorted(resulting_pairlist, - key=lambda p: pct_changes[p], - reverse=self._sort_direction == 'desc') + resulting_pairlist = sorted( + resulting_pairlist, + key=lambda p: pct_changes[p], + reverse=self._sort_direction == "desc", + ) return resulting_pairlist def _calculate_rate_of_change(self, pair: str, daily_candles: DataFrame) -> Optional[float]: @@ -139,9 +153,8 @@ class RangeStabilityFilter(IPairList): if (pct_change := self._pair_cache.get(pair, None)) is not None: return pct_change if daily_candles is not None and not daily_candles.empty: - - highest_high = daily_candles['high'].max() - lowest_low = daily_candles['low'].min() + highest_high = daily_candles["high"].max() + lowest_low = daily_candles["low"].min() pct_change = ((highest_high - lowest_low) / lowest_low) if lowest_low > 0 else 0 self._pair_cache[pair] = pct_change return pct_change @@ -158,10 +171,12 @@ class RangeStabilityFilter(IPairList): result = True if pct_change < self._min_rate_of_change: - self.log_once(f"Removed {pair} from whitelist, because rate of change " - f"over {self._days} {plural(self._days, 'day')} is {pct_change:.3f}, " - f"which is below the threshold of 
{self._min_rate_of_change}.", - logger.info) + self.log_once( + f"Removed {pair} from whitelist, because rate of change " + f"over {self._days} {plural(self._days, 'day')} is {pct_change:.3f}, " + f"which is below the threshold of {self._min_rate_of_change}.", + logger.info, + ) result = False if self._max_rate_of_change: if pct_change > self._max_rate_of_change: @@ -169,6 +184,7 @@ class RangeStabilityFilter(IPairList): f"Removed {pair} from whitelist, because rate of change " f"over {self._days} {plural(self._days, 'day')} is {pct_change:.3f}, " f"which is above the threshold of {self._max_rate_of_change}.", - logger.info) + logger.info, + ) result = False return result diff --git a/freqtrade/plugins/pairlistmanager.py b/freqtrade/plugins/pairlistmanager.py index b300f06be..a6afd5e64 100644 --- a/freqtrade/plugins/pairlistmanager.py +++ b/freqtrade/plugins/pairlistmanager.py @@ -1,6 +1,7 @@ """ PairList manager class """ + import logging from functools import partial from typing import Dict, List, Optional @@ -22,24 +23,24 @@ logger = logging.getLogger(__name__) class PairListManager(LoggingMixin): - def __init__( - self, exchange, config: Config, dataprovider: Optional[DataProvider] = None) -> None: + self, exchange, config: Config, dataprovider: Optional[DataProvider] = None + ) -> None: self._exchange = exchange self._config = config - self._whitelist = self._config['exchange'].get('pair_whitelist') - self._blacklist = self._config['exchange'].get('pair_blacklist', []) + self._whitelist = self._config["exchange"].get("pair_whitelist") + self._blacklist = self._config["exchange"].get("pair_blacklist", []) self._pairlist_handlers: List[IPairList] = [] self._tickers_needed = False self._dataprovider: Optional[DataProvider] = dataprovider - for pairlist_handler_config in self._config.get('pairlists', []): + for pairlist_handler_config in self._config.get("pairlists", []): pairlist_handler = PairListResolver.load_pairlist( - pairlist_handler_config['method'], + pairlist_handler_config["method"], exchange=exchange, pairlistmanager=self, config=config, pairlistconfig=pairlist_handler_config, - pairlist_pos=len(self._pairlist_handlers) + pairlist_pos=len(self._pairlist_handlers), ) self._tickers_needed |= pairlist_handler.needstickers self._pairlist_handlers.append(pairlist_handler) @@ -47,7 +48,7 @@ class PairListManager(LoggingMixin): if not self._pairlist_handlers: raise OperationalException("No Pairlist Handlers defined") - if self._tickers_needed and not self._exchange.exchange_has('fetchTickers'): + if self._tickers_needed and not self._exchange.exchange_has("fetchTickers"): invalid = ". ".join([p.name for p in self._pairlist_handlers if p.needstickers]) raise OperationalException( @@ -56,7 +57,7 @@ class PairListManager(LoggingMixin): f"{invalid}." ) - refresh_period = config.get('pairlist_refresh_period', 3600) + refresh_period = config.get("pairlist_refresh_period", 3600) LoggingMixin.__init__(self, logger, refresh_period) @property @@ -135,8 +136,9 @@ class PairListManager(LoggingMixin): pairlist.remove(pair) return pairlist - def verify_whitelist(self, pairlist: List[str], logmethod, - keep_invalid: bool = False) -> List[str]: + def verify_whitelist( + self, pairlist: List[str], logmethod, keep_invalid: bool = False + ) -> List[str]: """ Verify and remove items from pairlist - returning a filtered pairlist. Logs a warning or info depending on `aswarning`. 
@@ -155,14 +157,16 @@ class PairListManager(LoggingMixin): return whitelist def create_pair_list( - self, pairs: List[str], timeframe: Optional[str] = None) -> ListPairsWithTimeframes: + self, pairs: List[str], timeframe: Optional[str] = None + ) -> ListPairsWithTimeframes: """ Create list of pair tuples with (pair, timeframe) """ return [ ( pair, - timeframe or self._config['timeframe'], - self._config.get('candle_type_def', CandleType.SPOT) - ) for pair in pairs + timeframe or self._config["timeframe"], + self._config.get("candle_type_def", CandleType.SPOT), + ) + for pair in pairs ] diff --git a/freqtrade/plugins/protectionmanager.py b/freqtrade/plugins/protectionmanager.py index 6e55ade11..4f60ae0e0 100644 --- a/freqtrade/plugins/protectionmanager.py +++ b/freqtrade/plugins/protectionmanager.py @@ -1,6 +1,7 @@ """ Protection manager class """ + import logging from datetime import datetime, timezone from typing import Dict, List, Optional @@ -16,14 +17,13 @@ logger = logging.getLogger(__name__) class ProtectionManager: - def __init__(self, config: Config, protections: List) -> None: self._config = config self._protection_handlers: List[IProtection] = [] for protection_handler_config in protections: protection_handler = ProtectionResolver.load_protection( - protection_handler_config['method'], + protection_handler_config["method"], config=config, protection_config=protection_handler_config, ) @@ -45,8 +45,9 @@ class ProtectionManager: """ return [{p.name: p.short_desc()} for p in self._protection_handlers] - def global_stop(self, now: Optional[datetime] = None, - side: LongShort = 'long') -> Optional[PairLock]: + def global_stop( + self, now: Optional[datetime] = None, side: LongShort = "long" + ) -> Optional[PairLock]: if not now: now = datetime.now(timezone.utc) result = None @@ -56,20 +57,22 @@ class ProtectionManager: if lock and lock.until: if not PairLocks.is_global_lock(lock.until, side=lock.lock_side): result = PairLocks.lock_pair( - '*', lock.until, lock.reason, now=now, side=lock.lock_side) + "*", lock.until, lock.reason, now=now, side=lock.lock_side + ) return result - def stop_per_pair(self, pair, now: Optional[datetime] = None, - side: LongShort = 'long') -> Optional[PairLock]: + def stop_per_pair( + self, pair, now: Optional[datetime] = None, side: LongShort = "long" + ) -> Optional[PairLock]: if not now: now = datetime.now(timezone.utc) result = None for protection_handler in self._protection_handlers: if protection_handler.has_local_stop: - lock = protection_handler.stop_per_pair( - pair=pair, date_now=now, side=side) + lock = protection_handler.stop_per_pair(pair=pair, date_now=now, side=side) if lock and lock.until: if not PairLocks.is_pair_locked(pair, lock.until, lock.lock_side): result = PairLocks.lock_pair( - pair, lock.until, lock.reason, now=now, side=lock.lock_side) + pair, lock.until, lock.reason, now=now, side=lock.lock_side + ) return result diff --git a/freqtrade/plugins/protections/cooldown_period.py b/freqtrade/plugins/protections/cooldown_period.py index 426b8f1b6..2948d17d0 100644 --- a/freqtrade/plugins/protections/cooldown_period.py +++ b/freqtrade/plugins/protections/cooldown_period.py @@ -1,4 +1,3 @@ - import logging from datetime import datetime, timedelta from typing import Optional @@ -12,7 +11,6 @@ logger = logging.getLogger(__name__) class CooldownPeriod(IProtection): - has_global_stop: bool = False has_local_stop: bool = True @@ -20,13 +18,13 @@ class CooldownPeriod(IProtection): """ LockReason to use """ - return (f'Cooldown period for 
{self.stop_duration_str}.') + return f"Cooldown period for {self.stop_duration_str}." def short_desc(self) -> str: """ Short method description - used for startup-messages """ - return (f"{self.name} - Cooldown period of {self.stop_duration_str}.") + return f"{self.name} - Cooldown period of {self.stop_duration_str}." def _cooldown_period(self, pair: str, date_now: datetime) -> Optional[ProtectionReturn]: """ @@ -66,7 +64,8 @@ class CooldownPeriod(IProtection): return None def stop_per_pair( - self, pair: str, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: + self, pair: str, date_now: datetime, side: LongShort + ) -> Optional[ProtectionReturn]: """ Stops trading (position entering) for this pair This must evaluate to true for the whole period of the "cooldown period". diff --git a/freqtrade/plugins/protections/iprotection.py b/freqtrade/plugins/protections/iprotection.py index 378eccfef..204a8b827 100644 --- a/freqtrade/plugins/protections/iprotection.py +++ b/freqtrade/plugins/protections/iprotection.py @@ -1,4 +1,3 @@ - import logging from abc import ABC, abstractmethod from dataclasses import dataclass @@ -20,11 +19,10 @@ class ProtectionReturn: lock: bool until: datetime reason: Optional[str] - lock_side: str = '*' + lock_side: str = "*" class IProtection(LoggingMixin, ABC): - # Can globally stop the bot has_global_stop: bool = False # Can stop trading for one pair @@ -36,19 +34,19 @@ class IProtection(LoggingMixin, ABC): self._stop_duration_candles: Optional[int] = None self._lookback_period_candles: Optional[int] = None - tf_in_min = timeframe_to_minutes(config['timeframe']) - if 'stop_duration_candles' in protection_config: - self._stop_duration_candles = int(protection_config.get('stop_duration_candles', 1)) - self._stop_duration = (tf_in_min * self._stop_duration_candles) + tf_in_min = timeframe_to_minutes(config["timeframe"]) + if "stop_duration_candles" in protection_config: + self._stop_duration_candles = int(protection_config.get("stop_duration_candles", 1)) + self._stop_duration = tf_in_min * self._stop_duration_candles else: self._stop_duration_candles = None - self._stop_duration = int(protection_config.get('stop_duration', 60)) - if 'lookback_period_candles' in protection_config: - self._lookback_period_candles = int(protection_config.get('lookback_period_candles', 1)) + self._stop_duration = int(protection_config.get("stop_duration", 60)) + if "lookback_period_candles" in protection_config: + self._lookback_period_candles = int(protection_config.get("lookback_period_candles", 1)) self._lookback_period = tf_in_min * self._lookback_period_candles else: self._lookback_period_candles = None - self._lookback_period = int(protection_config.get('lookback_period', 60)) + self._lookback_period = int(protection_config.get("lookback_period", 60)) LoggingMixin.__init__(self, logger) @@ -62,11 +60,12 @@ class IProtection(LoggingMixin, ABC): Output configured stop duration in either candles or minutes """ if self._stop_duration_candles: - return (f"{self._stop_duration_candles} " - f"{plural(self._stop_duration_candles, 'candle', 'candles')}") + return ( + f"{self._stop_duration_candles} " + f"{plural(self._stop_duration_candles, 'candle', 'candles')}" + ) else: - return (f"{self._stop_duration} " - f"{plural(self._stop_duration, 'minute', 'minutes')}") + return f"{self._stop_duration} {plural(self._stop_duration, 'minute', 'minutes')}" @property def lookback_period_str(self) -> str: @@ -74,11 +73,12 @@ class IProtection(LoggingMixin, ABC): Output configured 
lookback period in either candles or minutes """ if self._lookback_period_candles: - return (f"{self._lookback_period_candles} " - f"{plural(self._lookback_period_candles, 'candle', 'candles')}") + return ( + f"{self._lookback_period_candles} " + f"{plural(self._lookback_period_candles, 'candle', 'candles')}" + ) else: - return (f"{self._lookback_period} " - f"{plural(self._lookback_period, 'minute', 'minutes')}") + return f"{self._lookback_period} {plural(self._lookback_period, 'minute', 'minutes')}" @abstractmethod def short_desc(self) -> str: @@ -96,7 +96,8 @@ class IProtection(LoggingMixin, ABC): @abstractmethod def stop_per_pair( - self, pair: str, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: + self, pair: str, date_now: datetime, side: LongShort + ) -> Optional[ProtectionReturn]: """ Stops trading (position entering) for this pair This must evaluate to true for the whole period of the "cooldown period". diff --git a/freqtrade/plugins/protections/low_profit_pairs.py b/freqtrade/plugins/protections/low_profit_pairs.py index f638673fa..360f6721c 100644 --- a/freqtrade/plugins/protections/low_profit_pairs.py +++ b/freqtrade/plugins/protections/low_profit_pairs.py @@ -1,4 +1,3 @@ - import logging from datetime import datetime, timedelta from typing import Any, Dict, Optional @@ -12,33 +11,37 @@ logger = logging.getLogger(__name__) class LowProfitPairs(IProtection): - has_global_stop: bool = False has_local_stop: bool = True def __init__(self, config: Config, protection_config: Dict[str, Any]) -> None: super().__init__(config, protection_config) - self._trade_limit = protection_config.get('trade_limit', 1) - self._required_profit = protection_config.get('required_profit', 0.0) - self._only_per_side = protection_config.get('only_per_side', False) + self._trade_limit = protection_config.get("trade_limit", 1) + self._required_profit = protection_config.get("required_profit", 0.0) + self._only_per_side = protection_config.get("only_per_side", False) def short_desc(self) -> str: """ Short method description - used for startup-messages """ - return (f"{self.name} - Low Profit Protection, locks pairs with " - f"profit < {self._required_profit} within {self.lookback_period_str}.") + return ( + f"{self.name} - Low Profit Protection, locks pairs with " + f"profit < {self._required_profit} within {self.lookback_period_str}." + ) def _reason(self, profit: float) -> str: """ LockReason to use """ - return (f'{profit} < {self._required_profit} in {self.lookback_period_str}, ' - f'locking for {self.stop_duration_str}.') + return ( + f"{profit} < {self._required_profit} in {self.lookback_period_str}, " + f"locking for {self.stop_duration_str}." 
+ ) def _low_profit( - self, date_now: datetime, pair: str, side: LongShort) -> Optional[ProtectionReturn]: + self, date_now: datetime, pair: str, side: LongShort + ) -> Optional[ProtectionReturn]: """ Evaluate recent trades for pair """ @@ -57,20 +60,23 @@ class LowProfitPairs(IProtection): return None profit = sum( - trade.close_profit for trade in trades if trade.close_profit - and (not self._only_per_side or trade.trade_direction == side) - ) + trade.close_profit + for trade in trades + if trade.close_profit and (not self._only_per_side or trade.trade_direction == side) + ) if profit < self._required_profit: self.log_once( f"Trading for {pair} stopped due to {profit:.2f} < {self._required_profit} " - f"within {self._lookback_period} minutes.", logger.info) + f"within {self._lookback_period} minutes.", + logger.info, + ) until = self.calculate_lock_end(trades, self._stop_duration) return ProtectionReturn( lock=True, until=until, reason=self._reason(profit), - lock_side=(side if self._only_per_side else '*') + lock_side=(side if self._only_per_side else "*"), ) return None @@ -85,7 +91,8 @@ class LowProfitPairs(IProtection): return None def stop_per_pair( - self, pair: str, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: + self, pair: str, date_now: datetime, side: LongShort + ) -> Optional[ProtectionReturn]: """ Stops trading (position entering) for this pair This must evaluate to true for the whole period of the "cooldown period". diff --git a/freqtrade/plugins/protections/max_drawdown_protection.py b/freqtrade/plugins/protections/max_drawdown_protection.py index 8193dc7e4..a1ba166fa 100644 --- a/freqtrade/plugins/protections/max_drawdown_protection.py +++ b/freqtrade/plugins/protections/max_drawdown_protection.py @@ -1,4 +1,3 @@ - import logging from datetime import datetime, timedelta from typing import Any, Dict, Optional @@ -15,30 +14,33 @@ logger = logging.getLogger(__name__) class MaxDrawdown(IProtection): - has_global_stop: bool = True has_local_stop: bool = False def __init__(self, config: Config, protection_config: Dict[str, Any]) -> None: super().__init__(config, protection_config) - self._trade_limit = protection_config.get('trade_limit', 1) - self._max_allowed_drawdown = protection_config.get('max_allowed_drawdown', 0.0) + self._trade_limit = protection_config.get("trade_limit", 1) + self._max_allowed_drawdown = protection_config.get("max_allowed_drawdown", 0.0) # TODO: Implement checks to limit max_drawdown to sensible values def short_desc(self) -> str: """ Short method description - used for startup-messages """ - return (f"{self.name} - Max drawdown protection, stop trading if drawdown is > " - f"{self._max_allowed_drawdown} within {self.lookback_period_str}.") + return ( + f"{self.name} - Max drawdown protection, stop trading if drawdown is > " + f"{self._max_allowed_drawdown} within {self.lookback_period_str}." + ) def _reason(self, drawdown: float) -> str: """ LockReason to use """ - return (f'{drawdown} passed {self._max_allowed_drawdown} in {self.lookback_period_str}, ' - f'locking for {self.stop_duration_str}.') + return ( + f"{drawdown} passed {self._max_allowed_drawdown} in {self.lookback_period_str}, " + f"locking for {self.stop_duration_str}." + ) def _max_drawdown(self, date_now: datetime) -> Optional[ProtectionReturn]: """ @@ -57,14 +59,17 @@ class MaxDrawdown(IProtection): # Drawdown is always positive try: # TODO: This should use absolute profit calculation, considering account balance. 
- drawdown, _, _, _, _, _ = calculate_max_drawdown(trades_df, value_col='close_profit') + drawdown_obj = calculate_max_drawdown(trades_df, value_col="close_profit") + drawdown = drawdown_obj.drawdown_abs except ValueError: return None if drawdown > self._max_allowed_drawdown: self.log_once( f"Trading stopped due to Max Drawdown {drawdown:.2f} > {self._max_allowed_drawdown}" - f" within {self.lookback_period_str}.", logger.info) + f" within {self.lookback_period_str}.", + logger.info, + ) until = self.calculate_lock_end(trades, self._stop_duration) return ProtectionReturn( @@ -85,7 +90,8 @@ class MaxDrawdown(IProtection): return self._max_drawdown(date_now) def stop_per_pair( - self, pair: str, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: + self, pair: str, date_now: datetime, side: LongShort + ) -> Optional[ProtectionReturn]: """ Stops trading (position entering) for this pair This must evaluate to true for the whole period of the "cooldown period". diff --git a/freqtrade/plugins/protections/stoploss_guard.py b/freqtrade/plugins/protections/stoploss_guard.py index 23ceebbc9..a9aca20b4 100644 --- a/freqtrade/plugins/protections/stoploss_guard.py +++ b/freqtrade/plugins/protections/stoploss_guard.py @@ -1,4 +1,3 @@ - import logging from datetime import datetime, timedelta from typing import Any, Dict, Optional @@ -13,44 +12,59 @@ logger = logging.getLogger(__name__) class StoplossGuard(IProtection): - has_global_stop: bool = True has_local_stop: bool = True def __init__(self, config: Config, protection_config: Dict[str, Any]) -> None: super().__init__(config, protection_config) - self._trade_limit = protection_config.get('trade_limit', 10) - self._disable_global_stop = protection_config.get('only_per_pair', False) - self._only_per_side = protection_config.get('only_per_side', False) - self._profit_limit = protection_config.get('required_profit', 0.0) + self._trade_limit = protection_config.get("trade_limit", 10) + self._disable_global_stop = protection_config.get("only_per_pair", False) + self._only_per_side = protection_config.get("only_per_side", False) + self._profit_limit = protection_config.get("required_profit", 0.0) def short_desc(self) -> str: """ Short method description - used for startup-messages """ - return (f"{self.name} - Frequent Stoploss Guard, {self._trade_limit} stoplosses " - f"with profit < {self._profit_limit:.2%} within {self.lookback_period_str}.") + return ( + f"{self.name} - Frequent Stoploss Guard, {self._trade_limit} stoplosses " + f"with profit < {self._profit_limit:.2%} within {self.lookback_period_str}." + ) def _reason(self) -> str: """ LockReason to use """ - return (f'{self._trade_limit} stoplosses in {self._lookback_period} min, ' - f'locking for {self._stop_duration} min.') + return ( + f"{self._trade_limit} stoplosses in {self._lookback_period} min, " + f"locking for {self._stop_duration} min." 
+ ) - def _stoploss_guard(self, date_now: datetime, pair: Optional[str], - side: LongShort) -> Optional[ProtectionReturn]: + def _stoploss_guard( + self, date_now: datetime, pair: Optional[str], side: LongShort + ) -> Optional[ProtectionReturn]: """ Evaluate recent trades """ look_back_until = date_now - timedelta(minutes=self._lookback_period) trades1 = Trade.get_trades_proxy(pair=pair, is_open=False, close_date=look_back_until) - trades = [trade for trade in trades1 if (str(trade.exit_reason) in ( - ExitType.TRAILING_STOP_LOSS.value, ExitType.STOP_LOSS.value, - ExitType.STOPLOSS_ON_EXCHANGE.value, ExitType.LIQUIDATION.value) - and trade.close_profit and trade.close_profit < self._profit_limit)] + trades = [ + trade + for trade in trades1 + if ( + str(trade.exit_reason) + in ( + ExitType.TRAILING_STOP_LOSS.value, + ExitType.STOP_LOSS.value, + ExitType.STOPLOSS_ON_EXCHANGE.value, + ExitType.LIQUIDATION.value, + ) + and trade.close_profit + and trade.close_profit < self._profit_limit + ) + ] if self._only_per_side: # Long or short trades only @@ -59,15 +73,18 @@ class StoplossGuard(IProtection): if len(trades) < self._trade_limit: return None - self.log_once(f"Trading stopped due to {self._trade_limit} " - f"stoplosses within {self._lookback_period} minutes.", logger.info) + self.log_once( + f"Trading stopped due to {self._trade_limit} " + f"stoplosses within {self._lookback_period} minutes.", + logger.info, + ) until = self.calculate_lock_end(trades, self._stop_duration) return ProtectionReturn( lock=True, until=until, reason=self._reason(), - lock_side=(side if self._only_per_side else '*') - ) + lock_side=(side if self._only_per_side else "*"), + ) def global_stop(self, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: """ @@ -81,7 +98,8 @@ class StoplossGuard(IProtection): return self._stoploss_guard(date_now, None, side) def stop_per_pair( - self, pair: str, date_now: datetime, side: LongShort) -> Optional[ProtectionReturn]: + self, pair: str, date_now: datetime, side: LongShort + ) -> Optional[ProtectionReturn]: """ Stops trading (position entering) for this pair This must evaluate to true for the whole period of the "cooldown period". 
diff --git a/freqtrade/resolvers/__init__.py b/freqtrade/resolvers/__init__.py index 2f70a788a..4bc2e8220 100644 --- a/freqtrade/resolvers/__init__.py +++ b/freqtrade/resolvers/__init__.py @@ -2,6 +2,7 @@ # isort: off from freqtrade.resolvers.iresolver import IResolver from freqtrade.resolvers.exchange_resolver import ExchangeResolver + # isort: on # Don't import HyperoptResolver to avoid loading the whole Optimize tree # from freqtrade.resolvers.hyperopt_resolver import HyperOptResolver diff --git a/freqtrade/resolvers/exchange_resolver.py b/freqtrade/resolvers/exchange_resolver.py index 2f912c4ab..c0c3c906b 100644 --- a/freqtrade/resolvers/exchange_resolver.py +++ b/freqtrade/resolvers/exchange_resolver.py @@ -1,6 +1,7 @@ """ This module loads custom exchanges """ + import logging from inspect import isclass from typing import Any, Dict, List, Optional @@ -18,17 +19,23 @@ class ExchangeResolver(IResolver): """ This class contains all the logic to load a custom exchange class """ + object_type = Exchange @staticmethod - def load_exchange(config: Config, *, exchange_config: Optional[ExchangeConfig] = None, - validate: bool = True, load_leverage_tiers: bool = False) -> Exchange: + def load_exchange( + config: Config, + *, + exchange_config: Optional[ExchangeConfig] = None, + validate: bool = True, + load_leverage_tiers: bool = False, + ) -> Exchange: """ Load the custom class from config parameter :param exchange_name: name of the Exchange to load :param config: configuration dictionary """ - exchange_name: str = config['exchange']['name'] + exchange_name: str = config["exchange"]["name"] # Map exchange name to avoid duplicate classes for identical exchanges exchange_name = MAP_EXCHANGE_CHILDCLASS.get(exchange_name, exchange_name) exchange_name = exchange_name.title() @@ -37,16 +44,22 @@ class ExchangeResolver(IResolver): exchange = ExchangeResolver._load_exchange( exchange_name, kwargs={ - 'config': config, - 'validate': validate, - 'exchange_config': exchange_config, - 'load_leverage_tiers': load_leverage_tiers} + "config": config, + "validate": validate, + "exchange_config": exchange_config, + "load_leverage_tiers": load_leverage_tiers, + }, ) except ImportError: logger.info( - f"No {exchange_name} specific subclass found. Using the generic class instead.") + f"No {exchange_name} specific subclass found. Using the generic class instead." 
+ ) if not exchange: - exchange = Exchange(config, validate=validate, exchange_config=exchange_config,) + exchange = Exchange( + config, + validate=validate, + exchange_config=exchange_config, + ) return exchange @staticmethod @@ -75,8 +88,9 @@ class ExchangeResolver(IResolver): ) @classmethod - def search_all_objects(cls, config: Config, enum_failed: bool, - recursive: bool = False) -> List[Dict[str, Any]]: + def search_all_objects( + cls, config: Config, enum_failed: bool, recursive: bool = False + ) -> List[Dict[str, Any]]: """ Searches for valid objects :param config: Config object @@ -89,10 +103,12 @@ class ExchangeResolver(IResolver): for exchange_name in dir(exchanges): exchange = getattr(exchanges, exchange_name) if isclass(exchange) and issubclass(exchange, Exchange): - result.append({ - 'name': exchange_name, - 'class': exchange, - 'location': exchange.__module__, - 'location_rel: ': exchange.__module__.replace('freqtrade.', ''), - }) + result.append( + { + "name": exchange_name, + "class": exchange, + "location": exchange.__module__, + "location_rel: ": exchange.__module__.replace("freqtrade.", ""), + } + ) return result diff --git a/freqtrade/resolvers/freqaimodel_resolver.py b/freqtrade/resolvers/freqaimodel_resolver.py index 3696b9e56..1bdf473d2 100644 --- a/freqtrade/resolvers/freqaimodel_resolver.py +++ b/freqtrade/resolvers/freqaimodel_resolver.py @@ -3,6 +3,7 @@ """ This module load a custom model for freqai """ + import logging from pathlib import Path diff --git a/freqtrade/resolvers/hyperopt_resolver.py b/freqtrade/resolvers/hyperopt_resolver.py index d050c6fbc..72bbfa886 100644 --- a/freqtrade/resolvers/hyperopt_resolver.py +++ b/freqtrade/resolvers/hyperopt_resolver.py @@ -3,6 +3,7 @@ """ This module load custom hyperopt """ + import logging from pathlib import Path @@ -19,10 +20,11 @@ class HyperOptLossResolver(IResolver): """ This class contains all the logic to load custom hyperopt loss class """ + object_type = IHyperOptLoss object_type_str = "HyperoptLoss" user_subdir = USERPATH_HYPEROPTS - initial_search_path = Path(__file__).parent.parent.joinpath('optimize/hyperopt_loss').resolve() + initial_search_path = Path(__file__).parent.parent.joinpath("optimize/hyperopt_loss").resolve() @staticmethod def load_hyperoptloss(config: Config) -> IHyperOptLoss: @@ -31,18 +33,18 @@ class HyperOptLossResolver(IResolver): :param config: configuration dictionary """ - hyperoptloss_name = config.get('hyperopt_loss') + hyperoptloss_name = config.get("hyperopt_loss") if not hyperoptloss_name: raise OperationalException( "No Hyperopt loss set. 
Please use `--hyperopt-loss` to " "specify the Hyperopt-Loss class to use.\n" f"Built-in Hyperopt-loss-functions are: {', '.join(HYPEROPT_LOSS_BUILTIN)}" ) - hyperoptloss = HyperOptLossResolver.load_object(hyperoptloss_name, - config, kwargs={}, - extra_dir=config.get('hyperopt_path')) + hyperoptloss = HyperOptLossResolver.load_object( + hyperoptloss_name, config, kwargs={}, extra_dir=config.get("hyperopt_path") + ) # Assign timeframe to be used in hyperopt - hyperoptloss.__class__.timeframe = str(config['timeframe']) + hyperoptloss.__class__.timeframe = str(config["timeframe"]) return hyperoptloss diff --git a/freqtrade/resolvers/iresolver.py b/freqtrade/resolvers/iresolver.py index bcbb5704b..fc6ac5ec3 100644 --- a/freqtrade/resolvers/iresolver.py +++ b/freqtrade/resolvers/iresolver.py @@ -3,6 +3,7 @@ """ This module load custom objects """ + import importlib.util import inspect import logging @@ -37,6 +38,7 @@ class IResolver: """ This class contains all the logic to load custom classes """ + # Childclasses need to override this object_type: Type[Any] object_type_str: str @@ -46,15 +48,18 @@ class IResolver: extra_path: Optional[str] = None @classmethod - def build_search_paths(cls, config: Config, user_subdir: Optional[str] = None, - extra_dirs: Optional[List[str]] = None) -> List[Path]: - + def build_search_paths( + cls, + config: Config, + user_subdir: Optional[str] = None, + extra_dirs: Optional[List[str]] = None, + ) -> List[Path]: abs_paths: List[Path] = [] if cls.initial_search_path: abs_paths.append(cls.initial_search_path) if user_subdir: - abs_paths.insert(0, config['user_data_dir'].joinpath(user_subdir)) + abs_paths.insert(0, config["user_data_dir"].joinpath(user_subdir)) # Add extra directory to the top of the search paths if extra_dirs: @@ -67,8 +72,9 @@ class IResolver: return abs_paths @classmethod - def _get_valid_object(cls, module_path: Path, object_name: Optional[str], - enum_failed: bool = False) -> Iterator[Any]: + def _get_valid_object( + cls, module_path: Path, object_name: Optional[str], enum_failed: bool = False + ) -> Iterator[Any]: """ Generator returning objects with matching object_type and object_name in the path given. :param module_path: absolute path to the module @@ -90,28 +96,35 @@ class IResolver: module = importlib.util.module_from_spec(spec) try: spec.loader.exec_module(module) # type: ignore # importlib does not use typehints - except (AttributeError, ModuleNotFoundError, SyntaxError, - ImportError, NameError) as err: + except ( + AttributeError, + ModuleNotFoundError, + SyntaxError, + ImportError, + NameError, + ) as err: # Catch errors in case a specific module is not installed logger.warning(f"Could not import {module_path} due to '{err}'") if enum_failed: return iter([None]) valid_objects_gen = ( - (obj, inspect.getsource(module)) for - name, obj in inspect.getmembers( - module, inspect.isclass) if ((object_name is None or object_name == name) - and issubclass(obj, cls.object_type) - and obj is not cls.object_type - and obj.__module__ == module_name - ) + (obj, inspect.getsource(module)) + for name, obj in inspect.getmembers(module, inspect.isclass) + if ( + (object_name is None or object_name == name) + and issubclass(obj, cls.object_type) + and obj is not cls.object_type + and obj.__module__ == module_name + ) ) # The __module__ check ensures we only use strategies that are defined in this folder. 
return valid_objects_gen @classmethod - def _search_object(cls, directory: Path, *, object_name: str, add_source: bool = False - ) -> Union[Tuple[Any, Path], Tuple[None, None]]: + def _search_object( + cls, directory: Path, *, object_name: str, add_source: bool = False + ) -> Union[Tuple[Any, Path], Tuple[None, None]]: """ Search for the objectname in the given directory :param directory: relative or absolute directory path @@ -121,11 +134,11 @@ class IResolver: logger.debug(f"Searching for {cls.object_type.__name__} {object_name} in '{directory}'") for entry in directory.iterdir(): # Only consider python files - if entry.suffix != '.py': - logger.debug('Ignoring %s', entry) + if entry.suffix != ".py": + logger.debug("Ignoring %s", entry) continue if entry.is_symlink() and not entry.is_file(): - logger.debug('Ignoring broken symlink %s', entry) + logger.debug("Ignoring broken symlink %s", entry) continue module_path = entry.resolve() @@ -139,21 +152,23 @@ class IResolver: return (None, None) @classmethod - def _load_object(cls, paths: List[Path], *, object_name: str, add_source: bool = False, - kwargs: Dict) -> Optional[Any]: + def _load_object( + cls, paths: List[Path], *, object_name: str, add_source: bool = False, kwargs: Dict + ) -> Optional[Any]: """ Try to load object from path list. """ for _path in paths: try: - (module, module_path) = cls._search_object(directory=_path, - object_name=object_name, - add_source=add_source) + (module, module_path) = cls._search_object( + directory=_path, object_name=object_name, add_source=add_source + ) if module: logger.info( f"Using resolved {cls.object_type.__name__.lower()[1:]} {object_name} " - f"from '{module_path}'...") + f"from '{module_path}'..." + ) return module(**kwargs) except FileNotFoundError: logger.warning('Path "%s" does not exist.', _path.resolve()) @@ -161,8 +176,9 @@ class IResolver: return None @classmethod - def load_object(cls, object_name: str, config: Config, *, kwargs: dict, - extra_dir: Optional[str] = None) -> Any: + def load_object( + cls, object_name: str, config: Config, *, kwargs: dict, extra_dir: Optional[str] = None + ) -> Any: """ Search and loads the specified object as configured in the child class. 
         :param object_name: name of the module to import
@@ -176,12 +192,11 @@
         if extra_dir:
             extra_dirs.append(extra_dir)
 
-        abs_paths = cls.build_search_paths(config,
-                                           user_subdir=cls.user_subdir,
-                                           extra_dirs=extra_dirs)
+        abs_paths = cls.build_search_paths(
+            config, user_subdir=cls.user_subdir, extra_dirs=extra_dirs
+        )
 
-        found_object = cls._load_object(paths=abs_paths, object_name=object_name,
-                                        kwargs=kwargs)
+        found_object = cls._load_object(paths=abs_paths, object_name=object_name, kwargs=kwargs)
         if found_object:
             return found_object
         raise OperationalException(
@@ -190,8 +205,9 @@
         )
 
     @classmethod
-    def search_all_objects(cls, config: Config, enum_failed: bool,
-                           recursive: bool = False) -> List[Dict[str, Any]]:
+    def search_all_objects(
+        cls, config: Config, enum_failed: bool, recursive: bool = False
+    ) -> List[Dict[str, Any]]:
         """
         Searches for valid objects
         :param config: Config object
@@ -209,15 +225,21 @@
 
     @classmethod
     def _build_rel_location(cls, directory: Path, entry: Path) -> str:
         builtin = cls.initial_search_path == directory
-        return f"/{entry.relative_to(directory)}" if builtin else str(
-            entry.relative_to(directory))
+        return (
+            f"/{entry.relative_to(directory)}"
+            if builtin
+            else str(entry.relative_to(directory))
+        )
 
     @classmethod
     def _search_all_objects(
-            cls, directory: Path, enum_failed: bool, recursive: bool = False,
-            basedir: Optional[Path] = None) -> List[Dict[str, Any]]:
+        cls,
+        directory: Path,
+        enum_failed: bool,
+        recursive: bool = False,
+        basedir: Optional[Path] = None,
+    ) -> List[Dict[str, Any]]:
         """
         Searches a directory for valid objects
         :param directory: Path to search
@@ -233,24 +255,29 @@
             return objects
         for entry in directory.iterdir():
             if (
-                recursive and entry.is_dir()
-                and not entry.name.startswith('__')
-                and not entry.name.startswith('.')
+                recursive
+                and entry.is_dir()
+                and not entry.name.startswith("__")
+                and not entry.name.startswith(".")
             ):
-                objects.extend(cls._search_all_objects(
-                    entry, enum_failed, recursive, basedir or directory))
+                objects.extend(
+                    cls._search_all_objects(entry, enum_failed, recursive, basedir or directory)
+                )
             # Only consider python files
-            if entry.suffix != '.py':
-                logger.debug('Ignoring %s', entry)
+            if entry.suffix != ".py":
+                logger.debug("Ignoring %s", entry)
                 continue
             module_path = entry.resolve()
             logger.debug(f"Path {module_path}")
-            for obj in cls._get_valid_object(module_path, object_name=None,
-                                             enum_failed=enum_failed):
+            for obj in cls._get_valid_object(
+                module_path, object_name=None, enum_failed=enum_failed
+            ):
                 objects.append(
-                    {'name': obj[0].__name__ if obj is not None else '',
-                     'class': obj[0] if obj is not None else None,
-                     'location': entry,
-                     'location_rel': cls._build_rel_location(basedir or directory, entry),
-                     })
+                    {
+                        "name": obj[0].__name__ if obj is not None else "",
+                        "class": obj[0] if obj is not None else None,
+                        "location": entry,
+                        "location_rel": cls._build_rel_location(basedir or directory, entry),
+                    }
+                )
         return objects
diff --git a/freqtrade/resolvers/pairlist_resolver.py b/freqtrade/resolvers/pairlist_resolver.py
index f492bcb54..e6aed5ec0 100644
--- a/freqtrade/resolvers/pairlist_resolver.py
+++ b/freqtrade/resolvers/pairlist_resolver.py
@@ -3,6 +3,7 @@
 """
 This module load custom pairlists
 """
+
 import logging
 from pathlib import Path
 
@@ -18,14 +19,21 @@ class PairListResolver(IResolver):
     """
     This class contains all the logic to load custom PairList class
     """
+
     object_type = IPairList
     object_type_str = 
"Pairlist" user_subdir = None - initial_search_path = Path(__file__).parent.parent.joinpath('plugins/pairlist').resolve() + initial_search_path = Path(__file__).parent.parent.joinpath("plugins/pairlist").resolve() @staticmethod - def load_pairlist(pairlist_name: str, exchange, pairlistmanager, - config: Config, pairlistconfig: dict, pairlist_pos: int) -> IPairList: + def load_pairlist( + pairlist_name: str, + exchange, + pairlistmanager, + config: Config, + pairlistconfig: dict, + pairlist_pos: int, + ) -> IPairList: """ Load the pairlist with pairlist_name :param pairlist_name: Classname of the pairlist @@ -36,10 +44,14 @@ class PairListResolver(IResolver): :param pairlist_pos: Position of the pairlist in the list of pairlists :return: initialized Pairlist class """ - return PairListResolver.load_object(pairlist_name, config, - kwargs={'exchange': exchange, - 'pairlistmanager': pairlistmanager, - 'config': config, - 'pairlistconfig': pairlistconfig, - 'pairlist_pos': pairlist_pos}, - ) + return PairListResolver.load_object( + pairlist_name, + config, + kwargs={ + "exchange": exchange, + "pairlistmanager": pairlistmanager, + "config": config, + "pairlistconfig": pairlistconfig, + "pairlist_pos": pairlist_pos, + }, + ) diff --git a/freqtrade/resolvers/protection_resolver.py b/freqtrade/resolvers/protection_resolver.py index 11cd6f224..67b68f050 100644 --- a/freqtrade/resolvers/protection_resolver.py +++ b/freqtrade/resolvers/protection_resolver.py @@ -1,6 +1,7 @@ """ This module load custom pairlists """ + import logging from pathlib import Path from typing import Dict @@ -17,14 +18,16 @@ class ProtectionResolver(IResolver): """ This class contains all the logic to load custom PairList class """ + object_type = IProtection object_type_str = "Protection" user_subdir = None - initial_search_path = Path(__file__).parent.parent.joinpath('plugins/protections').resolve() + initial_search_path = Path(__file__).parent.parent.joinpath("plugins/protections").resolve() @staticmethod - def load_protection(protection_name: str, config: Config, - protection_config: Dict) -> IProtection: + def load_protection( + protection_name: str, config: Config, protection_config: Dict + ) -> IProtection: """ Load the protection with protection_name :param protection_name: Classname of the pairlist @@ -32,8 +35,11 @@ class ProtectionResolver(IResolver): :param protection_config: Configuration dedicated to this pairlist :return: initialized Protection class """ - return ProtectionResolver.load_object(protection_name, config, - kwargs={'config': config, - 'protection_config': protection_config, - }, - ) + return ProtectionResolver.load_object( + protection_name, + config, + kwargs={ + "config": config, + "protection_config": protection_config, + }, + ) diff --git a/freqtrade/resolvers/strategy_resolver.py b/freqtrade/resolvers/strategy_resolver.py index 7e0204c0e..72b1db034 100644 --- a/freqtrade/resolvers/strategy_resolver.py +++ b/freqtrade/resolvers/strategy_resolver.py @@ -3,6 +3,7 @@ """ This module load custom strategies """ + import logging import tempfile from base64 import urlsafe_b64decode @@ -26,6 +27,7 @@ class StrategyResolver(IResolver): """ This class contains the logic to load custom strategy class """ + object_type = IStrategy object_type_str = "Strategy" user_subdir = USERPATH_STRATEGIES @@ -40,47 +42,48 @@ class StrategyResolver(IResolver): """ config = config or {} - if not config.get('strategy'): - raise OperationalException("No strategy set. 
Please use `--strategy` to specify " - "the strategy class to use.") + if not config.get("strategy"): + raise OperationalException( + "No strategy set. Please use `--strategy` to specify the strategy class to use." + ) - strategy_name = config['strategy'] + strategy_name = config["strategy"] strategy: IStrategy = StrategyResolver._load_strategy( - strategy_name, config=config, - extra_dir=config.get('strategy_path')) + strategy_name, config=config, extra_dir=config.get("strategy_path") + ) strategy.ft_load_params_from_file() # Set attributes # Check if we need to override configuration # (Attribute name, default, subkey) - attributes = [("minimal_roi", {"0": 10.0}), - ("timeframe", None), - ("stoploss", None), - ("trailing_stop", None), - ("trailing_stop_positive", None), - ("trailing_stop_positive_offset", 0.0), - ("trailing_only_offset_is_reached", None), - ("use_custom_stoploss", None), - ("process_only_new_candles", None), - ("order_types", None), - ("order_time_in_force", None), - ("stake_currency", None), - ("stake_amount", None), - ("protections", None), - ("startup_candle_count", None), - ("unfilledtimeout", None), - ("use_exit_signal", True), - ("exit_profit_only", False), - ("ignore_roi_if_entry_signal", False), - ("exit_profit_offset", 0.0), - ("disable_dataframe_checks", False), - ("ignore_buying_expired_candle_after", 0), - ("position_adjustment_enable", False), - ("max_entry_position_adjustment", -1), - ("max_open_trades", -1) - ] + attributes = [ + ("minimal_roi", {"0": 10.0}), + ("timeframe", None), + ("stoploss", None), + ("trailing_stop", None), + ("trailing_stop_positive", None), + ("trailing_stop_positive_offset", 0.0), + ("trailing_only_offset_is_reached", None), + ("use_custom_stoploss", None), + ("process_only_new_candles", None), + ("order_types", None), + ("order_time_in_force", None), + ("stake_currency", None), + ("stake_amount", None), + ("protections", None), + ("startup_candle_count", None), + ("unfilledtimeout", None), + ("use_exit_signal", True), + ("exit_profit_only", False), + ("ignore_roi_if_entry_signal", False), + ("exit_profit_offset", 0.0), + ("disable_dataframe_checks", False), + ("ignore_buying_expired_candle_after", 0), + ("position_adjustment_enable", False), + ("max_entry_position_adjustment", -1), + ("max_open_trades", -1), + ] for attribute, default in attributes: - StrategyResolver._override_attribute_helper(strategy, config, - attribute, default) + StrategyResolver._override_attribute_helper(strategy, config, attribute, default) # Loop this list again to have output combined for attribute, _ in attributes: @@ -101,19 +104,23 @@ class StrategyResolver(IResolver): - Strategy - default (if not None) """ - if (attribute in config - and not isinstance(getattr(type(strategy), attribute, None), property)): + if attribute in config and not isinstance( + getattr(type(strategy), attribute, None), property + ): # Ensure Properties are not overwritten setattr(strategy, attribute, config[attribute]) - logger.info("Override strategy '%s' with value in config file: %s.", - attribute, config[attribute]) + logger.info( + "Override strategy '%s' with value in config file: %s.", + attribute, + config[attribute], + ) elif hasattr(strategy, attribute): val = getattr(strategy, attribute) # None's cannot exist in the config, so do not copy them if val is not None: # max_open_trades set to -1 in the strategy will be copied as infinity in the config - if attribute == 'max_open_trades' and val == -1: - config[attribute] = float('inf') + if attribute == 
"max_open_trades" and val == -1: + config[attribute] = float("inf") else: config[attribute] = val # Explicitly check for None here as other "falsy" values are possible @@ -127,14 +134,17 @@ class StrategyResolver(IResolver): Normalize attributes to have the correct type. """ # Sort and apply type conversions - if hasattr(strategy, 'minimal_roi'): - strategy.minimal_roi = dict(sorted( - {int(key): value for (key, value) in strategy.minimal_roi.items()}.items(), - key=lambda t: t[0])) - if hasattr(strategy, 'stoploss'): + if hasattr(strategy, "minimal_roi"): + strategy.minimal_roi = dict( + sorted( + {int(key): value for (key, value) in strategy.minimal_roi.items()}.items(), + key=lambda t: t[0], + ) + ) + if hasattr(strategy, "stoploss"): strategy.stoploss = float(strategy.stoploss) - if hasattr(strategy, 'max_open_trades') and strategy.max_open_trades < 0: - strategy.max_open_trades = float('inf') + if hasattr(strategy, "max_open_trades") and strategy.max_open_trades < 0: + strategy.max_open_trades = float("inf") return strategy @staticmethod @@ -143,92 +153,102 @@ class StrategyResolver(IResolver): validate_migrated_strategy_settings(strategy.config) if not all(k in strategy.order_types for k in REQUIRED_ORDERTYPES): - raise ImportError(f"Impossible to load Strategy '{strategy.__class__.__name__}'. " - f"Order-types mapping is incomplete.") + raise ImportError( + f"Impossible to load Strategy '{strategy.__class__.__name__}'. " + f"Order-types mapping is incomplete." + ) if not all(k in strategy.order_time_in_force for k in REQUIRED_ORDERTIF): - raise ImportError(f"Impossible to load Strategy '{strategy.__class__.__name__}'. " - f"Order-time-in-force mapping is incomplete.") - trading_mode = strategy.config.get('trading_mode', TradingMode.SPOT) + raise ImportError( + f"Impossible to load Strategy '{strategy.__class__.__name__}'. " + f"Order-time-in-force mapping is incomplete." + ) + trading_mode = strategy.config.get("trading_mode", TradingMode.SPOT) - if (strategy.can_short and trading_mode == TradingMode.SPOT): + if strategy.can_short and trading_mode == TradingMode.SPOT: raise ImportError( "Short strategies cannot run in spot markets. Please make sure that this " "is the correct strategy and that your trading mode configuration is correct. " "You can run this strategy in spot markets by setting `can_short=False`" " in your strategy. Please note that short signals will be ignored in that case." 
- ) + ) @staticmethod def validate_strategy(strategy: IStrategy) -> IStrategy: - if strategy.config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: + if strategy.config.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: # Require new method - warn_deprecated_setting(strategy, 'sell_profit_only', 'exit_profit_only', True) - warn_deprecated_setting(strategy, 'sell_profit_offset', 'exit_profit_offset', True) - warn_deprecated_setting(strategy, 'use_sell_signal', 'use_exit_signal', True) - warn_deprecated_setting(strategy, 'ignore_roi_if_buy_signal', - 'ignore_roi_if_entry_signal', True) + warn_deprecated_setting(strategy, "sell_profit_only", "exit_profit_only", True) + warn_deprecated_setting(strategy, "sell_profit_offset", "exit_profit_offset", True) + warn_deprecated_setting(strategy, "use_sell_signal", "use_exit_signal", True) + warn_deprecated_setting( + strategy, "ignore_roi_if_buy_signal", "ignore_roi_if_entry_signal", True + ) - if not check_override(strategy, IStrategy, 'populate_entry_trend'): + if not check_override(strategy, IStrategy, "populate_entry_trend"): raise OperationalException("`populate_entry_trend` must be implemented.") - if not check_override(strategy, IStrategy, 'populate_exit_trend'): + if not check_override(strategy, IStrategy, "populate_exit_trend"): raise OperationalException("`populate_exit_trend` must be implemented.") - if check_override(strategy, IStrategy, 'check_buy_timeout'): - raise OperationalException("Please migrate your implementation " - "of `check_buy_timeout` to `check_entry_timeout`.") - if check_override(strategy, IStrategy, 'check_sell_timeout'): - raise OperationalException("Please migrate your implementation " - "of `check_sell_timeout` to `check_exit_timeout`.") - - if check_override(strategy, IStrategy, 'custom_sell'): + if check_override(strategy, IStrategy, "check_buy_timeout"): raise OperationalException( - "Please migrate your implementation of `custom_sell` to `custom_exit`.") + "Please migrate your implementation " + "of `check_buy_timeout` to `check_entry_timeout`." + ) + if check_override(strategy, IStrategy, "check_sell_timeout"): + raise OperationalException( + "Please migrate your implementation " + "of `check_sell_timeout` to `check_exit_timeout`." + ) + + if check_override(strategy, IStrategy, "custom_sell"): + raise OperationalException( + "Please migrate your implementation of `custom_sell` to `custom_exit`." 
+ ) else: # TODO: Implementing one of the following methods should show a deprecation warning # buy_trend and sell_trend, custom_sell - warn_deprecated_setting(strategy, 'sell_profit_only', 'exit_profit_only') - warn_deprecated_setting(strategy, 'sell_profit_offset', 'exit_profit_offset') - warn_deprecated_setting(strategy, 'use_sell_signal', 'use_exit_signal') - warn_deprecated_setting(strategy, 'ignore_roi_if_buy_signal', - 'ignore_roi_if_entry_signal') + warn_deprecated_setting(strategy, "sell_profit_only", "exit_profit_only") + warn_deprecated_setting(strategy, "sell_profit_offset", "exit_profit_offset") + warn_deprecated_setting(strategy, "use_sell_signal", "use_exit_signal") + warn_deprecated_setting( + strategy, "ignore_roi_if_buy_signal", "ignore_roi_if_entry_signal" + ) - if ( - not check_override(strategy, IStrategy, 'populate_buy_trend') - and not check_override(strategy, IStrategy, 'populate_entry_trend') + if not check_override(strategy, IStrategy, "populate_buy_trend") and not check_override( + strategy, IStrategy, "populate_entry_trend" ): raise OperationalException( - "`populate_entry_trend` or `populate_buy_trend` must be implemented.") - if ( - not check_override(strategy, IStrategy, 'populate_sell_trend') - and not check_override(strategy, IStrategy, 'populate_exit_trend') - ): + "`populate_entry_trend` or `populate_buy_trend` must be implemented." + ) + if not check_override( + strategy, IStrategy, "populate_sell_trend" + ) and not check_override(strategy, IStrategy, "populate_exit_trend"): raise OperationalException( - "`populate_exit_trend` or `populate_sell_trend` must be implemented.") + "`populate_exit_trend` or `populate_sell_trend` must be implemented." + ) _populate_fun_len = len(getfullargspec(strategy.populate_indicators).args) _buy_fun_len = len(getfullargspec(strategy.populate_buy_trend).args) _sell_fun_len = len(getfullargspec(strategy.populate_sell_trend).args) - if any(x == 2 for x in [ - _populate_fun_len, - _buy_fun_len, - _sell_fun_len - ]): + if any(x == 2 for x in [_populate_fun_len, _buy_fun_len, _sell_fun_len]): raise OperationalException( "Strategy Interface v1 is no longer supported. " "Please update your strategy to implement " "`populate_indicators`, `populate_entry_trend` and `populate_exit_trend` " - "with the metadata argument. ") + "with the metadata argument. " + ) - has_after_fill = ('after_fill' in getfullargspec(strategy.custom_stoploss).args - and check_override(strategy, IStrategy, 'custom_stoploss')) + has_after_fill = "after_fill" in getfullargspec( + strategy.custom_stoploss + ).args and check_override(strategy, IStrategy, "custom_stoploss") if has_after_fill: strategy._ft_stop_uses_after_fill = True return strategy @staticmethod - def _load_strategy(strategy_name: str, - config: Config, extra_dir: Optional[str] = None) -> IStrategy: + def _load_strategy( + strategy_name: str, config: Config, extra_dir: Optional[str] = None + ) -> IStrategy: """ Search and loads the specified strategy. 
:param strategy_name: name of the module to import @@ -236,7 +256,7 @@ class StrategyResolver(IResolver): :param extra_dir: additional directory to search for the given strategy :return: Strategy instance or None """ - if config.get('recursive_strategy_search', False): + if config.get("recursive_strategy_search", False): extra_dirs: List[str] = [ path[0] for path in walk(f"{config['user_data_dir']}/{USERPATH_STRATEGIES}") ] # sub-directories @@ -246,9 +266,9 @@ class StrategyResolver(IResolver): if extra_dir: extra_dirs.append(extra_dir) - abs_paths = StrategyResolver.build_search_paths(config, - user_subdir=USERPATH_STRATEGIES, - extra_dirs=extra_dirs) + abs_paths = StrategyResolver.build_search_paths( + config, user_subdir=USERPATH_STRATEGIES, extra_dirs=extra_dirs + ) if ":" in strategy_name: logger.info("loading base64 encoded strategy") @@ -258,7 +278,7 @@ class StrategyResolver(IResolver): temp = Path(tempfile.mkdtemp("freq", "strategy")) name = strat[0] + ".py" - temp.joinpath(name).write_text(urlsafe_b64decode(strat[1]).decode('utf-8')) + temp.joinpath(name).write_text(urlsafe_b64decode(strat[1]).decode("utf-8")) temp.joinpath("__init__.py").touch() strategy_name = strat[0] @@ -270,11 +290,10 @@ class StrategyResolver(IResolver): paths=abs_paths, object_name=strategy_name, add_source=True, - kwargs={'config': config}, + kwargs={"config": config}, ) if strategy: - return StrategyResolver.validate_strategy(strategy) raise OperationalException( @@ -289,7 +308,7 @@ def warn_deprecated_setting(strategy: IStrategy, old: str, new: str, error=False if error: raise OperationalException(errormsg) logger.warning(errormsg) - setattr(strategy, new, getattr(strategy, f'{old}')) + setattr(strategy, new, getattr(strategy, f"{old}")) def check_override(object, parentclass, attribute): diff --git a/freqtrade/rpc/api_server/api_auth.py b/freqtrade/rpc/api_server/api_auth.py index 257c1cc24..0e054220b 100644 --- a/freqtrade/rpc/api_server/api_auth.py +++ b/freqtrade/rpc/api_server/api_auth.py @@ -21,8 +21,9 @@ router_login = APIRouter() def verify_auth(api_config, username: str, password: str): """Verify username/password""" - return (secrets.compare_digest(username, api_config.get('username')) and - secrets.compare_digest(password, api_config.get('password'))) + return secrets.compare_digest(username, api_config.get("username")) and secrets.compare_digest( + password, api_config.get("password") + ) httpbasic = HTTPBasic(auto_error=False) @@ -38,7 +39,7 @@ def get_user_from_token(token, secret_key: str, token_type: str = "access") -> s ) try: payload = jwt.decode(token, secret_key, algorithms=[ALGORITHM]) - username: str = payload.get("identity", {}).get('u') + username: str = payload.get("identity", {}).get("u") if username is None: raise credentials_exception if payload.get("type") != token_type: @@ -55,10 +56,10 @@ def get_user_from_token(token, secret_key: str, token_type: str = "access") -> s async def validate_ws_token( ws: WebSocket, ws_token: Union[str, None] = Query(default=None, alias="token"), - api_config: Dict[str, Any] = Depends(get_api_config) + api_config: Dict[str, Any] = Depends(get_api_config), ): - secret_ws_token = api_config.get('ws_token', None) - secret_jwt_key = api_config.get('jwt_secret_key', 'super-secret') + secret_ws_token = api_config.get("ws_token", None) + secret_jwt_key = api_config.get("jwt_secret_key", "super-secret") # Check if ws_token is/in secret_ws_token if ws_token and secret_ws_token: @@ -66,10 +67,9 @@ async def validate_ws_token( if 
isinstance(secret_ws_token, str): is_valid_ws_token = secrets.compare_digest(secret_ws_token, ws_token) elif isinstance(secret_ws_token, list): - is_valid_ws_token = any([ - secrets.compare_digest(potential, ws_token) - for potential in secret_ws_token - ]) + is_valid_ws_token = any( + [secrets.compare_digest(potential, ws_token) for potential in secret_ws_token] + ) if is_valid_ws_token: return ws_token @@ -94,20 +94,24 @@ def create_token(data: dict, secret_key: str, token_type: str = "access") -> str expire = datetime.now(timezone.utc) + timedelta(days=30) else: raise ValueError() - to_encode.update({ - "exp": expire, - "iat": datetime.now(timezone.utc), - "type": token_type, - }) + to_encode.update( + { + "exp": expire, + "iat": datetime.now(timezone.utc), + "type": token_type, + } + ) encoded_jwt = jwt.encode(to_encode, secret_key, algorithm=ALGORITHM) return encoded_jwt -def http_basic_or_jwt_token(form_data: HTTPBasicCredentials = Depends(httpbasic), - token: str = Depends(oauth2_scheme), - api_config=Depends(get_api_config)): +def http_basic_or_jwt_token( + form_data: HTTPBasicCredentials = Depends(httpbasic), + token: str = Depends(oauth2_scheme), + api_config=Depends(get_api_config), +): if token: - return get_user_from_token(token, api_config.get('jwt_secret_key', 'super-secret')) + return get_user_from_token(token, api_config.get("jwt_secret_key", "super-secret")) elif form_data and verify_auth(api_config, form_data.username, form_data.password): return form_data.username @@ -117,15 +121,16 @@ def http_basic_or_jwt_token(form_data: HTTPBasicCredentials = Depends(httpbasic) ) -@router_login.post('/token/login', response_model=AccessAndRefreshToken) -def token_login(form_data: HTTPBasicCredentials = Depends(security), - api_config=Depends(get_api_config)): - +@router_login.post("/token/login", response_model=AccessAndRefreshToken) +def token_login( + form_data: HTTPBasicCredentials = Depends(security), api_config=Depends(get_api_config) +): if verify_auth(api_config, form_data.username, form_data.password): - token_data = {'identity': {'u': form_data.username}} - access_token = create_token(token_data, api_config.get('jwt_secret_key', 'super-secret')) - refresh_token = create_token(token_data, api_config.get('jwt_secret_key', 'super-secret'), - token_type="refresh") + token_data = {"identity": {"u": form_data.username}} + access_token = create_token(token_data, api_config.get("jwt_secret_key", "super-secret")) + refresh_token = create_token( + token_data, api_config.get("jwt_secret_key", "super-secret"), token_type="refresh" + ) return { "access_token": access_token, "refresh_token": refresh_token, @@ -137,12 +142,12 @@ def token_login(form_data: HTTPBasicCredentials = Depends(security), ) -@router_login.post('/token/refresh', response_model=AccessToken) +@router_login.post("/token/refresh", response_model=AccessToken) def token_refresh(token: str = Depends(oauth2_scheme), api_config=Depends(get_api_config)): # Refresh token - u = get_user_from_token(token, api_config.get( - 'jwt_secret_key', 'super-secret'), 'refresh') - token_data = {'identity': {'u': u}} - access_token = create_token(token_data, api_config.get('jwt_secret_key', 'super-secret'), - token_type="access") - return {'access_token': access_token} + u = get_user_from_token(token, api_config.get("jwt_secret_key", "super-secret"), "refresh") + token_data = {"identity": {"u": u}} + access_token = create_token( + token_data, api_config.get("jwt_secret_key", "super-secret"), token_type="access" + ) + return 
{"access_token": access_token} diff --git a/freqtrade/rpc/api_server/api_background_tasks.py b/freqtrade/rpc/api_server/api_background_tasks.py index 04e98c609..6df0411c8 100644 --- a/freqtrade/rpc/api_server/api_background_tasks.py +++ b/freqtrade/rpc/api_server/api_background_tasks.py @@ -1,5 +1,6 @@ import logging from copy import deepcopy +from typing import List from fastapi import APIRouter, BackgroundTasks, Depends from fastapi.exceptions import HTTPException @@ -8,9 +9,14 @@ from freqtrade.constants import Config from freqtrade.enums import CandleType from freqtrade.exceptions import OperationalException from freqtrade.persistence import FtNoDBContext -from freqtrade.rpc.api_server.api_schemas import (BackgroundTaskStatus, BgJobStarted, - ExchangeModePayloadMixin, PairListsPayload, - PairListsResponse, WhitelistEvaluateResponse) +from freqtrade.rpc.api_server.api_schemas import ( + BackgroundTaskStatus, + BgJobStarted, + ExchangeModePayloadMixin, + PairListsPayload, + PairListsResponse, + WhitelistEvaluateResponse, +) from freqtrade.rpc.api_server.deps import get_config, get_exchange from freqtrade.rpc.api_server.webserver_bgwork import ApiBG @@ -21,92 +27,113 @@ logger = logging.getLogger(__name__) router = APIRouter() -@router.get('/background/{jobid}', response_model=BackgroundTaskStatus, tags=['webserver']) +@router.get("/background", response_model=List[BackgroundTaskStatus], tags=["webserver"]) +def background_job_list(): + return [ + { + "job_id": jobid, + "job_category": job["category"], + "status": job["status"], + "running": job["is_running"], + "progress": job.get("progress"), + "error": job.get("error", None), + } + for jobid, job in ApiBG.jobs.items() + ] + + +@router.get("/background/{jobid}", response_model=BackgroundTaskStatus, tags=["webserver"]) def background_job(jobid: str): if not (job := ApiBG.jobs.get(jobid)): - raise HTTPException(status_code=404, detail='Job not found.') + raise HTTPException(status_code=404, detail="Job not found.") return { - 'job_id': jobid, - 'job_category': job['category'], - 'status': job['status'], - 'running': job['is_running'], - 'progress': job.get('progress'), - # 'job_error': job['error'], + "job_id": jobid, + "job_category": job["category"], + "status": job["status"], + "running": job["is_running"], + "progress": job.get("progress"), + "error": job.get("error", None), } -@router.get('/pairlists/available', - response_model=PairListsResponse, tags=['pairlists', 'webserver']) +@router.get( + "/pairlists/available", response_model=PairListsResponse, tags=["pairlists", "webserver"] +) def list_pairlists(config=Depends(get_config)): from freqtrade.resolvers import PairListResolver - pairlists = PairListResolver.search_all_objects( - config, False) - pairlists = sorted(pairlists, key=lambda x: x['name']) - return {'pairlists': [{ - "name": x['name'], - "is_pairlist_generator": x['class'].is_pairlist_generator, - "params": x['class'].available_parameters(), - "description": x['class'].description(), - } for x in pairlists - ]} + pairlists = PairListResolver.search_all_objects(config, False) + pairlists = sorted(pairlists, key=lambda x: x["name"]) + + return { + "pairlists": [ + { + "name": x["name"], + "is_pairlist_generator": x["class"].is_pairlist_generator, + "params": x["class"].available_parameters(), + "description": x["class"].description(), + } + for x in pairlists + ] + } def __run_pairlist(job_id: str, config_loc: Config): try: - - ApiBG.jobs[job_id]['is_running'] = True + ApiBG.jobs[job_id]["is_running"] = True from 
freqtrade.plugins.pairlistmanager import PairListManager + with FtNoDBContext(): exchange = get_exchange(config_loc) pairlists = PairListManager(exchange, config_loc) pairlists.refresh_pairlist() - ApiBG.jobs[job_id]['result'] = { - 'method': pairlists.name_list, - 'length': len(pairlists.whitelist), - 'whitelist': pairlists.whitelist - } - ApiBG.jobs[job_id]['status'] = 'success' + ApiBG.jobs[job_id]["result"] = { + "method": pairlists.name_list, + "length": len(pairlists.whitelist), + "whitelist": pairlists.whitelist, + } + ApiBG.jobs[job_id]["status"] = "success" except (OperationalException, Exception) as e: logger.exception(e) - ApiBG.jobs[job_id]['error'] = str(e) - ApiBG.jobs[job_id]['status'] = 'failed' + ApiBG.jobs[job_id]["error"] = str(e) + ApiBG.jobs[job_id]["status"] = "failed" finally: - ApiBG.jobs[job_id]['is_running'] = False + ApiBG.jobs[job_id]["is_running"] = False ApiBG.pairlist_running = False -@router.post('/pairlists/evaluate', response_model=BgJobStarted, tags=['pairlists', 'webserver']) -def pairlists_evaluate(payload: PairListsPayload, background_tasks: BackgroundTasks, - config=Depends(get_config)): +@router.post("/pairlists/evaluate", response_model=BgJobStarted, tags=["pairlists", "webserver"]) +def pairlists_evaluate( + payload: PairListsPayload, background_tasks: BackgroundTasks, config=Depends(get_config) +): if ApiBG.pairlist_running: - raise HTTPException(status_code=400, detail='Pairlist evaluation is already running.') + raise HTTPException(status_code=400, detail="Pairlist evaluation is already running.") config_loc = deepcopy(config) - config_loc['stake_currency'] = payload.stake_currency - config_loc['pairlists'] = payload.pairlists + config_loc["stake_currency"] = payload.stake_currency + config_loc["pairlists"] = payload.pairlists handleExchangePayload(payload, config_loc) # TODO: overwrite blacklist? make it optional and fall back to the one in config? # Outcome depends on the UI approach. - config_loc['exchange']['pair_blacklist'] = payload.blacklist + config_loc["exchange"]["pair_blacklist"] = payload.blacklist # Random job id job_id = ApiBG.get_job_id() ApiBG.jobs[job_id] = { - 'category': 'pairlist', - 'status': 'pending', - 'progress': None, - 'is_running': False, - 'result': {}, - 'error': None, + "category": "pairlist", + "status": "pending", + "progress": None, + "is_running": False, + "result": {}, + "error": None, } background_tasks.add_task(__run_pairlist, job_id, config_loc) ApiBG.pairlist_running = True return { - 'status': 'Pairlist evaluation started in background.', - 'job_id': job_id, + "status": "Pairlist evaluation started in background.", + "job_id": job_id, } @@ -116,31 +143,35 @@ def handleExchangePayload(payload: ExchangeModePayloadMixin, config_loc: Config) Updates the configuration with the payload values. 
""" if payload.exchange: - config_loc['exchange']['name'] = payload.exchange + config_loc["exchange"]["name"] = payload.exchange if payload.trading_mode: - config_loc['trading_mode'] = payload.trading_mode - config_loc['candle_type_def'] = CandleType.get_default( - config_loc.get('trading_mode', 'spot') or 'spot') + config_loc["trading_mode"] = payload.trading_mode + config_loc["candle_type_def"] = CandleType.get_default( + config_loc.get("trading_mode", "spot") or "spot" + ) if payload.margin_mode: - config_loc['margin_mode'] = payload.margin_mode + config_loc["margin_mode"] = payload.margin_mode -@router.get('/pairlists/evaluate/{jobid}', response_model=WhitelistEvaluateResponse, - tags=['pairlists', 'webserver']) +@router.get( + "/pairlists/evaluate/{jobid}", + response_model=WhitelistEvaluateResponse, + tags=["pairlists", "webserver"], +) def pairlists_evaluate_get(jobid: str): if not (job := ApiBG.jobs.get(jobid)): - raise HTTPException(status_code=404, detail='Job not found.') + raise HTTPException(status_code=404, detail="Job not found.") - if job['is_running']: - raise HTTPException(status_code=400, detail='Job not finished yet.') + if job["is_running"]: + raise HTTPException(status_code=400, detail="Job not finished yet.") - if error := job['error']: + if error := job["error"]: return { - 'status': 'failed', - 'error': error, + "status": "failed", + "error": error, } return { - 'status': 'success', - 'result': job['result'], + "status": "success", + "result": job["result"], } diff --git a/freqtrade/rpc/api_server/api_backtest.py b/freqtrade/rpc/api_server/api_backtest.py index 345f835a4..42b09de0a 100644 --- a/freqtrade/rpc/api_server/api_backtest.py +++ b/freqtrade/rpc/api_server/api_backtest.py @@ -10,16 +10,25 @@ from fastapi.exceptions import HTTPException from freqtrade.configuration.config_validation import validate_config_consistency from freqtrade.constants import Config -from freqtrade.data.btanalysis import (delete_backtest_result, get_backtest_market_change, - get_backtest_result, get_backtest_resultlist, - load_and_merge_backtest_result, update_backtest_metadata) +from freqtrade.data.btanalysis import ( + delete_backtest_result, + get_backtest_market_change, + get_backtest_result, + get_backtest_resultlist, + load_and_merge_backtest_result, + update_backtest_metadata, +) from freqtrade.enums import BacktestState from freqtrade.exceptions import ConfigurationError, DependencyException, OperationalException from freqtrade.exchange.common import remove_exchange_credentials from freqtrade.misc import deep_merge_dicts, is_file_in_dir -from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestMarketChange, - BacktestMetadataUpdate, BacktestRequest, - BacktestResponse) +from freqtrade.rpc.api_server.api_schemas import ( + BacktestHistoryEntry, + BacktestMarketChange, + BacktestMetadataUpdate, + BacktestRequest, + BacktestResponse, +) from freqtrade.rpc.api_server.deps import get_config from freqtrade.rpc.api_server.webserver_bgwork import ApiBG from freqtrade.rpc.rpc import RPCException @@ -40,67 +49,67 @@ def __run_backtest_bg(btconfig: Config): asyncio.set_event_loop(asyncio.new_event_loop()) try: # Reload strategy - lastconfig = ApiBG.bt['last_config'] + lastconfig = ApiBG.bt["last_config"] strat = StrategyResolver.load_strategy(btconfig) validate_config_consistency(btconfig) if ( - not ApiBG.bt['bt'] - or lastconfig.get('timeframe') != strat.timeframe - or lastconfig.get('timeframe_detail') != btconfig.get('timeframe_detail') - or 
lastconfig.get('timerange') != btconfig['timerange'] + not ApiBG.bt["bt"] + or lastconfig.get("timeframe") != strat.timeframe + or lastconfig.get("timeframe_detail") != btconfig.get("timeframe_detail") + or lastconfig.get("timerange") != btconfig["timerange"] ): from freqtrade.optimize.backtesting import Backtesting - ApiBG.bt['bt'] = Backtesting(btconfig) - ApiBG.bt['bt'].load_bt_data_detail() + + ApiBG.bt["bt"] = Backtesting(btconfig) + ApiBG.bt["bt"].load_bt_data_detail() else: - ApiBG.bt['bt'].config = btconfig - ApiBG.bt['bt'].init_backtest() + ApiBG.bt["bt"].config = btconfig + ApiBG.bt["bt"].init_backtest() # Only reload data if timeframe changed. if ( - not ApiBG.bt['data'] - or not ApiBG.bt['timerange'] - or lastconfig.get('timeframe') != strat.timeframe - or lastconfig.get('timerange') != btconfig['timerange'] + not ApiBG.bt["data"] + or not ApiBG.bt["timerange"] + or lastconfig.get("timeframe") != strat.timeframe + or lastconfig.get("timerange") != btconfig["timerange"] ): - ApiBG.bt['data'], ApiBG.bt['timerange'] = ApiBG.bt[ - 'bt'].load_bt_data() + ApiBG.bt["data"], ApiBG.bt["timerange"] = ApiBG.bt["bt"].load_bt_data() - lastconfig['timerange'] = btconfig['timerange'] - lastconfig['timeframe'] = strat.timeframe - lastconfig['protections'] = btconfig.get('protections', []) - lastconfig['enable_protections'] = btconfig.get('enable_protections') - lastconfig['dry_run_wallet'] = btconfig.get('dry_run_wallet') + lastconfig["timerange"] = btconfig["timerange"] + lastconfig["timeframe"] = strat.timeframe + lastconfig["protections"] = btconfig.get("protections", []) + lastconfig["enable_protections"] = btconfig.get("enable_protections") + lastconfig["dry_run_wallet"] = btconfig.get("dry_run_wallet") - ApiBG.bt['bt'].enable_protections = btconfig.get('enable_protections', False) - ApiBG.bt['bt'].strategylist = [strat] - ApiBG.bt['bt'].results = get_BacktestResultType_default() - ApiBG.bt['bt'].load_prior_backtest() + ApiBG.bt["bt"].enable_protections = btconfig.get("enable_protections", False) + ApiBG.bt["bt"].strategylist = [strat] + ApiBG.bt["bt"].results = get_BacktestResultType_default() + ApiBG.bt["bt"].load_prior_backtest() - ApiBG.bt['bt'].abort = False + ApiBG.bt["bt"].abort = False strategy_name = strat.get_strategy_name() - if (ApiBG.bt['bt'].results and - strategy_name in ApiBG.bt['bt'].results['strategy']): + if ApiBG.bt["bt"].results and strategy_name in ApiBG.bt["bt"].results["strategy"]: # When previous result hash matches - reuse that result and skip backtesting. 
- logger.info(f'Reusing result of previous backtest for {strategy_name}') + logger.info(f"Reusing result of previous backtest for {strategy_name}") else: - min_date, max_date = ApiBG.bt['bt'].backtest_one_strategy( - strat, ApiBG.bt['data'], ApiBG.bt['timerange']) + min_date, max_date = ApiBG.bt["bt"].backtest_one_strategy( + strat, ApiBG.bt["data"], ApiBG.bt["timerange"] + ) - ApiBG.bt['bt'].results = generate_backtest_stats( - ApiBG.bt['data'], ApiBG.bt['bt'].all_results, - min_date=min_date, max_date=max_date) + ApiBG.bt["bt"].results = generate_backtest_stats( + ApiBG.bt["data"], ApiBG.bt["bt"].all_results, min_date=min_date, max_date=max_date + ) - if btconfig.get('export', 'none') == 'trades': - combined_res = combined_dataframes_with_rel_mean(ApiBG.bt['data'], min_date, max_date) + if btconfig.get("export", "none") == "trades": + combined_res = combined_dataframes_with_rel_mean(ApiBG.bt["data"], min_date, max_date) fn = store_backtest_stats( - btconfig['exportfilename'], - ApiBG.bt['bt'].results, + btconfig["exportfilename"], + ApiBG.bt["bt"].results, datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), - market_change_data=combined_res - ) - ApiBG.bt['bt'].results['metadata'][strategy_name]['filename'] = str(fn.stem) - ApiBG.bt['bt'].results['metadata'][strategy_name]['strategy'] = strategy_name + market_change_data=combined_res, + ) + ApiBG.bt["bt"].results["metadata"][strategy_name]["filename"] = str(fn.stem) + ApiBG.bt["bt"].results["metadata"][strategy_name]["strategy"] = strategy_name logger.info("Backtest finished.") @@ -109,38 +118,38 @@ def __run_backtest_bg(btconfig: Config): except (Exception, OperationalException, DependencyException) as e: logger.exception(f"Backtesting caused an error: {e}") - ApiBG.bt['bt_error'] = str(e) + ApiBG.bt["bt_error"] = str(e) finally: ApiBG.bgtask_running = False -@router.post('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest']) +@router.post("/backtest", response_model=BacktestResponse, tags=["webserver", "backtest"]) async def api_start_backtest( - bt_settings: BacktestRequest, background_tasks: BackgroundTasks, - config=Depends(get_config)): - ApiBG.bt['bt_error'] = None + bt_settings: BacktestRequest, background_tasks: BackgroundTasks, config=Depends(get_config) +): + ApiBG.bt["bt_error"] = None """Start backtesting if not done so already""" if ApiBG.bgtask_running: - raise RPCException('Bot Background task already running') + raise RPCException("Bot Background task already running") - if ':' in bt_settings.strategy: + if ":" in bt_settings.strategy: raise HTTPException(status_code=500, detail="base64 encoded strategies are not allowed.") btconfig = deepcopy(config) - remove_exchange_credentials(btconfig['exchange'], True) + remove_exchange_credentials(btconfig["exchange"], True) settings = dict(bt_settings) - if settings.get('freqai', None) is not None: - settings['freqai'] = dict(settings['freqai']) + if settings.get("freqai", None) is not None: + settings["freqai"] = dict(settings["freqai"]) # Pydantic models will contain all keys, but non-provided ones are None btconfig = deep_merge_dicts(settings, btconfig, allow_null_overrides=False) try: - btconfig['stake_amount'] = float(btconfig['stake_amount']) + btconfig["stake_amount"] = float(btconfig["stake_amount"]) except ValueError: pass # Force dry-run for backtesting - btconfig['dry_run'] = True + btconfig["dry_run"] = True # Start backtesting # Initialize backtesting object @@ -157,39 +166,41 @@ async def api_start_backtest( } -@router.get('/backtest', 
response_model=BacktestResponse, tags=['webserver', 'backtest']) +@router.get("/backtest", response_model=BacktestResponse, tags=["webserver", "backtest"]) def api_get_backtest(): """ Get backtesting result. Returns Result after backtesting has been ran. """ from freqtrade.persistence import LocalTrade + if ApiBG.bgtask_running: return { "status": "running", "running": True, - "step": (ApiBG.bt['bt'].progress.action if ApiBG.bt['bt'] - else str(BacktestState.STARTUP)), - "progress": ApiBG.bt['bt'].progress.progress if ApiBG.bt['bt'] else 0, + "step": ( + ApiBG.bt["bt"].progress.action if ApiBG.bt["bt"] else str(BacktestState.STARTUP) + ), + "progress": ApiBG.bt["bt"].progress.progress if ApiBG.bt["bt"] else 0, "trade_count": len(LocalTrade.trades), "status_msg": "Backtest running", } - if not ApiBG.bt['bt']: + if not ApiBG.bt["bt"]: return { "status": "not_started", "running": False, "step": "", "progress": 0, - "status_msg": "Backtest not yet executed" + "status_msg": "Backtest not yet executed", } - if ApiBG.bt['bt_error']: + if ApiBG.bt["bt_error"]: return { "status": "error", "running": False, "step": "", "progress": 0, - "status_msg": f"Backtest failed with {ApiBG.bt['bt_error']}" + "status_msg": f"Backtest failed with {ApiBG.bt['bt_error']}", } return { @@ -198,11 +209,11 @@ def api_get_backtest(): "status_msg": "Backtest ended", "step": "finished", "progress": 1, - "backtest_result": ApiBG.bt['bt'].results, + "backtest_result": ApiBG.bt["bt"].results, } -@router.delete('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest']) +@router.delete("/backtest", response_model=BacktestResponse, tags=["webserver", "backtest"]) def api_delete_backtest(): """Reset backtesting""" if ApiBG.bgtask_running: @@ -213,12 +224,12 @@ def api_delete_backtest(): "progress": 0, "status_msg": "Backtest running", } - if ApiBG.bt['bt']: - ApiBG.bt['bt'].cleanup() - del ApiBG.bt['bt'] - ApiBG.bt['bt'] = None - del ApiBG.bt['data'] - ApiBG.bt['data'] = None + if ApiBG.bt["bt"]: + ApiBG.bt["bt"].cleanup() + del ApiBG.bt["bt"] + ApiBG.bt["bt"] = None + del ApiBG.bt["data"] + ApiBG.bt["data"] = None logger.info("Backtesting reset") return { "status": "reset", @@ -229,7 +240,7 @@ def api_delete_backtest(): } -@router.get('/backtest/abort', response_model=BacktestResponse, tags=['webserver', 'backtest']) +@router.get("/backtest/abort", response_model=BacktestResponse, tags=["webserver", "backtest"]) def api_backtest_abort(): if not ApiBG.bgtask_running: return { @@ -239,7 +250,7 @@ def api_backtest_abort(): "progress": 0, "status_msg": "Backtest ended", } - ApiBG.bt['bt'].abort = True + ApiBG.bt["bt"].abort = True return { "status": "stopping", "running": False, @@ -249,24 +260,26 @@ def api_backtest_abort(): } -@router.get('/backtest/history', response_model=List[BacktestHistoryEntry], - tags=['webserver', 'backtest']) +@router.get( + "/backtest/history", response_model=List[BacktestHistoryEntry], tags=["webserver", "backtest"] +) def api_backtest_history(config=Depends(get_config)): # Get backtest result history, read from metadata files - return get_backtest_resultlist(config['user_data_dir'] / 'backtest_results') + return get_backtest_resultlist(config["user_data_dir"] / "backtest_results") -@router.get('/backtest/history/result', response_model=BacktestResponse, - tags=['webserver', 'backtest']) +@router.get( + "/backtest/history/result", response_model=BacktestResponse, tags=["webserver", "backtest"] +) def api_backtest_history_result(filename: str, strategy: str, 
config=Depends(get_config)): # Get backtest result history, read from metadata files - bt_results_base: Path = config['user_data_dir'] / 'backtest_results' - fn = (bt_results_base / filename).with_suffix('.json') + bt_results_base: Path = config["user_data_dir"] / "backtest_results" + fn = (bt_results_base / filename).with_suffix(".json") results: Dict[str, Any] = { - 'metadata': {}, - 'strategy': {}, - 'strategy_comparison': [], + "metadata": {}, + "strategy": {}, + "strategy_comparison": [], } if not is_file_in_dir(fn, bt_results_base): raise HTTPException(status_code=404, detail="File not found.") @@ -281,33 +294,38 @@ def api_backtest_history_result(filename: str, strategy: str, config=Depends(get } -@router.delete('/backtest/history/{file}', response_model=List[BacktestHistoryEntry], - tags=['webserver', 'backtest']) +@router.delete( + "/backtest/history/{file}", + response_model=List[BacktestHistoryEntry], + tags=["webserver", "backtest"], +) def api_delete_backtest_history_entry(file: str, config=Depends(get_config)): # Get backtest result history, read from metadata files - bt_results_base: Path = config['user_data_dir'] / 'backtest_results' - file_abs = (bt_results_base / file).with_suffix('.json') + bt_results_base: Path = config["user_data_dir"] / "backtest_results" + file_abs = (bt_results_base / file).with_suffix(".json") # Ensure file is in backtest_results directory if not is_file_in_dir(file_abs, bt_results_base): raise HTTPException(status_code=404, detail="File not found.") delete_backtest_result(file_abs) - return get_backtest_resultlist(config['user_data_dir'] / 'backtest_results') + return get_backtest_resultlist(config["user_data_dir"] / "backtest_results") -@router.patch('/backtest/history/{file}', response_model=List[BacktestHistoryEntry], - tags=['webserver', 'backtest']) -def api_update_backtest_history_entry(file: str, body: BacktestMetadataUpdate, - config=Depends(get_config)): +@router.patch( + "/backtest/history/{file}", + response_model=List[BacktestHistoryEntry], + tags=["webserver", "backtest"], +) +def api_update_backtest_history_entry( + file: str, body: BacktestMetadataUpdate, config=Depends(get_config) +): # Get backtest result history, read from metadata files - bt_results_base: Path = config['user_data_dir'] / 'backtest_results' - file_abs = (bt_results_base / file).with_suffix('.json') + bt_results_base: Path = config["user_data_dir"] / "backtest_results" + file_abs = (bt_results_base / file).with_suffix(".json") # Ensure file is in backtest_results directory if not is_file_in_dir(file_abs, bt_results_base): raise HTTPException(status_code=404, detail="File not found.") - content = { - 'notes': body.notes - } + content = {"notes": body.notes} try: update_backtest_metadata(file_abs, body.strategy, content) except ValueError as e: @@ -316,18 +334,21 @@ def api_update_backtest_history_entry(file: str, body: BacktestMetadataUpdate, return get_backtest_result(file_abs) -@router.get('/backtest/history/{file}/market_change', response_model=BacktestMarketChange, - tags=['webserver', 'backtest']) +@router.get( + "/backtest/history/{file}/market_change", + response_model=BacktestMarketChange, + tags=["webserver", "backtest"], +) def api_get_backtest_market_change(file: str, config=Depends(get_config)): - bt_results_base: Path = config['user_data_dir'] / 'backtest_results' - file_abs = (bt_results_base / f"{file}_market_change").with_suffix('.feather') + bt_results_base: Path = config["user_data_dir"] / "backtest_results" + file_abs = (bt_results_base / 
f"{file}_market_change").with_suffix(".feather") # Ensure file is in backtest_results directory if not is_file_in_dir(file_abs, bt_results_base): raise HTTPException(status_code=404, detail="File not found.") df = get_backtest_market_change(file_abs) return { - 'columns': df.columns.tolist(), - 'data': df.values.tolist(), - 'length': len(df), + "columns": df.columns.tolist(), + "data": df.values.tolist(), + "length": len(df), } diff --git a/freqtrade/rpc/api_server/api_schemas.py b/freqtrade/rpc/api_server/api_schemas.py index 97f851b1d..0e36c0992 100644 --- a/freqtrade/rpc/api_server/api_schemas.py +++ b/freqtrade/rpc/api_server/api_schemas.py @@ -44,6 +44,7 @@ class BackgroundTaskStatus(BaseModel): status: str running: bool progress: Optional[float] = None + error: Optional[str] = None class BackgroundTaskResult(BaseModel): @@ -380,7 +381,7 @@ class Locks(BaseModel): class LocksPayload(BaseModel): pair: str - side: str = '*' # Default to both sides + side: str = "*" # Default to both sides until: AwareDatetime reason: Optional[str] = None @@ -489,12 +490,26 @@ class AvailablePairs(BaseModel): pair_interval: List[List[str]] +class PairCandlesRequest(BaseModel): + pair: str + timeframe: str + limit: Optional[int] = None + columns: Optional[List[str]] = None + + +class PairHistoryRequest(PairCandlesRequest): + timerange: str + strategy: str + freqaimodel: Optional[str] = None + + class PairHistory(BaseModel): strategy: str pair: str timeframe: str timeframe_ms: int columns: List[str] + all_columns: List[str] = [] data: SerializeAsAny[List[Any]] length: int buy_signals: int @@ -546,7 +561,7 @@ class BacktestHistoryEntry(BaseModel): strategy: str run_id: str backtest_start_time: int - notes: Optional[str] = '' + notes: Optional[str] = "" backtest_start_ts: Optional[int] = None backtest_end_ts: Optional[int] = None timeframe: Optional[str] = None @@ -555,7 +570,7 @@ class BacktestHistoryEntry(BaseModel): class BacktestMetadataUpdate(BaseModel): strategy: str - notes: str = '' + notes: str = "" class BacktestMarketChange(BaseModel): diff --git a/freqtrade/rpc/api_server/api_v1.py b/freqtrade/rpc/api_server/api_v1.py index 8146fe276..f3ffd3a9c 100644 --- a/freqtrade/rpc/api_server/api_v1.py +++ b/freqtrade/rpc/api_server/api_v1.py @@ -10,17 +10,45 @@ from freqtrade.data.history import get_datahandler from freqtrade.enums import CandleType, TradingMode from freqtrade.exceptions import OperationalException from freqtrade.rpc import RPC -from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload, - BlacklistResponse, Count, DailyWeeklyMonthly, - DeleteLockRequest, DeleteTrade, Entry, - ExchangeListResponse, Exit, ForceEnterPayload, - ForceEnterResponse, ForceExitPayload, - FreqAIModelListResponse, Health, Locks, - LocksPayload, Logs, MixTag, OpenTradeSchema, - PairHistory, PerformanceEntry, Ping, PlotConfig, - Profit, ResultMsg, ShowConfig, Stats, StatusMsg, - StrategyListResponse, StrategyResponse, SysInfo, - Version, WhitelistResponse) +from freqtrade.rpc.api_server.api_schemas import ( + AvailablePairs, + Balances, + BlacklistPayload, + BlacklistResponse, + Count, + DailyWeeklyMonthly, + DeleteLockRequest, + DeleteTrade, + Entry, + ExchangeListResponse, + Exit, + ForceEnterPayload, + ForceEnterResponse, + ForceExitPayload, + FreqAIModelListResponse, + Health, + Locks, + LocksPayload, + Logs, + MixTag, + OpenTradeSchema, + PairCandlesRequest, + PairHistory, + PairHistoryRequest, + PerformanceEntry, + Ping, + PlotConfig, + Profit, + ResultMsg, + ShowConfig, + 
Stats, + StatusMsg, + StrategyListResponse, + StrategyResponse, + SysInfo, + Version, + WhitelistResponse, +) from freqtrade.rpc.api_server.deps import get_config, get_exchange, get_rpc, get_rpc_optional from freqtrade.rpc.rpc import RPCException @@ -53,7 +81,8 @@ logger = logging.getLogger(__name__) # 2.32: new /backtest/history/ patch endpoint # 2.33: Additional weekly/monthly metrics # 2.34: new entries/exits/mix_tags endpoints -API_VERSION = 2.34 +# 2.35: pair_candles and pair_history endpoints as Post variant +API_VERSION = 2.35 # Public API, requires no auth. router_public = APIRouter() @@ -61,80 +90,84 @@ router_public = APIRouter() router = APIRouter() -@router_public.get('/ping', response_model=Ping) +@router_public.get("/ping", response_model=Ping) def ping(): """simple ping""" return {"status": "pong"} -@router.get('/version', response_model=Version, tags=['info']) +@router.get("/version", response_model=Version, tags=["info"]) def version(): - """ Bot Version info""" + """Bot Version info""" return {"version": __version__} -@router.get('/balance', response_model=Balances, tags=['info']) +@router.get("/balance", response_model=Balances, tags=["info"]) def balance(rpc: RPC = Depends(get_rpc), config=Depends(get_config)): """Account Balances""" - return rpc._rpc_balance(config['stake_currency'], config.get('fiat_display_currency', ''),) + return rpc._rpc_balance( + config["stake_currency"], + config.get("fiat_display_currency", ""), + ) -@router.get('/count', response_model=Count, tags=['info']) +@router.get("/count", response_model=Count, tags=["info"]) def count(rpc: RPC = Depends(get_rpc)): return rpc._rpc_count() -@router.get('/entries', response_model=List[Entry], tags=['info']) +@router.get("/entries", response_model=List[Entry], tags=["info"]) def entries(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)): return rpc._rpc_enter_tag_performance(pair) -@router.get('/exits', response_model=List[Exit], tags=['info']) +@router.get("/exits", response_model=List[Exit], tags=["info"]) def exits(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)): return rpc._rpc_exit_reason_performance(pair) -@router.get('/mix_tags', response_model=List[MixTag], tags=['info']) +@router.get("/mix_tags", response_model=List[MixTag], tags=["info"]) def mix_tags(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)): return rpc._rpc_mix_tag_performance(pair) -@router.get('/performance', response_model=List[PerformanceEntry], tags=['info']) +@router.get("/performance", response_model=List[PerformanceEntry], tags=["info"]) def performance(rpc: RPC = Depends(get_rpc)): return rpc._rpc_performance() -@router.get('/profit', response_model=Profit, tags=['info']) +@router.get("/profit", response_model=Profit, tags=["info"]) def profit(rpc: RPC = Depends(get_rpc), config=Depends(get_config)): - return rpc._rpc_trade_statistics(config['stake_currency'], - config.get('fiat_display_currency') - ) + return rpc._rpc_trade_statistics(config["stake_currency"], config.get("fiat_display_currency")) -@router.get('/stats', response_model=Stats, tags=['info']) +@router.get("/stats", response_model=Stats, tags=["info"]) def stats(rpc: RPC = Depends(get_rpc)): return rpc._rpc_stats() -@router.get('/daily', response_model=DailyWeeklyMonthly, tags=['info']) +@router.get("/daily", response_model=DailyWeeklyMonthly, tags=["info"]) def daily(timescale: int = 7, rpc: RPC = Depends(get_rpc), config=Depends(get_config)): - return rpc._rpc_timeunit_profit(timescale, config['stake_currency'], - 
config.get('fiat_display_currency', '')) + return rpc._rpc_timeunit_profit( + timescale, config["stake_currency"], config.get("fiat_display_currency", "") + ) -@router.get('/weekly', response_model=DailyWeeklyMonthly, tags=['info']) +@router.get("/weekly", response_model=DailyWeeklyMonthly, tags=["info"]) def weekly(timescale: int = 4, rpc: RPC = Depends(get_rpc), config=Depends(get_config)): - return rpc._rpc_timeunit_profit(timescale, config['stake_currency'], - config.get('fiat_display_currency', ''), 'weeks') + return rpc._rpc_timeunit_profit( + timescale, config["stake_currency"], config.get("fiat_display_currency", ""), "weeks" + ) -@router.get('/monthly', response_model=DailyWeeklyMonthly, tags=['info']) +@router.get("/monthly", response_model=DailyWeeklyMonthly, tags=["info"]) def monthly(timescale: int = 3, rpc: RPC = Depends(get_rpc), config=Depends(get_config)): - return rpc._rpc_timeunit_profit(timescale, config['stake_currency'], - config.get('fiat_display_currency', ''), 'months') + return rpc._rpc_timeunit_profit( + timescale, config["stake_currency"], config.get("fiat_display_currency", ""), "months" + ) -@router.get('/status', response_model=List[OpenTradeSchema], tags=['info']) +@router.get("/status", response_model=List[OpenTradeSchema], tags=["info"]) def status(rpc: RPC = Depends(get_rpc)): try: return rpc._rpc_trade_status() @@ -144,249 +177,305 @@ def status(rpc: RPC = Depends(get_rpc)): # Using the responsemodel here will cause a ~100% increase in response time (from 1s to 2s) # on big databases. Correct response model: response_model=TradeResponse, -@router.get('/trades', tags=['info', 'trading']) +@router.get("/trades", tags=["info", "trading"]) def trades(limit: int = 500, offset: int = 0, rpc: RPC = Depends(get_rpc)): return rpc._rpc_trade_history(limit, offset=offset, order_by_id=True) -@router.get('/trade/{tradeid}', response_model=OpenTradeSchema, tags=['info', 'trading']) +@router.get("/trade/{tradeid}", response_model=OpenTradeSchema, tags=["info", "trading"]) def trade(tradeid: int = 0, rpc: RPC = Depends(get_rpc)): try: return rpc._rpc_trade_status([tradeid])[0] except (RPCException, KeyError): - raise HTTPException(status_code=404, detail='Trade not found.') + raise HTTPException(status_code=404, detail="Trade not found.") -@router.delete('/trades/{tradeid}', response_model=DeleteTrade, tags=['info', 'trading']) +@router.delete("/trades/{tradeid}", response_model=DeleteTrade, tags=["info", "trading"]) def trades_delete(tradeid: int, rpc: RPC = Depends(get_rpc)): return rpc._rpc_delete(tradeid) -@router.delete('/trades/{tradeid}/open-order', response_model=OpenTradeSchema, tags=['trading']) +@router.delete("/trades/{tradeid}/open-order", response_model=OpenTradeSchema, tags=["trading"]) def trade_cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)): rpc._rpc_cancel_open_order(tradeid) return rpc._rpc_trade_status([tradeid])[0] -@router.post('/trades/{tradeid}/reload', response_model=OpenTradeSchema, tags=['trading']) +@router.post("/trades/{tradeid}/reload", response_model=OpenTradeSchema, tags=["trading"]) def trade_reload(tradeid: int, rpc: RPC = Depends(get_rpc)): rpc._rpc_reload_trade_from_exchange(tradeid) return rpc._rpc_trade_status([tradeid])[0] # TODO: Missing response model -@router.get('/edge', tags=['info']) +@router.get("/edge", tags=["info"]) def edge(rpc: RPC = Depends(get_rpc)): return rpc._rpc_edge() -@router.get('/show_config', response_model=ShowConfig, tags=['info']) +@router.get("/show_config", response_model=ShowConfig, 
tags=["info"]) def show_config(rpc: Optional[RPC] = Depends(get_rpc_optional), config=Depends(get_config)): - state = '' + state = "" strategy_version = None if rpc: state = rpc._freqtrade.state strategy_version = rpc._freqtrade.strategy.version() resp = RPC._rpc_show_config(config, state, strategy_version) - resp['api_version'] = API_VERSION + resp["api_version"] = API_VERSION return resp # /forcebuy is deprecated with short addition. use /forceentry instead -@router.post('/forceenter', response_model=ForceEnterResponse, tags=['trading']) -@router.post('/forcebuy', response_model=ForceEnterResponse, tags=['trading']) +@router.post("/forceenter", response_model=ForceEnterResponse, tags=["trading"]) +@router.post("/forcebuy", response_model=ForceEnterResponse, tags=["trading"]) def force_entry(payload: ForceEnterPayload, rpc: RPC = Depends(get_rpc)): ordertype = payload.ordertype.value if payload.ordertype else None - trade = rpc._rpc_force_entry(payload.pair, payload.price, order_side=payload.side, - order_type=ordertype, stake_amount=payload.stakeamount, - enter_tag=payload.entry_tag or 'force_entry', - leverage=payload.leverage) + trade = rpc._rpc_force_entry( + payload.pair, + payload.price, + order_side=payload.side, + order_type=ordertype, + stake_amount=payload.stakeamount, + enter_tag=payload.entry_tag or "force_entry", + leverage=payload.leverage, + ) if trade: return ForceEnterResponse.model_validate(trade.to_json()) else: return ForceEnterResponse.model_validate( - {"status": f"Error entering {payload.side} trade for pair {payload.pair}."}) + {"status": f"Error entering {payload.side} trade for pair {payload.pair}."} + ) # /forcesell is deprecated with short addition. use /forceexit instead -@router.post('/forceexit', response_model=ResultMsg, tags=['trading']) -@router.post('/forcesell', response_model=ResultMsg, tags=['trading']) +@router.post("/forceexit", response_model=ResultMsg, tags=["trading"]) +@router.post("/forcesell", response_model=ResultMsg, tags=["trading"]) def forceexit(payload: ForceExitPayload, rpc: RPC = Depends(get_rpc)): ordertype = payload.ordertype.value if payload.ordertype else None return rpc._rpc_force_exit(str(payload.tradeid), ordertype, amount=payload.amount) -@router.get('/blacklist', response_model=BlacklistResponse, tags=['info', 'pairlist']) +@router.get("/blacklist", response_model=BlacklistResponse, tags=["info", "pairlist"]) def blacklist(rpc: RPC = Depends(get_rpc)): return rpc._rpc_blacklist() -@router.post('/blacklist', response_model=BlacklistResponse, tags=['info', 'pairlist']) +@router.post("/blacklist", response_model=BlacklistResponse, tags=["info", "pairlist"]) def blacklist_post(payload: BlacklistPayload, rpc: RPC = Depends(get_rpc)): return rpc._rpc_blacklist(payload.blacklist) -@router.delete('/blacklist', response_model=BlacklistResponse, tags=['info', 'pairlist']) +@router.delete("/blacklist", response_model=BlacklistResponse, tags=["info", "pairlist"]) def blacklist_delete(pairs_to_delete: List[str] = Query([]), rpc: RPC = Depends(get_rpc)): """Provide a list of pairs to delete from the blacklist""" return rpc._rpc_blacklist_delete(pairs_to_delete) -@router.get('/whitelist', response_model=WhitelistResponse, tags=['info', 'pairlist']) +@router.get("/whitelist", response_model=WhitelistResponse, tags=["info", "pairlist"]) def whitelist(rpc: RPC = Depends(get_rpc)): return rpc._rpc_whitelist() -@router.get('/locks', response_model=Locks, tags=['info', 'locks']) +@router.get("/locks", response_model=Locks, tags=["info", "locks"]) 
def locks(rpc: RPC = Depends(get_rpc)): return rpc._rpc_locks() -@router.delete('/locks/{lockid}', response_model=Locks, tags=['info', 'locks']) +@router.delete("/locks/{lockid}", response_model=Locks, tags=["info", "locks"]) def delete_lock(lockid: int, rpc: RPC = Depends(get_rpc)): return rpc._rpc_delete_lock(lockid=lockid) -@router.post('/locks/delete', response_model=Locks, tags=['info', 'locks']) +@router.post("/locks/delete", response_model=Locks, tags=["info", "locks"]) def delete_lock_pair(payload: DeleteLockRequest, rpc: RPC = Depends(get_rpc)): return rpc._rpc_delete_lock(lockid=payload.lockid, pair=payload.pair) -@router.post('/locks', response_model=Locks, tags=['info', 'locks']) +@router.post("/locks", response_model=Locks, tags=["info", "locks"]) def add_locks(payload: List[LocksPayload], rpc: RPC = Depends(get_rpc)): for lock in payload: rpc._rpc_add_lock(lock.pair, lock.until, lock.reason, lock.side) return rpc._rpc_locks() -@router.get('/logs', response_model=Logs, tags=['info']) +@router.get("/logs", response_model=Logs, tags=["info"]) def logs(limit: Optional[int] = None): return RPC._rpc_get_logs(limit) -@router.post('/start', response_model=StatusMsg, tags=['botcontrol']) +@router.post("/start", response_model=StatusMsg, tags=["botcontrol"]) def start(rpc: RPC = Depends(get_rpc)): return rpc._rpc_start() -@router.post('/stop', response_model=StatusMsg, tags=['botcontrol']) +@router.post("/stop", response_model=StatusMsg, tags=["botcontrol"]) def stop(rpc: RPC = Depends(get_rpc)): return rpc._rpc_stop() -@router.post('/stopentry', response_model=StatusMsg, tags=['botcontrol']) -@router.post('/stopbuy', response_model=StatusMsg, tags=['botcontrol']) +@router.post("/stopentry", response_model=StatusMsg, tags=["botcontrol"]) +@router.post("/stopbuy", response_model=StatusMsg, tags=["botcontrol"]) def stop_buy(rpc: RPC = Depends(get_rpc)): return rpc._rpc_stopentry() -@router.post('/reload_config', response_model=StatusMsg, tags=['botcontrol']) +@router.post("/reload_config", response_model=StatusMsg, tags=["botcontrol"]) def reload_config(rpc: RPC = Depends(get_rpc)): return rpc._rpc_reload_config() -@router.get('/pair_candles', response_model=PairHistory, tags=['candle data']) +@router.get("/pair_candles", response_model=PairHistory, tags=["candle data"]) def pair_candles( - pair: str, timeframe: str, limit: Optional[int] = None, rpc: RPC = Depends(get_rpc)): - return rpc._rpc_analysed_dataframe(pair, timeframe, limit) + pair: str, timeframe: str, limit: Optional[int] = None, rpc: RPC = Depends(get_rpc) +): + return rpc._rpc_analysed_dataframe(pair, timeframe, limit, None) -@router.get('/pair_history', response_model=PairHistory, tags=['candle data']) -def pair_history(pair: str, timeframe: str, timerange: str, strategy: str, - freqaimodel: Optional[str] = None, - config=Depends(get_config), exchange=Depends(get_exchange)): +@router.post("/pair_candles", response_model=PairHistory, tags=["candle data"]) +def pair_candles_filtered(payload: PairCandlesRequest, rpc: RPC = Depends(get_rpc)): + # Advanced pair_candles endpoint with column filtering + return rpc._rpc_analysed_dataframe( + payload.pair, payload.timeframe, payload.limit, payload.columns + ) + + +@router.get("/pair_history", response_model=PairHistory, tags=["candle data"]) +def pair_history( + pair: str, + timeframe: str, + timerange: str, + strategy: str, + freqaimodel: Optional[str] = None, + config=Depends(get_config), + exchange=Depends(get_exchange), +): # The initial call to this endpoint can be slow, as it 
may need to initialize # the exchange class. config = deepcopy(config) - config.update({ - 'strategy': strategy, - 'timerange': timerange, - 'freqaimodel': freqaimodel if freqaimodel else config.get('freqaimodel'), - }) + config.update( + { + "strategy": strategy, + "timerange": timerange, + "freqaimodel": freqaimodel if freqaimodel else config.get("freqaimodel"), + } + ) try: - return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange) + return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange, None) except Exception as e: raise HTTPException(status_code=502, detail=str(e)) -@router.get('/plot_config', response_model=PlotConfig, tags=['candle data']) -def plot_config(strategy: Optional[str] = None, config=Depends(get_config), - rpc: Optional[RPC] = Depends(get_rpc_optional)): +@router.post("/pair_history", response_model=PairHistory, tags=["candle data"]) +def pair_history_filtered( + payload: PairHistoryRequest, config=Depends(get_config), exchange=Depends(get_exchange) +): + # The initial call to this endpoint can be slow, as it may need to initialize + # the exchange class. + config = deepcopy(config) + config.update( + { + "strategy": payload.strategy, + "timerange": payload.timerange, + "freqaimodel": ( + payload.freqaimodel if payload.freqaimodel else config.get("freqaimodel") + ), + } + ) + try: + return RPC._rpc_analysed_history_full( + config, payload.pair, payload.timeframe, exchange, payload.columns + ) + except Exception as e: + raise HTTPException(status_code=502, detail=str(e)) + + +@router.get("/plot_config", response_model=PlotConfig, tags=["candle data"]) +def plot_config( + strategy: Optional[str] = None, + config=Depends(get_config), + rpc: Optional[RPC] = Depends(get_rpc_optional), +): if not strategy: if not rpc: raise RPCException("Strategy is mandatory in webserver mode.") return PlotConfig.model_validate(rpc._rpc_plot_config()) else: config1 = deepcopy(config) - config1.update({ - 'strategy': strategy - }) + config1.update({"strategy": strategy}) try: return PlotConfig.model_validate(RPC._rpc_plot_config_with_strategy(config1)) except Exception as e: raise HTTPException(status_code=502, detail=str(e)) -@router.get('/strategies', response_model=StrategyListResponse, tags=['strategy']) +@router.get("/strategies", response_model=StrategyListResponse, tags=["strategy"]) def list_strategies(config=Depends(get_config)): from freqtrade.resolvers.strategy_resolver import StrategyResolver + strategies = StrategyResolver.search_all_objects( - config, False, config.get('recursive_strategy_search', False)) - strategies = sorted(strategies, key=lambda x: x['name']) + config, False, config.get("recursive_strategy_search", False) + ) + strategies = sorted(strategies, key=lambda x: x["name"]) - return {'strategies': [x['name'] for x in strategies]} + return {"strategies": [x["name"] for x in strategies]} -@router.get('/strategy/{strategy}', response_model=StrategyResponse, tags=['strategy']) +@router.get("/strategy/{strategy}", response_model=StrategyResponse, tags=["strategy"]) def get_strategy(strategy: str, config=Depends(get_config)): if ":" in strategy: raise HTTPException(status_code=500, detail="base64 encoded strategies are not allowed.") config_ = deepcopy(config) from freqtrade.resolvers.strategy_resolver import StrategyResolver + try: - strategy_obj = StrategyResolver._load_strategy(strategy, config_, - extra_dir=config_.get('strategy_path')) + strategy_obj = StrategyResolver._load_strategy( + strategy, config_, 
extra_dir=config_.get("strategy_path") + ) except OperationalException: - raise HTTPException(status_code=404, detail='Strategy not found') + raise HTTPException(status_code=404, detail="Strategy not found") except Exception as e: raise HTTPException(status_code=502, detail=str(e)) return { - 'strategy': strategy_obj.get_strategy_name(), - 'code': strategy_obj.__source__, - 'timeframe': getattr(strategy_obj, 'timeframe', None), + "strategy": strategy_obj.get_strategy_name(), + "code": strategy_obj.__source__, + "timeframe": getattr(strategy_obj, "timeframe", None), } -@router.get('/exchanges', response_model=ExchangeListResponse, tags=[]) +@router.get("/exchanges", response_model=ExchangeListResponse, tags=[]) def list_exchanges(config=Depends(get_config)): from freqtrade.exchange import list_available_exchanges + exchanges = list_available_exchanges(config) return { - 'exchanges': exchanges, + "exchanges": exchanges, } -@router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai']) +@router.get("/freqaimodels", response_model=FreqAIModelListResponse, tags=["freqai"]) def list_freqaimodels(config=Depends(get_config)): from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver - models = FreqaiModelResolver.search_all_objects( - config, False) - models = sorted(models, key=lambda x: x['name']) - return {'freqaimodels': [x['name'] for x in models]} + models = FreqaiModelResolver.search_all_objects(config, False) + models = sorted(models, key=lambda x: x["name"]) + + return {"freqaimodels": [x["name"] for x in models]} -@router.get('/available_pairs', response_model=AvailablePairs, tags=['candle data']) -def list_available_pairs(timeframe: Optional[str] = None, stake_currency: Optional[str] = None, - candletype: Optional[CandleType] = None, config=Depends(get_config)): - - dh = get_datahandler(config['datadir'], config.get('dataformat_ohlcv')) - trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) - pair_interval = dh.ohlcv_get_available_data(config['datadir'], trading_mode) +@router.get("/available_pairs", response_model=AvailablePairs, tags=["candle data"]) +def list_available_pairs( + timeframe: Optional[str] = None, + stake_currency: Optional[str] = None, + candletype: Optional[CandleType] = None, + config=Depends(get_config), +): + dh = get_datahandler(config["datadir"], config.get("dataformat_ohlcv")) + trading_mode: TradingMode = config.get("trading_mode", TradingMode.SPOT) + pair_interval = dh.ohlcv_get_available_data(config["datadir"], trading_mode) if timeframe: pair_interval = [pair for pair in pair_interval if pair[1] == timeframe] @@ -403,18 +492,18 @@ def list_available_pairs(timeframe: Optional[str] = None, stake_currency: Option pairs = list({x[0] for x in pair_interval}) pairs.sort() result = { - 'length': len(pairs), - 'pairs': pairs, - 'pair_interval': pair_interval, + "length": len(pairs), + "pairs": pairs, + "pair_interval": pair_interval, } return result -@router.get('/sysinfo', response_model=SysInfo, tags=['info']) +@router.get("/sysinfo", response_model=SysInfo, tags=["info"]) def sysinfo(): return RPC._rpc_sysinfo() -@router.get('/health', response_model=Health, tags=['info']) +@router.get("/health", response_model=Health, tags=["info"]) def health(rpc: RPC = Depends(get_rpc)): return rpc.health() diff --git a/freqtrade/rpc/api_server/api_ws.py b/freqtrade/rpc/api_server/api_ws.py index 16aeb56f3..5e2eddc68 100644 --- a/freqtrade/rpc/api_server/api_ws.py +++ b/freqtrade/rpc/api_server/api_ws.py @@ -12,9 +12,13 
@@ from freqtrade.rpc.api_server.api_auth import validate_ws_token from freqtrade.rpc.api_server.deps import get_message_stream, get_rpc from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel from freqtrade.rpc.api_server.ws.message_stream import MessageStream -from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSErrorMessage, - WSMessageSchema, WSRequestSchema, - WSWhitelistMessage) +from freqtrade.rpc.api_server.ws_schemas import ( + WSAnalyzedDFMessage, + WSErrorMessage, + WSMessageSchema, + WSRequestSchema, + WSWhitelistMessage, +) from freqtrade.rpc.rpc import RPC @@ -33,7 +37,7 @@ async def channel_reader(channel: WebSocketChannel, rpc: RPC): await _process_consumer_request(message, channel, rpc) except FreqtradeException: logger.exception(f"Error processing request from {channel}") - response = WSErrorMessage(data='Error processing request') + response = WSErrorMessage(data="Error processing request") await channel.send(response.dict(exclude_none=True)) @@ -43,23 +47,21 @@ async def channel_broadcaster(channel: WebSocketChannel, message_stream: Message Iterate over messages in the message stream and send them """ async for message, ts in message_stream: - if channel.subscribed_to(message.get('type')): + if channel.subscribed_to(message.get("type")): # Log a warning if this channel is behind # on the message stream by a lot if (time.time() - ts) > 60: - logger.warning(f"Channel {channel} is behind MessageStream by 1 minute," - " this can cause a memory leak if you see this message" - " often, consider reducing pair list size or amount of" - " consumers.") + logger.warning( + f"Channel {channel} is behind MessageStream by 1 minute," + " this can cause a memory leak if you see this message" + " often, consider reducing pair list size or amount of" + " consumers." 
+ ) await channel.send(message, timeout=True) -async def _process_consumer_request( - request: Dict[str, Any], - channel: WebSocketChannel, - rpc: RPC -): +async def _process_consumer_request(request: Dict[str, Any], channel: WebSocketChannel, rpc: RPC): """ Validate and handle a request from a websocket consumer """ @@ -98,8 +100,8 @@ async def _process_consumer_request( elif type_ == RPCRequestType.ANALYZED_DF: # Limit the amount of candles per dataframe to 'limit' or 1500 - limit = int(min(data.get('limit', 1500), 1500)) if data else None - pair = data.get('pair', None) if data else None + limit = int(min(data.get("limit", 1500), 1500)) if data else None + pair = data.get("pair", None) if data else None # For every pair in the generator, send a separate message for message in rpc._ws_request_analyzed_df(limit, pair): @@ -113,11 +115,10 @@ async def message_endpoint( websocket: WebSocket, token: str = Depends(validate_ws_token), rpc: RPC = Depends(get_rpc), - message_stream: MessageStream = Depends(get_message_stream) + message_stream: MessageStream = Depends(get_message_stream), ): if token: async with create_channel(websocket) as channel: await channel.run_channel_tasks( - channel_reader(channel, rpc), - channel_broadcaster(channel, message_stream) + channel_reader(channel, rpc), channel_broadcaster(channel, message_stream) ) diff --git a/freqtrade/rpc/api_server/deps.py b/freqtrade/rpc/api_server/deps.py index c8c06695a..766673be7 100644 --- a/freqtrade/rpc/api_server/deps.py +++ b/freqtrade/rpc/api_server/deps.py @@ -20,7 +20,6 @@ def get_rpc_optional() -> Optional[RPC]: async def get_rpc() -> Optional[AsyncIterator[RPC]]: - _rpc = get_rpc_optional() if _rpc: request_id = str(uuid4()) @@ -33,7 +32,7 @@ async def get_rpc() -> Optional[AsyncIterator[RPC]]: _request_id_ctx_var.reset(ctx_token) else: - raise RPCException('Bot is not in the correct state') + raise RPCException("Bot is not in the correct state") def get_config() -> Dict[str, Any]: @@ -41,7 +40,7 @@ def get_config() -> Dict[str, Any]: def get_api_config() -> Dict[str, Any]: - return ApiServer._config['api_server'] + return ApiServer._config["api_server"] def _generate_exchange_key(config: Config) -> str: @@ -55,8 +54,8 @@ def get_exchange(config=Depends(get_config)): exchange_key = _generate_exchange_key(config) if not (exchange := ApiBG.exchanges.get(exchange_key)): from freqtrade.resolvers import ExchangeResolver - exchange = ExchangeResolver.load_exchange( - config, validate=False, load_leverage_tiers=False) + + exchange = ExchangeResolver.load_exchange(config, validate=False, load_leverage_tiers=False) ApiBG.exchanges[exchange_key] = exchange return exchange @@ -66,7 +65,6 @@ def get_message_stream(): def is_webserver_mode(config=Depends(get_config)): - if config['runmode'] != RunMode.WEBSERVER: - raise HTTPException(status_code=503, - detail='Bot is not in the correct state.') + if config["runmode"] != RunMode.WEBSERVER: + raise HTTPException(status_code=503, detail="Bot is not in the correct state.") return None diff --git a/freqtrade/rpc/api_server/uvicorn_threaded.py b/freqtrade/rpc/api_server/uvicorn_threaded.py index 48786bec2..cad8251db 100644 --- a/freqtrade/rpc/api_server/uvicorn_threaded.py +++ b/freqtrade/rpc/api_server/uvicorn_threaded.py @@ -14,6 +14,7 @@ def asyncio_setup() -> None: # pragma: no cover if sys.version_info >= (3, 8) and sys.platform == "win32": import asyncio import selectors + selector = selectors.SelectSelector() loop = asyncio.SelectorEventLoop(selector) asyncio.set_event_loop(loop) @@ 
-42,7 +43,6 @@ class UvicornServer(uvicorn.Server): try: import uvloop # noqa except ImportError: # pragma: no cover - asyncio_setup() else: asyncio.set_event_loop(uvloop.new_event_loop()) @@ -55,7 +55,7 @@ class UvicornServer(uvicorn.Server): @contextlib.contextmanager def run_in_thread(self): - self.thread = threading.Thread(target=self.run, name='FTUvicorn') + self.thread = threading.Thread(target=self.run, name="FTUvicorn") self.thread.start() while not self.started: time.sleep(1e-3) diff --git a/freqtrade/rpc/api_server/web_ui.py b/freqtrade/rpc/api_server/web_ui.py index b701b4901..6d37ec308 100644 --- a/freqtrade/rpc/api_server/web_ui.py +++ b/freqtrade/rpc/api_server/web_ui.py @@ -9,20 +9,21 @@ from starlette.responses import FileResponse router_ui = APIRouter() -@router_ui.get('/favicon.ico', include_in_schema=False) +@router_ui.get("/favicon.ico", include_in_schema=False) async def favicon(): - return FileResponse(str(Path(__file__).parent / 'ui/favicon.ico')) + return FileResponse(str(Path(__file__).parent / "ui/favicon.ico")) -@router_ui.get('/fallback_file.html', include_in_schema=False) +@router_ui.get("/fallback_file.html", include_in_schema=False) async def fallback(): - return FileResponse(str(Path(__file__).parent / 'ui/fallback_file.html')) + return FileResponse(str(Path(__file__).parent / "ui/fallback_file.html")) -@router_ui.get('/ui_version', include_in_schema=False) +@router_ui.get("/ui_version", include_in_schema=False) async def ui_version(): from freqtrade.commands.deploy_commands import read_ui_version - uibase = Path(__file__).parent / 'ui/installed/' + + uibase = Path(__file__).parent / "ui/installed/" version = read_ui_version(uibase) return { @@ -40,26 +41,26 @@ def is_relative_to(path: Path, base: Path) -> bool: return False -@router_ui.get('/{rest_of_path:path}', include_in_schema=False) +@router_ui.get("/{rest_of_path:path}", include_in_schema=False) async def index_html(rest_of_path: str): """ Emulate path fallback to index.html. """ - if rest_of_path.startswith('api') or rest_of_path.startswith('.'): + if rest_of_path.startswith("api") or rest_of_path.startswith("."): raise HTTPException(status_code=404, detail="Not Found") - uibase = Path(__file__).parent / 'ui/installed/' + uibase = Path(__file__).parent / "ui/installed/" filename = uibase / rest_of_path # It's security relevant to check "relative_to". # Without this, Directory-traversal is possible. 
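Editorial aside, not part of the diff: the security comment above is why index_html() only serves a file when is_relative_to(filename, uibase) holds - pathlib's Path.relative_to() raises ValueError for any path that does not live under the base directory, which is what blocks directory traversal. A standalone sketch of the same idea (paths and the helper name below are illustrative, not taken from the repository):

from pathlib import Path

uibase = Path("/srv/freqtrade/ui/installed")  # hypothetical install location

def is_inside(path: Path, base: Path) -> bool:
    # Path.relative_to() raises ValueError when `path` is not located under `base`
    try:
        path.relative_to(base)
        return True
    except ValueError:
        return False

print(is_inside(uibase / "assets/index.js", uibase))               # True  -> file may be served
print(is_inside((uibase / "../../etc/passwd").resolve(), uibase))  # False -> request is rejected
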
media_type: Optional[str] = None - if filename.suffix == '.js': + if filename.suffix == ".js": # Force text/javascript for .js files - Circumvent faulty system configuration - media_type = 'application/javascript' + media_type = "application/javascript" if filename.is_file() and is_relative_to(filename, uibase): return FileResponse(str(filename), media_type=media_type) - index_file = uibase / 'index.html' + index_file = uibase / "index.html" if not index_file.is_file(): - return FileResponse(str(uibase.parent / 'fallback_file.html')) + return FileResponse(str(uibase.parent / "fallback_file.html")) # Fall back to index.html, as indicated by vue router docs return FileResponse(str(index_file)) diff --git a/freqtrade/rpc/api_server/webserver.py b/freqtrade/rpc/api_server/webserver.py index c28d6da6c..57f321739 100644 --- a/freqtrade/rpc/api_server/webserver.py +++ b/freqtrade/rpc/api_server/webserver.py @@ -1,5 +1,5 @@ import logging -from ipaddress import IPv4Address +from ipaddress import ip_address from typing import Any, Optional import orjson @@ -32,7 +32,6 @@ class FTJSONResponse(JSONResponse): class ApiServer(RPCHandler): - __instance = None __initialized = False @@ -61,13 +60,14 @@ class ApiServer(RPCHandler): ApiServer.__initialized = True - api_config = self._config['api_server'] + api_config = self._config["api_server"] - self.app = FastAPI(title="Freqtrade API", - docs_url='/docs' if api_config.get('enable_openapi', False) else None, - redoc_url=None, - default_response_class=FTJSONResponse, - ) + self.app = FastAPI( + title="Freqtrade API", + docs_url="/docs" if api_config.get("enable_openapi", False) else None, + redoc_url=None, + default_response_class=FTJSONResponse, + ) self.configure_app(self.app, self._config) self.start_api() @@ -80,10 +80,10 @@ class ApiServer(RPCHandler): ApiServer._has_rpc = True else: # This should not happen assuming we didn't mess up. 
- raise OperationalException('RPC Handler already attached.') + raise OperationalException("RPC Handler already attached.") def cleanup(self) -> None: - """ Cleanup pending module resources """ + """Cleanup pending module resources""" ApiServer._has_rpc = False del ApiServer._rpc if self._server and not self._standalone: @@ -109,8 +109,7 @@ class ApiServer(RPCHandler): def handle_rpc_exception(self, request, exc): logger.error(f"API Error calling: {exc}") return JSONResponse( - status_code=502, - content={'error': f"Error querying {request.url.path}: {exc.message}"} + status_code=502, content={"error": f"Error querying {request.url.path}: {exc.message}"} ) def configure_app(self, app: FastAPI, config): @@ -125,39 +124,37 @@ class ApiServer(RPCHandler): app.include_router(api_v1_public, prefix="/api/v1") - app.include_router(api_v1, prefix="/api/v1", - dependencies=[Depends(http_basic_or_jwt_token)], - ) - app.include_router(api_backtest, prefix="/api/v1", - dependencies=[Depends(http_basic_or_jwt_token), - Depends(is_webserver_mode)], - ) - app.include_router(api_bg_tasks, prefix="/api/v1", - dependencies=[Depends(http_basic_or_jwt_token), - Depends(is_webserver_mode)], - ) - app.include_router(ws_router, prefix="/api/v1") app.include_router(router_login, prefix="/api/v1", tags=["auth"]) + app.include_router( + api_v1, + prefix="/api/v1", + dependencies=[Depends(http_basic_or_jwt_token)], + ) + app.include_router( + api_backtest, + prefix="/api/v1", + dependencies=[Depends(http_basic_or_jwt_token), Depends(is_webserver_mode)], + ) + app.include_router( + api_bg_tasks, + prefix="/api/v1", + dependencies=[Depends(http_basic_or_jwt_token), Depends(is_webserver_mode)], + ) + app.include_router(ws_router, prefix="/api/v1") # UI Router MUST be last! - app.include_router(router_ui, prefix='') + app.include_router(router_ui, prefix="") app.add_middleware( CORSMiddleware, - allow_origins=config['api_server'].get('CORS_origins', []), + allow_origins=config["api_server"].get("CORS_origins", []), allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) app.add_exception_handler(RPCException, self.handle_rpc_exception) - app.add_event_handler( - event_type="startup", - func=self._api_startup_event - ) - app.add_event_handler( - event_type="shutdown", - func=self._api_shutdown_event - ) + app.add_event_handler(event_type="startup", func=self._api_startup_event) + app.add_event_handler(event_type="shutdown", func=self._api_shutdown_event) async def _api_startup_event(self): """ @@ -179,35 +176,43 @@ class ApiServer(RPCHandler): """ Start API ... should be run in thread. 
""" - rest_ip = self._config['api_server']['listen_ip_address'] - rest_port = self._config['api_server']['listen_port'] + rest_ip = self._config["api_server"]["listen_ip_address"] + rest_port = self._config["api_server"]["listen_port"] - logger.info(f'Starting HTTP Server at {rest_ip}:{rest_port}') - if not IPv4Address(rest_ip).is_loopback and not running_in_docker(): + logger.info(f"Starting HTTP Server at {rest_ip}:{rest_port}") + if not ip_address(rest_ip).is_loopback and not running_in_docker(): logger.warning("SECURITY WARNING - Local Rest Server listening to external connections") - logger.warning("SECURITY WARNING - This is insecure please set to your loopback," - "e.g 127.0.0.1 in config.json") + logger.warning( + "SECURITY WARNING - This is insecure please set to your loopback," + "e.g 127.0.0.1 in config.json" + ) - if not self._config['api_server'].get('password'): - logger.warning("SECURITY WARNING - No password for local REST Server defined. " - "Please make sure that this is intentional!") + if not self._config["api_server"].get("password"): + logger.warning( + "SECURITY WARNING - No password for local REST Server defined. " + "Please make sure that this is intentional!" + ) - if (self._config['api_server'].get('jwt_secret_key', 'super-secret') - in ('super-secret, somethingrandom')): - logger.warning("SECURITY WARNING - `jwt_secret_key` seems to be default." - "Others may be able to log into your bot.") + if self._config["api_server"].get("jwt_secret_key", "super-secret") in ( + "super-secret, somethingrandom" + ): + logger.warning( + "SECURITY WARNING - `jwt_secret_key` seems to be default." + "Others may be able to log into your bot." + ) - logger.info('Starting Local Rest Server.') - verbosity = self._config['api_server'].get('verbosity', 'error') + logger.info("Starting Local Rest Server.") + verbosity = self._config["api_server"].get("verbosity", "error") - uvconfig = uvicorn.Config(self.app, - port=rest_port, - host=rest_ip, - use_colors=False, - log_config=None, - access_log=True if verbosity != 'error' else False, - ws_ping_interval=None # We do this explicitly ourselves - ) + uvconfig = uvicorn.Config( + self.app, + port=rest_port, + host=rest_ip, + use_colors=False, + log_config=None, + access_log=True if verbosity != "error" else False, + ws_ping_interval=None, # We do this explicitly ourselves + ) try: self._server = UvicornServer(uvconfig) if self._standalone: diff --git a/freqtrade/rpc/api_server/webserver_bgwork.py b/freqtrade/rpc/api_server/webserver_bgwork.py index 13f45227e..d3cf4d2ea 100644 --- a/freqtrade/rpc/api_server/webserver_bgwork.py +++ b/freqtrade/rpc/api_server/webserver_bgwork.py @@ -1,4 +1,3 @@ - from typing import Any, Dict, Literal, Optional, TypedDict from uuid import uuid4 @@ -6,7 +5,7 @@ from freqtrade.exchange.exchange import Exchange class JobsContainer(TypedDict): - category: Literal['pairlist'] + category: Literal["pairlist"] is_running: bool status: str progress: Optional[float] @@ -17,11 +16,11 @@ class JobsContainer(TypedDict): class ApiBG: # Backtesting type: Backtesting bt: Dict[str, Any] = { - 'bt': None, - 'data': None, - 'timerange': None, - 'last_config': {}, - 'bt_error': None, + "bt": None, + "data": None, + "timerange": None, + "last_config": {}, + "bt_error": None, } bgtask_running: bool = False # Exchange - only available in webserver mode. 
diff --git a/freqtrade/rpc/api_server/ws/channel.py b/freqtrade/rpc/api_server/ws/channel.py index 01bc7d276..0041bb6b2 100644 --- a/freqtrade/rpc/api_server/ws/channel.py +++ b/freqtrade/rpc/api_server/ws/channel.py @@ -10,8 +10,10 @@ from fastapi import WebSocketDisconnect from websockets.exceptions import ConnectionClosed from freqtrade.rpc.api_server.ws.proxy import WebSocketProxy -from freqtrade.rpc.api_server.ws.serializer import (HybridJSONWebSocketSerializer, - WebSocketSerializer) +from freqtrade.rpc.api_server.ws.serializer import ( + HybridJSONWebSocketSerializer, + WebSocketSerializer, +) from freqtrade.rpc.api_server.ws.types import WebSocketType from freqtrade.rpc.api_server.ws_schemas import WSMessageSchemaType @@ -23,12 +25,13 @@ class WebSocketChannel: """ Object to help facilitate managing a websocket connection """ + def __init__( self, websocket: WebSocketType, channel_id: Optional[str] = None, serializer_cls: Type[WebSocketSerializer] = HybridJSONWebSocketSerializer, - send_throttle: float = 0.01 + send_throttle: float = 0.01, ): self.channel_id = channel_id if channel_id else uuid4().hex[:8] self._websocket = WebSocketProxy(websocket) @@ -77,9 +80,7 @@ class WebSocketChannel: self._send_high_limit = min(max(self.avg_send_time * 2, 1), 3) async def send( - self, - message: Union[WSMessageSchemaType, Dict[str, Any]], - timeout: bool = False + self, message: Union[WSMessageSchemaType, Dict[str, Any]], timeout: bool = False ): """ Send a message on the wrapped websocket. If the sending @@ -95,8 +96,7 @@ class WebSocketChannel: # a TimeoutError and bubble up to the # message_endpoint to close the connection await asyncio.wait_for( - self._wrapped_ws.send(message), - timeout=self._send_high_limit if timeout else None + self._wrapped_ws.send(message), timeout=self._send_high_limit if timeout else None ) total_time = time.time() - _ self._send_times.append(total_time) @@ -205,7 +205,7 @@ class WebSocketChannel: asyncio.TimeoutError, WebSocketDisconnect, ConnectionClosed, - RuntimeError + RuntimeError, ): pass except Exception as e: @@ -225,10 +225,7 @@ class WebSocketChannel: @asynccontextmanager -async def create_channel( - websocket: WebSocketType, - **kwargs -) -> AsyncIterator[WebSocketChannel]: +async def create_channel(websocket: WebSocketType, **kwargs) -> AsyncIterator[WebSocketChannel]: """ Context manager for safely opening and closing a WebSocketChannel """ diff --git a/freqtrade/rpc/api_server/ws/message_stream.py b/freqtrade/rpc/api_server/ws/message_stream.py index a55a0da3c..f33bd7aef 100644 --- a/freqtrade/rpc/api_server/ws/message_stream.py +++ b/freqtrade/rpc/api_server/ws/message_stream.py @@ -7,6 +7,7 @@ class MessageStream: A message stream for consumers to subscribe to, and for producers to publish to. 
""" + def __init__(self): self._loop = asyncio.get_running_loop() self._waiter = self._loop.create_future() diff --git a/freqtrade/rpc/api_server/ws/serializer.py b/freqtrade/rpc/api_server/ws/serializer.py index 9a894e1bf..c07c6295f 100644 --- a/freqtrade/rpc/api_server/ws/serializer.py +++ b/freqtrade/rpc/api_server/ws/serializer.py @@ -46,15 +46,12 @@ class HybridJSONWebSocketSerializer(WebSocketSerializer): # Support serializing pandas DataFrames def _json_default(z): if isinstance(z, DataFrame): - return { - '__type__': 'dataframe', - '__value__': dataframe_to_json(z) - } + return {"__type__": "dataframe", "__value__": dataframe_to_json(z)} raise TypeError # Support deserializing JSON to pandas DataFrames def _json_object_hook(z): - if z.get('__type__') == 'dataframe': - return json_to_dataframe(z.get('__value__')) + if z.get("__type__") == "dataframe": + return json_to_dataframe(z.get("__value__")) return z diff --git a/freqtrade/rpc/api_server/ws_schemas.py b/freqtrade/rpc/api_server/ws_schemas.py index 970ea8cf8..70b12af8d 100644 --- a/freqtrade/rpc/api_server/ws_schemas.py +++ b/freqtrade/rpc/api_server/ws_schemas.py @@ -26,7 +26,7 @@ class WSMessageSchemaType(TypedDict): class WSMessageSchema(BaseArbitraryModel): type: RPCMessageType data: Optional[Any] = None - model_config = ConfigDict(extra='allow') + model_config = ConfigDict(extra="allow") # ------------------------------ REQUEST SCHEMAS ---------------------------- @@ -49,6 +49,7 @@ class WSAnalyzedDFRequest(WSRequestSchema): # ------------------------------ MESSAGE SCHEMAS ---------------------------- + class WSWhitelistMessage(WSMessageSchema): type: RPCMessageType = RPCMessageType.WHITELIST data: List[str] @@ -68,4 +69,5 @@ class WSErrorMessage(WSMessageSchema): type: RPCMessageType = RPCMessageType.EXCEPTION data: str + # -------------------------------------------------------------------------- diff --git a/freqtrade/rpc/discord.py b/freqtrade/rpc/discord.py index 43190e395..03f5fb2f8 100644 --- a/freqtrade/rpc/discord.py +++ b/freqtrade/rpc/discord.py @@ -10,18 +10,18 @@ logger = logging.getLogger(__name__) class Discord(Webhook): - def __init__(self, rpc: 'RPC', config: Config): + def __init__(self, rpc: "RPC", config: Config): self._config = config self.rpc = rpc - self.strategy = config.get('strategy', '') - self.timeframe = config.get('timeframe', '') - self.bot_name = config.get('bot_name', '') + self.strategy = config.get("strategy", "") + self.timeframe = config.get("timeframe", "") + self.bot_name = config.get("bot_name", "") - self._url = config['discord']['webhook_url'] - self._format = 'json' + self._url = config["discord"]["webhook_url"] + self._format = "json" self._retries = 1 self._retry_delay = 0.1 - self._timeout = self._config['discord'].get('timeout', 10) + self._timeout = self._config["discord"].get("timeout", 10) def cleanup(self) -> None: """ @@ -31,32 +31,31 @@ class Discord(Webhook): pass def send_msg(self, msg) -> None: - - if (fields := self._config['discord'].get(msg['type'].value)): + if fields := self._config["discord"].get(msg["type"].value): logger.info(f"Sending discord message: {msg}") - msg['strategy'] = self.strategy - msg['timeframe'] = self.timeframe - msg['bot_name'] = self.bot_name + msg["strategy"] = self.strategy + msg["timeframe"] = self.timeframe + msg["bot_name"] = self.bot_name color = 0x0000FF - if msg['type'] in (RPCMessageType.EXIT, RPCMessageType.EXIT_FILL): - profit_ratio = msg.get('profit_ratio') - color = (0x00FF00 if profit_ratio > 0 else 0xFF0000) - title = 
msg['type'].value - if 'pair' in msg: + if msg["type"] in (RPCMessageType.EXIT, RPCMessageType.EXIT_FILL): + profit_ratio = msg.get("profit_ratio") + color = 0x00FF00 if profit_ratio > 0 else 0xFF0000 + title = msg["type"].value + if "pair" in msg: title = f"Trade: {msg['pair']} {msg['type'].value}" - embeds = [{ - 'title': title, - 'color': color, - 'fields': [], - - }] + embeds = [ + { + "title": title, + "color": color, + "fields": [], + } + ] for f in fields: for k, v in f.items(): v = v.format(**msg) - embeds[0]['fields'].append( - {'name': k, 'value': v, 'inline': True}) + embeds[0]["fields"].append({"name": k, "value": v, "inline": True}) # Send the message to discord channel - payload = {'embeds': embeds} + payload = {"embeds": embeds} self._send_msg(payload) diff --git a/freqtrade/rpc/external_message_consumer.py b/freqtrade/rpc/external_message_consumer.py index bb0b3139f..7d33efea6 100644 --- a/freqtrade/rpc/external_message_consumer.py +++ b/freqtrade/rpc/external_message_consumer.py @@ -4,6 +4,7 @@ ExternalMessageConsumer module Main purpose is to connect to external bot's message websocket to consume data from it """ + import asyncio import logging import socket @@ -19,10 +20,15 @@ from freqtrade.enums import RPCMessageType from freqtrade.misc import remove_entry_exit_signals from freqtrade.rpc.api_server.ws.channel import WebSocketChannel, create_channel from freqtrade.rpc.api_server.ws.message_stream import MessageStream -from freqtrade.rpc.api_server.ws_schemas import (WSAnalyzedDFMessage, WSAnalyzedDFRequest, - WSMessageSchema, WSRequestSchema, - WSSubscribeRequest, WSWhitelistMessage, - WSWhitelistRequest) +from freqtrade.rpc.api_server.ws_schemas import ( + WSAnalyzedDFMessage, + WSAnalyzedDFRequest, + WSMessageSchema, + WSRequestSchema, + WSSubscribeRequest, + WSWhitelistMessage, + WSWhitelistRequest, +) if TYPE_CHECKING: @@ -50,11 +56,7 @@ class ExternalMessageConsumer: other freqtrade bot's """ - def __init__( - self, - config: Dict[str, Any], - dataprovider: DataProvider - ): + def __init__(self, config: Dict[str, Any], dataprovider: DataProvider): self._config = config self._dp = dataprovider @@ -64,21 +66,21 @@ class ExternalMessageConsumer: self._main_task = None self._sub_tasks = None - self._emc_config = self._config.get('external_message_consumer', {}) + self._emc_config = self._config.get("external_message_consumer", {}) - self.enabled = self._emc_config.get('enabled', False) - self.producers: List[Producer] = self._emc_config.get('producers', []) + self.enabled = self._emc_config.get("enabled", False) + self.producers: List[Producer] = self._emc_config.get("producers", []) - self.wait_timeout = self._emc_config.get('wait_timeout', 30) # in seconds - self.ping_timeout = self._emc_config.get('ping_timeout', 10) # in seconds - self.sleep_time = self._emc_config.get('sleep_time', 10) # in seconds + self.wait_timeout = self._emc_config.get("wait_timeout", 30) # in seconds + self.ping_timeout = self._emc_config.get("ping_timeout", 10) # in seconds + self.sleep_time = self._emc_config.get("sleep_time", 10) # in seconds # The amount of candles per dataframe on the initial request - self.initial_candle_limit = self._emc_config.get('initial_candle_limit', 1500) + self.initial_candle_limit = self._emc_config.get("initial_candle_limit", 1500) # Message size limit, in megabytes. Default 8mb, Use bitwise operator << 20 to convert # as the websockets client expects bytes. 
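The "<< 20" mentioned in the comment above is simply a megabytes-to-bytes conversion: shifting left by 20 bits multiplies by 2**20. A one-line check:

# 8 MB expressed in bytes, as expected by the websockets client's max_size argument.
limit_mb = 8
assert limit_mb << 20 == limit_mb * 1024 * 1024 == 8_388_608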
- self.message_size_limit = (self._emc_config.get('message_size_limit', 8) << 20) + self.message_size_limit = self._emc_config.get("message_size_limit", 8) << 20 # Setting these explicitly as they probably shouldn't be changed by a user # Unless we somehow integrate this with the strategy to allow creating @@ -89,7 +91,7 @@ class ExternalMessageConsumer: self._initial_requests: List[WSRequestSchema] = [ WSSubscribeRequest(data=self.topics), WSWhitelistRequest(), - WSAnalyzedDFRequest() + WSAnalyzedDFRequest(), ] # Specify which function to use for which RPCMessageType @@ -187,31 +189,24 @@ class ExternalMessageConsumer: """ while self._running: try: - host, port = producer['host'], producer['port'] - token = producer['ws_token'] - name = producer['name'] - scheme = 'wss' if producer.get('secure', False) else 'ws' + host, port = producer["host"], producer["port"] + token = producer["ws_token"] + name = producer["name"] + scheme = "wss" if producer.get("secure", False) else "ws" ws_url = f"{scheme}://{host}:{port}/api/v1/message/ws?token={token}" # This will raise InvalidURI if the url is bad async with websockets.connect( - ws_url, - max_size=self.message_size_limit, - ping_interval=None + ws_url, max_size=self.message_size_limit, ping_interval=None ) as ws: - async with create_channel( - ws, - channel_id=name, - send_throttle=0.5 - ) as channel: - + async with create_channel(ws, channel_id=name, send_throttle=0.5) as channel: # Create the message stream for this channel self._channel_streams[name] = MessageStream() # Run the channel tasks while connected await channel.run_channel_tasks( self._receive_messages(channel, producer, lock), - self._send_requests(channel, self._channel_streams[name]) + self._send_requests(channel, self._channel_streams[name]), ) except (websockets.exceptions.InvalidURI, ValueError) as e: @@ -222,7 +217,7 @@ class ExternalMessageConsumer: socket.gaierror, ConnectionRefusedError, websockets.exceptions.InvalidStatusCode, - websockets.exceptions.InvalidMessage + websockets.exceptions.InvalidMessage, ) as e: logger.error(f"Connection Refused - {e} retrying in {self.sleep_time}s") await asyncio.sleep(self.sleep_time) @@ -230,7 +225,7 @@ class ExternalMessageConsumer: except ( websockets.exceptions.ConnectionClosedError, - websockets.exceptions.ConnectionClosedOK + websockets.exceptions.ConnectionClosedOK, ): # Just keep trying to connect again indefinitely await asyncio.sleep(self.sleep_time) @@ -255,10 +250,7 @@ class ExternalMessageConsumer: await channel.send(request) async def _receive_messages( - self, - channel: WebSocketChannel, - producer: Producer, - lock: asyncio.Lock + self, channel: WebSocketChannel, producer: Producer, lock: asyncio.Lock ): """ Loop to handle receiving messages from a Producer @@ -269,10 +261,7 @@ class ExternalMessageConsumer: """ while self._running: try: - message = await asyncio.wait_for( - channel.recv(), - timeout=self.wait_timeout - ) + message = await asyncio.wait_for(channel.recv(), timeout=self.wait_timeout) try: async with lock: @@ -286,7 +275,7 @@ class ExternalMessageConsumer: try: # ping pong = await channel.ping() - latency = (await asyncio.wait_for(pong, timeout=self.ping_timeout) * 1000) + latency = await asyncio.wait_for(pong, timeout=self.ping_timeout) * 1000 logger.info(f"Connection to {channel} still alive, latency: {latency}ms") continue @@ -298,9 +287,7 @@ class ExternalMessageConsumer: raise def send_producer_request( - self, - producer_name: str, - request: Union[WSRequestSchema, Dict[str, Any]] + self, 
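The reconnect behaviour in _handle_producer_connection boils down to: connect, run until the connection drops, sleep, retry forever. A stripped-down sketch of that loop (illustrative only; the URL, sleep time and message handling are placeholders, not freqtrade code):

import asyncio

import websockets


async def consume(ws_url: str, sleep_time: float = 10.0) -> None:
    while True:
        try:
            # max_size mirrors the 8 MB default above; ping_interval is disabled here,
            # matching the connect call in the diff.
            async with websockets.connect(ws_url, max_size=8 << 20, ping_interval=None) as ws:
                async for message in ws:
                    print(f"received message of length {len(message)}")
        except (
            websockets.exceptions.ConnectionClosedError,
            websockets.exceptions.ConnectionClosedOK,
        ):
            # Connection dropped, cleanly or not; wait and reconnect indefinitely.
            await asyncio.sleep(sleep_time)
        except (OSError, asyncio.TimeoutError):
            # Covers connection refused / unreachable hosts.
            await asyncio.sleep(sleep_time)


# asyncio.run(consume("ws://127.0.0.1:8080/api/v1/message/ws?token=secret"))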
producer_name: str, request: Union[WSRequestSchema, Dict[str, Any]] ): """ Publish a message to the producer's message stream to be @@ -319,7 +306,7 @@ class ExternalMessageConsumer: """ Handles external messages from a Producer """ - producer_name = producer.get('name', 'default') + producer_name = producer.get("name", "default") try: producer_message = WSMessageSchema.model_validate(message) @@ -372,7 +359,7 @@ class ExternalMessageConsumer: return # If set, remove the Entry and Exit signals from the Producer - if self._emc_config.get('remove_entry_exit_signals', False): + if self._emc_config.get("remove_entry_exit_signals", False): df = remove_entry_exit_signals(df) logger.debug(f"Received {len(df)} candle(s) for {key}") @@ -383,8 +370,8 @@ class ExternalMessageConsumer: last_analyzed=la, timeframe=timeframe, candle_type=candle_type, - producer_name=producer_name - ) + producer_name=producer_name, + ) if not did_append: # We want an overlap in candles in case some data has changed @@ -392,20 +379,17 @@ class ExternalMessageConsumer: # Set to None for all candles if we missed a full df's worth of candles n_missing = n_missing if n_missing < FULL_DATAFRAME_THRESHOLD else 1500 - logger.warning(f"Holes in data or no existing df, requesting {n_missing} candles " - f"for {key} from `{producer_name}`") + logger.warning( + f"Holes in data or no existing df, requesting {n_missing} candles " + f"for {key} from `{producer_name}`" + ) self.send_producer_request( - producer_name, - WSAnalyzedDFRequest( - data={ - "limit": n_missing, - "pair": pair - } - ) + producer_name, WSAnalyzedDFRequest(data={"limit": n_missing, "pair": pair}) ) return logger.debug( f"Consumed message from `{producer_name}` " - f"of type `RPCMessageType.ANALYZED_DF` for {key}") + f"of type `RPCMessageType.ANALYZED_DF` for {key}" + ) diff --git a/freqtrade/rpc/fiat_convert.py b/freqtrade/rpc/fiat_convert.py index 2b44d0546..20f8df468 100644 --- a/freqtrade/rpc/fiat_convert.py +++ b/freqtrade/rpc/fiat_convert.py @@ -5,14 +5,14 @@ e.g BTC to USD import logging from datetime import datetime -from typing import Dict, List +from typing import Any, Dict, List from cachetools import TTLCache -from pycoingecko import CoinGeckoAPI from requests.exceptions import RequestException -from freqtrade.constants import SUPPORTED_FIAT +from freqtrade.constants import SUPPORTED_FIAT, Config from freqtrade.mixins.logging_mixin import LoggingMixin +from freqtrade.util.coin_gecko import FtCoinGeckoApi logger = logging.getLogger(__name__) @@ -21,14 +21,14 @@ logger = logging.getLogger(__name__) # Manually map symbol to ID for some common coins # with duplicate coingecko entries coingecko_mapping = { - 'eth': 'ethereum', - 'bnb': 'binancecoin', - 'sol': 'solana', - 'usdt': 'tether', - 'busd': 'binance-usd', - 'tusd': 'true-usd', - 'usdc': 'usd-coin', - 'btc': 'bitcoin' + "eth": "ethereum", + "bnb": "binancecoin", + "sol": "solana", + "usdt": "tether", + "busd": "binance-usd", + "tusd": "true-usd", + "usdc": "usd-coin", + "btc": "bitcoin", } @@ -38,29 +38,30 @@ class CryptoToFiatConverter(LoggingMixin): This object contains a list of pair Crypto, FIAT This object is also a Singleton """ + __instance = None - _coingecko: CoinGeckoAPI = None + _coinlistings: List[Dict] = [] _backoff: float = 0.0 - def __new__(cls): + def __new__(cls, *args: Any, **kwargs: Any) -> Any: """ - This class is a singleton - cannot be instantiated twice. + Singleton pattern to ensure only one instance is created. 
""" - if CryptoToFiatConverter.__instance is None: - CryptoToFiatConverter.__instance = object.__new__(cls) - try: - # Limit retires to 1 (0 and 1) - # otherwise we risk bot impact if coingecko is down. - CryptoToFiatConverter._coingecko = CoinGeckoAPI(retries=1) - except BaseException: - CryptoToFiatConverter._coingecko = None - return CryptoToFiatConverter.__instance + if not cls.__instance: + cls.__instance = super().__new__(cls) + return cls.__instance - def __init__(self) -> None: + def __init__(self, config: Config) -> None: # Timeout: 6h self._pair_price: TTLCache = TTLCache(maxsize=500, ttl=6 * 60 * 60) + _coingecko_config = config.get("coingecko", {}) + self._coingecko = FtCoinGeckoApi( + api_key=_coingecko_config.get("api_key", ""), + is_demo=_coingecko_config.get("is_demo", True), + retries=1, + ) LoggingMixin.__init__(self, logger, 3600) self._load_cryptomap() @@ -71,7 +72,8 @@ class CryptoToFiatConverter(LoggingMixin): except RequestException as request_exception: if "429" in str(request_exception): logger.warning( - "Too many requests for CoinGecko API, backing off and trying again later.") + "Too many requests for CoinGecko API, backing off and trying again later." + ) # Set backoff timestamp to 60 seconds in the future self._backoff = datetime.now().timestamp() + 60 return @@ -80,9 +82,10 @@ class CryptoToFiatConverter(LoggingMixin): "Could not load FIAT Cryptocurrency map for the following problem: " f"{request_exception}" ) - except (Exception) as exception: + except Exception as exception: logger.error( - f"Could not load FIAT Cryptocurrency map for the following problem: {exception}") + f"Could not load FIAT Cryptocurrency map for the following problem: {exception}" + ) def _get_gecko_id(self, crypto_symbol): if not self._coinlistings: @@ -93,13 +96,13 @@ class CryptoToFiatConverter(LoggingMixin): return None else: return None - found = [x for x in self._coinlistings if x['symbol'].lower() == crypto_symbol] + found = [x for x in self._coinlistings if x["symbol"].lower() == crypto_symbol] if crypto_symbol in coingecko_mapping.keys(): - found = [x for x in self._coinlistings if x['id'] == coingecko_mapping[crypto_symbol]] + found = [x for x in self._coinlistings if x["id"] == coingecko_mapping[crypto_symbol]] if len(found) == 1: - return found[0]['id'] + return found[0]["id"] if len(found) > 0: # Wrong! @@ -130,26 +133,23 @@ class CryptoToFiatConverter(LoggingMixin): fiat_symbol = fiat_symbol.lower() inverse = False - if crypto_symbol == 'usd': + if crypto_symbol == "usd": # usd corresponds to "uniswap-state-dollar" for coingecko. 
# We'll therefore need to "swap" the currencies logger.info(f"reversing Rates {crypto_symbol}, {fiat_symbol}") crypto_symbol = fiat_symbol - fiat_symbol = 'usd' + fiat_symbol = "usd" inverse = True symbol = f"{crypto_symbol}/{fiat_symbol}" # Check if the fiat conversion you want is supported if not self._is_supported_fiat(fiat=fiat_symbol): - raise ValueError(f'The fiat {fiat_symbol} is not supported.') + raise ValueError(f"The fiat {fiat_symbol} is not supported.") price = self._pair_price.get(symbol, None) if not price: - price = self._find_price( - crypto_symbol=crypto_symbol, - fiat_symbol=fiat_symbol - ) + price = self._find_price(crypto_symbol=crypto_symbol, fiat_symbol=fiat_symbol) if inverse and price != 0.0: price = 1 / price self._pair_price[symbol] = price @@ -174,7 +174,7 @@ class CryptoToFiatConverter(LoggingMixin): """ # Check if the fiat conversion you want is supported if not self._is_supported_fiat(fiat=fiat_symbol): - raise ValueError(f'The fiat {fiat_symbol} is not supported.') + raise ValueError(f"The fiat {fiat_symbol} is not supported.") # No need to convert if both crypto and fiat are the same if crypto_symbol == fiat_symbol: @@ -185,16 +185,15 @@ class CryptoToFiatConverter(LoggingMixin): if not _gecko_id: # return 0 for unsupported stake currencies (fiat-convert should not break the bot) self.log_once( - f"unsupported crypto-symbol {crypto_symbol.upper()} - returning 0.0", - logger.warning) + f"unsupported crypto-symbol {crypto_symbol.upper()} - returning 0.0", logger.warning + ) return 0.0 try: return float( - self._coingecko.get_price( - ids=_gecko_id, - vs_currencies=fiat_symbol - )[_gecko_id][fiat_symbol] + self._coingecko.get_price(ids=_gecko_id, vs_currencies=fiat_symbol)[_gecko_id][ + fiat_symbol + ] ) except Exception as exception: logger.error("Error in _find_price: %s", exception) diff --git a/freqtrade/rpc/rpc.py b/freqtrade/rpc/rpc.py index 43be0fd94..4cafa12ad 100644 --- a/freqtrade/rpc/rpc.py +++ b/freqtrade/rpc/rpc.py @@ -1,6 +1,7 @@ """ This module contains class to define a RPC communications """ + import logging from abc import abstractmethod from datetime import date, datetime, timedelta, timezone @@ -16,11 +17,18 @@ from sqlalchemy import func, select from freqtrade import __version__ from freqtrade.configuration.timerange import TimeRange -from freqtrade.constants import CANCEL_REASON, Config +from freqtrade.constants import CANCEL_REASON, DEFAULT_DATAFRAME_COLUMNS, Config from freqtrade.data.history import load_data -from freqtrade.data.metrics import calculate_expectancy, calculate_max_drawdown -from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, MarketDirection, SignalDirection, - State, TradingMode) +from freqtrade.data.metrics import DrawDownResult, calculate_expectancy, calculate_max_drawdown +from freqtrade.enums import ( + CandleType, + ExitCheckTuple, + ExitType, + MarketDirection, + SignalDirection, + State, + TradingMode, +) from freqtrade.exceptions import ExchangeError, PricingError from freqtrade.exchange import timeframe_to_minutes, timeframe_to_msecs from freqtrade.exchange.types import Tickers @@ -54,14 +62,11 @@ class RPCException(Exception): return self.message def __json__(self): - return { - 'msg': self.message - } + return {"msg": self.message} class RPCHandler: - - def __init__(self, rpc: 'RPC', config: Config) -> None: + def __init__(self, rpc: "RPC", config: Config) -> None: """ Initializes RPCHandlers :param rpc: instance of RPC Helper class @@ -73,22 +78,23 @@ class RPCHandler: @property def name(self) 
-> str: - """ Returns the lowercase name of the implementation """ + """Returns the lowercase name of the implementation""" return self.__class__.__name__.lower() @abstractmethod def cleanup(self) -> None: - """ Cleanup pending module resources """ + """Cleanup pending module resources""" @abstractmethod def send_msg(self, msg: RPCSendMsg) -> None: - """ Sends a message to all registered rpc modules """ + """Sends a message to all registered rpc modules""" class RPC: """ RPC class can be used to have extra feature, like bot data, and access to DB data """ + # Bind _fiat_converter if needed _fiat_converter: Optional[CryptoToFiatConverter] = None @@ -100,58 +106,64 @@ class RPC: """ self._freqtrade = freqtrade self._config: Config = freqtrade.config - if self._config.get('fiat_display_currency'): - self._fiat_converter = CryptoToFiatConverter() + if self._config.get("fiat_display_currency"): + self._fiat_converter = CryptoToFiatConverter(self._config) @staticmethod - def _rpc_show_config(config, botstate: Union[State, str], - strategy_version: Optional[str] = None) -> Dict[str, Any]: + def _rpc_show_config( + config, botstate: Union[State, str], strategy_version: Optional[str] = None + ) -> Dict[str, Any]: """ Return a dict of config options. Explicitly does NOT return the full config to avoid leakage of sensitive information via rpc. """ val = { - 'version': __version__, - 'strategy_version': strategy_version, - 'dry_run': config['dry_run'], - 'trading_mode': config.get('trading_mode', 'spot'), - 'short_allowed': config.get('trading_mode', 'spot') != 'spot', - 'stake_currency': config['stake_currency'], - 'stake_currency_decimals': decimals_per_coin(config['stake_currency']), - 'stake_amount': str(config['stake_amount']), - 'available_capital': config.get('available_capital'), - 'max_open_trades': (config.get('max_open_trades', 0) - if config.get('max_open_trades', 0) != float('inf') else -1), - 'minimal_roi': config['minimal_roi'].copy() if 'minimal_roi' in config else {}, - 'stoploss': config.get('stoploss'), - 'stoploss_on_exchange': config.get('order_types', - {}).get('stoploss_on_exchange', False), - 'trailing_stop': config.get('trailing_stop'), - 'trailing_stop_positive': config.get('trailing_stop_positive'), - 'trailing_stop_positive_offset': config.get('trailing_stop_positive_offset'), - 'trailing_only_offset_is_reached': config.get('trailing_only_offset_is_reached'), - 'unfilledtimeout': config.get('unfilledtimeout'), - 'use_custom_stoploss': config.get('use_custom_stoploss'), - 'order_types': config.get('order_types'), - 'bot_name': config.get('bot_name', 'freqtrade'), - 'timeframe': config.get('timeframe'), - 'timeframe_ms': timeframe_to_msecs(config['timeframe'] - ) if 'timeframe' in config else 0, - 'timeframe_min': timeframe_to_minutes(config['timeframe'] - ) if 'timeframe' in config else 0, - 'exchange': config['exchange']['name'], - 'strategy': config['strategy'], - 'force_entry_enable': config.get('force_entry_enable', False), - 'exit_pricing': config.get('exit_pricing', {}), - 'entry_pricing': config.get('entry_pricing', {}), - 'state': str(botstate), - 'runmode': config['runmode'].value, - 'position_adjustment_enable': config.get('position_adjustment_enable', False), - 'max_entry_position_adjustment': ( - config.get('max_entry_position_adjustment', -1) - if config.get('max_entry_position_adjustment') != float('inf') - else -1) + "version": __version__, + "strategy_version": strategy_version, + "dry_run": config["dry_run"], + "trading_mode": config.get("trading_mode", 
"spot"), + "short_allowed": config.get("trading_mode", "spot") != "spot", + "stake_currency": config["stake_currency"], + "stake_currency_decimals": decimals_per_coin(config["stake_currency"]), + "stake_amount": str(config["stake_amount"]), + "available_capital": config.get("available_capital"), + "max_open_trades": ( + config.get("max_open_trades", 0) + if config.get("max_open_trades", 0) != float("inf") + else -1 + ), + "minimal_roi": config["minimal_roi"].copy() if "minimal_roi" in config else {}, + "stoploss": config.get("stoploss"), + "stoploss_on_exchange": config.get("order_types", {}).get( + "stoploss_on_exchange", False + ), + "trailing_stop": config.get("trailing_stop"), + "trailing_stop_positive": config.get("trailing_stop_positive"), + "trailing_stop_positive_offset": config.get("trailing_stop_positive_offset"), + "trailing_only_offset_is_reached": config.get("trailing_only_offset_is_reached"), + "unfilledtimeout": config.get("unfilledtimeout"), + "use_custom_stoploss": config.get("use_custom_stoploss"), + "order_types": config.get("order_types"), + "bot_name": config.get("bot_name", "freqtrade"), + "timeframe": config.get("timeframe"), + "timeframe_ms": timeframe_to_msecs(config["timeframe"]) if "timeframe" in config else 0, + "timeframe_min": ( + timeframe_to_minutes(config["timeframe"]) if "timeframe" in config else 0 + ), + "exchange": config["exchange"]["name"], + "strategy": config["strategy"], + "force_entry_enable": config.get("force_entry_enable", False), + "exit_pricing": config.get("exit_pricing", {}), + "entry_pricing": config.get("entry_pricing", {}), + "state": str(botstate), + "runmode": config["runmode"].value, + "position_adjustment_enable": config.get("position_adjustment_enable", False), + "max_entry_position_adjustment": ( + config.get("max_entry_position_adjustment", -1) + if config.get("max_entry_position_adjustment") != float("inf") + else -1 + ), } return val @@ -167,7 +179,7 @@ class RPC: trades = Trade.get_open_trades() if not trades: - raise RPCException('no active trade') + raise RPCException("no active trade") else: results = [] for trade in trades: @@ -177,11 +189,11 @@ class RPC: # prepare open orders details oo_details: Optional[str] = "" oo_details_lst = [ - f'({oo.order_type} {oo.side} rem={oo.safe_remaining:.8f})' + f"({oo.order_type} {oo.side} rem={oo.safe_remaining:.8f})" for oo in trade.open_orders - if oo.ft_order_side not in ['stoploss'] + if oo.ft_order_side not in ["stoploss"] ] - oo_details = ', '.join(oo_details_lst) + oo_details = ", ".join(oo_details_lst) total_profit_abs = 0.0 total_profit_ratio: Optional[float] = None @@ -189,11 +201,11 @@ class RPC: if trade.is_open: try: current_rate = self._freqtrade.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=False) + trade.pair, side="exit", is_short=trade.is_short, refresh=False + ) except (ExchangeError, PricingError): current_rate = NAN if len(trade.select_filled_orders(trade.entry_side)) > 0: - current_profit = current_profit_abs = current_profit_fiat = NAN if not isnan(current_rate): prof = trade.calculate_profit(current_rate) @@ -214,13 +226,13 @@ class RPC: if not isnan(current_profit_abs) and self._fiat_converter: current_profit_fiat = self._fiat_converter.convert_amount( current_profit_abs, - self._freqtrade.config['stake_currency'], - self._freqtrade.config['fiat_display_currency'] + self._freqtrade.config["stake_currency"], + self._freqtrade.config["fiat_display_currency"], ) total_profit_fiat = self._fiat_converter.convert_amount( 
total_profit_abs, - self._freqtrade.config['stake_currency'], - self._freqtrade.config['fiat_display_currency'] + self._freqtrade.config["stake_currency"], + self._freqtrade.config["fiat_display_currency"], ) # Calculate guaranteed profit (in case of trailing stop) @@ -234,32 +246,35 @@ class RPC: stoploss_current_dist_ratio = stoploss_current_dist / current_rate trade_dict = trade.to_json() - trade_dict.update(dict( - close_profit=trade.close_profit if not trade.is_open else None, - current_rate=current_rate, - profit_ratio=current_profit, - profit_pct=round(current_profit * 100, 2), - profit_abs=current_profit_abs, - profit_fiat=current_profit_fiat, - total_profit_abs=total_profit_abs, - total_profit_fiat=total_profit_fiat, - total_profit_ratio=total_profit_ratio, - stoploss_current_dist=stoploss_current_dist, - stoploss_current_dist_ratio=round(stoploss_current_dist_ratio, 8), - stoploss_current_dist_pct=round(stoploss_current_dist_ratio * 100, 2), - stoploss_entry_dist=stoploss_entry_dist, - stoploss_entry_dist_ratio=round(stoploss_entry_dist_ratio, 8), - open_orders=oo_details - )) + trade_dict.update( + dict( + close_profit=trade.close_profit if not trade.is_open else None, + current_rate=current_rate, + profit_ratio=current_profit, + profit_pct=round(current_profit * 100, 2), + profit_abs=current_profit_abs, + profit_fiat=current_profit_fiat, + total_profit_abs=total_profit_abs, + total_profit_fiat=total_profit_fiat, + total_profit_ratio=total_profit_ratio, + stoploss_current_dist=stoploss_current_dist, + stoploss_current_dist_ratio=round(stoploss_current_dist_ratio, 8), + stoploss_current_dist_pct=round(stoploss_current_dist_ratio * 100, 2), + stoploss_entry_dist=stoploss_entry_dist, + stoploss_entry_dist_ratio=round(stoploss_entry_dist_ratio, 8), + open_orders=oo_details, + ) + ) results.append(trade_dict) return results - def _rpc_status_table(self, stake_currency: str, - fiat_display_currency: str) -> Tuple[List, List, float]: + def _rpc_status_table( + self, stake_currency: str, fiat_display_currency: str + ) -> Tuple[List, List, float]: trades: List[Trade] = Trade.get_open_trades() - nonspot = self._config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT + nonspot = self._config.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT if not trades: - raise RPCException('no active trade') + raise RPCException("no active trade") else: trades_list = [] fiat_profit_sum = NAN @@ -267,53 +282,54 @@ class RPC: # calculate profit and send message to user try: current_rate = self._freqtrade.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=False) + trade.pair, side="exit", is_short=trade.is_short, refresh=False + ) except (PricingError, ExchangeError): current_rate = NAN trade_profit = NAN - profit_str = f'{NAN:.2%}' + profit_str = f"{NAN:.2%}" else: if trade.nr_of_successful_entries > 0: profit = trade.calculate_profit(current_rate) trade_profit = profit.profit_abs - profit_str = f'{profit.profit_ratio:.2%}' + profit_str = f"{profit.profit_ratio:.2%}" else: trade_profit = 0.0 - profit_str = f'{0.0:.2f}' - direction_str = ('S' if trade.is_short else 'L') if nonspot else '' + profit_str = f"{0.0:.2f}" + direction_str = ("S" if trade.is_short else "L") if nonspot else "" if self._fiat_converter: fiat_profit = self._fiat_converter.convert_amount( - trade_profit, - stake_currency, - fiat_display_currency + trade_profit, stake_currency, fiat_display_currency ) if not isnan(fiat_profit): profit_str += f" ({fiat_profit:.2f})" - fiat_profit_sum = 
fiat_profit if isnan(fiat_profit_sum) \ - else fiat_profit_sum + fiat_profit + fiat_profit_sum = ( + fiat_profit if isnan(fiat_profit_sum) else fiat_profit_sum + fiat_profit + ) else: profit_str += f" ({trade_profit:.2f})" - fiat_profit_sum = trade_profit if isnan(fiat_profit_sum) \ - else fiat_profit_sum + trade_profit + fiat_profit_sum = ( + trade_profit if isnan(fiat_profit_sum) else fiat_profit_sum + trade_profit + ) active_attempt_side_symbols = [ - '*' if (oo and oo.ft_order_side == trade.entry_side) else '**' + "*" if (oo and oo.ft_order_side == trade.entry_side) else "**" for oo in trade.open_orders ] # example: '*.**.**' trying to enter, exit and exit with 3 different orders - active_attempt_side_symbols_str = '.'.join(active_attempt_side_symbols) + active_attempt_side_symbols_str = ".".join(active_attempt_side_symbols) detail_trade = [ - f'{trade.id} {direction_str}', + f"{trade.id} {direction_str}", trade.pair + active_attempt_side_symbols_str, shorten_date(dt_humanize_delta(trade.open_date_utc)), - profit_str + profit_str, ] - if self._config.get('position_adjustment_enable', False): - max_entry_str = '' - if self._config.get('max_entry_position_adjustment', -1) > 0: + if self._config.get("position_adjustment_enable", False): + max_entry_str = "" + if self._config.get("max_entry_position_adjustment", -1) > 0: max_entry_str = f"/{self._config['max_entry_position_adjustment'] + 1}" filled_entries = trade.nr_of_successful_entries detail_trade.append(f"{filled_entries}{max_entry_str}") @@ -324,36 +340,35 @@ class RPC: else: profitcol += " (" + stake_currency + ")" - columns = [ - 'ID L/S' if nonspot else 'ID', - 'Pair', - 'Since', - profitcol] - if self._config.get('position_adjustment_enable', False): - columns.append('# Entries') + columns = ["ID L/S" if nonspot else "ID", "Pair", "Since", profitcol] + if self._config.get("position_adjustment_enable", False): + columns.append("# Entries") return trades_list, columns, fiat_profit_sum def _rpc_timeunit_profit( - self, timescale: int, - stake_currency: str, fiat_display_currency: str, - timeunit: str = 'days') -> Dict[str, Any]: + self, + timescale: int, + stake_currency: str, + fiat_display_currency: str, + timeunit: str = "days", + ) -> Dict[str, Any]: """ :param timeunit: Valid entries are 'days', 'weeks', 'months' """ start_date = datetime.now(timezone.utc).date() - if timeunit == 'weeks': + if timeunit == "weeks": # weekly start_date = start_date - timedelta(days=start_date.weekday()) # Monday - if timeunit == 'months': + if timeunit == "months": start_date = start_date.replace(day=1) def time_offset(step: int): - if timeunit == 'months': + if timeunit == "months": return relativedelta(months=step) return timedelta(**{timeunit: step}) if not (isinstance(timescale, int) and timescale > 0): - raise RPCException('timescale must be an integer greater than 0') + raise RPCException("timescale must be an integer greater than 0") profit_units: Dict[date, Dict] = {} daily_stake = self._freqtrade.wallets.get_total_stake_amount() @@ -363,61 +378,68 @@ class RPC: # Only query for necessary columns for performance reasons. 
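The time_offset helper above needs calendar-aware arithmetic for monthly buckets, while days and weeks are plain timedeltas. A small standalone illustration:

from datetime import date, timedelta

from dateutil.relativedelta import relativedelta


def time_offset(timeunit: str, step: int):
    # Months vary in length, so use relativedelta; days/weeks map directly to timedelta.
    if timeunit == "months":
        return relativedelta(months=step)
    return timedelta(**{timeunit: step})


today = date(2024, 3, 31)
print(today - time_offset("months", 1))  # 2024-02-29 (clamped to the end of February)
print(today - time_offset("weeks", 1))   # 2024-03-24
print(today - time_offset("days", 1))    # 2024-03-30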
trades = Trade.session.execute( select(Trade.close_profit_abs) - .filter(Trade.is_open.is_(False), - Trade.close_date >= profitday, - Trade.close_date < (profitday + time_offset(1))) + .filter( + Trade.is_open.is_(False), + Trade.close_date >= profitday, + Trade.close_date < (profitday + time_offset(1)), + ) .order_by(Trade.close_date) ).all() curdayprofit = sum( - trade.close_profit_abs for trade in trades if trade.close_profit_abs is not None) + trade.close_profit_abs for trade in trades if trade.close_profit_abs is not None + ) # Calculate this periods starting balance daily_stake = daily_stake - curdayprofit profit_units[profitday] = { - 'amount': curdayprofit, - 'daily_stake': daily_stake, - 'rel_profit': round(curdayprofit / daily_stake, 8) if daily_stake > 0 else 0, - 'trades': len(trades), + "amount": curdayprofit, + "daily_stake": daily_stake, + "rel_profit": round(curdayprofit / daily_stake, 8) if daily_stake > 0 else 0, + "trades": len(trades), } data = [ { - 'date': key, - 'abs_profit': value["amount"], - 'starting_balance': value["daily_stake"], - 'rel_profit': value["rel_profit"], - 'fiat_value': self._fiat_converter.convert_amount( - value['amount'], - stake_currency, - fiat_display_currency - ) if self._fiat_converter else 0, - 'trade_count': value["trades"], + "date": key, + "abs_profit": value["amount"], + "starting_balance": value["daily_stake"], + "rel_profit": value["rel_profit"], + "fiat_value": ( + self._fiat_converter.convert_amount( + value["amount"], stake_currency, fiat_display_currency + ) + if self._fiat_converter + else 0 + ), + "trade_count": value["trades"], } for key, value in profit_units.items() ] return { - 'stake_currency': stake_currency, - 'fiat_display_currency': fiat_display_currency, - 'data': data + "stake_currency": stake_currency, + "fiat_display_currency": fiat_display_currency, + "data": data, } def _rpc_trade_history(self, limit: int, offset: int = 0, order_by_id: bool = False) -> Dict: - """ Returns the X last trades """ + """Returns the X last trades""" order_by: Any = Trade.id if order_by_id else Trade.close_date.desc() if limit: trades = Trade.session.scalars( Trade.get_trades_query([Trade.is_open.is_(False)]) .order_by(order_by) .limit(limit) - .offset(offset)) + .offset(offset) + ) else: trades = Trade.session.scalars( - Trade.get_trades_query([Trade.is_open.is_(False)]) - .order_by(Trade.close_date.desc())) + Trade.get_trades_query([Trade.is_open.is_(False)]).order_by(Trade.close_date.desc()) + ) output = [trade.to_json() for trade in trades] total_trades = Trade.session.scalar( - select(func.count(Trade.id)).filter(Trade.is_open.is_(False))) + select(func.count(Trade.id)).filter(Trade.is_open.is_(False)) + ) return { "trades": output, @@ -430,45 +452,49 @@ class RPC: """ Generate generic stats for trades in database """ + def trade_win_loss(trade): if trade.close_profit > 0: - return 'wins' + return "wins" elif trade.close_profit < 0: - return 'losses' + return "losses" else: - return 'draws' + return "draws" + trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False) # Duration - dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []} + dur: Dict[str, List[float]] = {"wins": [], "draws": [], "losses": []} # Exit reason exit_reasons = {} for trade in trades: if trade.exit_reason not in exit_reasons: - exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0} + exit_reasons[trade.exit_reason] = {"wins": 0, "losses": 0, "draws": 0} exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1 
if trade.close_date is not None and trade.open_date is not None: trade_dur = (trade.close_date - trade.open_date).total_seconds() dur[trade_win_loss(trade)].append(trade_dur) - wins_dur = sum(dur['wins']) / len(dur['wins']) if len(dur['wins']) > 0 else None - draws_dur = sum(dur['draws']) / len(dur['draws']) if len(dur['draws']) > 0 else None - losses_dur = sum(dur['losses']) / len(dur['losses']) if len(dur['losses']) > 0 else None + wins_dur = sum(dur["wins"]) / len(dur["wins"]) if len(dur["wins"]) > 0 else None + draws_dur = sum(dur["draws"]) / len(dur["draws"]) if len(dur["draws"]) > 0 else None + losses_dur = sum(dur["losses"]) / len(dur["losses"]) if len(dur["losses"]) > 0 else None - durations = {'wins': wins_dur, 'draws': draws_dur, 'losses': losses_dur} - return {'exit_reasons': exit_reasons, 'durations': durations} + durations = {"wins": wins_dur, "draws": draws_dur, "losses": losses_dur} + return {"exit_reasons": exit_reasons, "durations": durations} def _rpc_trade_statistics( - self, stake_currency: str, fiat_display_currency: str, - start_date: Optional[datetime] = None) -> Dict[str, Any]: - """ Returns cumulative profit statistics """ + self, stake_currency: str, fiat_display_currency: str, start_date: Optional[datetime] = None + ) -> Dict[str, Any]: + """Returns cumulative profit statistics""" start_date = datetime.fromtimestamp(0) if start_date is None else start_date - trade_filter = ((Trade.is_open.is_(False) & (Trade.close_date >= start_date)) | - Trade.is_open.is_(True)) - trades: Sequence[Trade] = Trade.session.scalars(Trade.get_trades_query( - trade_filter, include_orders=False).order_by(Trade.id)).all() + trade_filter = ( + Trade.is_open.is_(False) & (Trade.close_date >= start_date) + ) | Trade.is_open.is_(True) + trades: Sequence[Trade] = Trade.session.scalars( + Trade.get_trades_query(trade_filter, include_orders=False).order_by(Trade.id) + ).all() profit_all_coin = [] profit_all_ratio = [] @@ -499,19 +525,22 @@ class RPC: losing_profit += profit_abs else: # Get current rate + if len(trade.select_filled_orders(trade.entry_side)) == 0: + # Skip trades with no filled orders + continue try: current_rate = self._freqtrade.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=False) + trade.pair, side="exit", is_short=trade.is_short, refresh=False + ) except (PricingError, ExchangeError): current_rate = NAN - if isnan(current_rate): profit_ratio = NAN profit_abs = NAN else: - profit = trade.calculate_profit(trade.close_rate or current_rate) + _profit = trade.calculate_profit(trade.close_rate or current_rate) - profit_ratio = profit.profit_ratio - profit_abs = profit.total_profit + profit_ratio = _profit.profit_ratio + profit_abs = _profit.total_profit profit_all_coin.append(profit_abs) profit_all_ratio.append(profit_ratio) @@ -526,11 +555,13 @@ class RPC: profit_closed_ratio_mean = float(mean(profit_closed_ratio) if profit_closed_ratio else 0.0) profit_closed_ratio_sum = sum(profit_closed_ratio) if profit_closed_ratio else 0.0 - profit_closed_fiat = self._fiat_converter.convert_amount( - profit_closed_coin_sum, - stake_currency, - fiat_display_currency - ) if self._fiat_converter else 0 + profit_closed_fiat = ( + self._fiat_converter.convert_amount( + profit_closed_coin_sum, stake_currency, fiat_display_currency + ) + if self._fiat_converter + else 0 + ) profit_all_coin_sum = round(sum(profit_all_coin), 8) profit_all_ratio_mean = float(mean(profit_all_ratio) if profit_all_ratio else 0.0) @@ -543,104 +574,111 @@ class RPC: 
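_rpc_stats above buckets closed trades by the sign of close_profit and then averages durations per bucket. The core of that logic, reduced to plain data (the sample numbers are made up):

durations = {"wins": [], "draws": [], "losses": []}
closed_trades = [(0.02, 3600.0), (0.0, 120.0), (-0.01, 7200.0)]  # (close_profit, duration in seconds)

for close_profit, trade_dur in closed_trades:
    bucket = "wins" if close_profit > 0 else "losses" if close_profit < 0 else "draws"
    durations[bucket].append(trade_dur)

# Average duration per bucket, or None when a bucket is empty.
averages = {k: (sum(v) / len(v) if v else None) for k, v in durations.items()}
print(averages)  # {'wins': 3600.0, 'draws': 120.0, 'losses': 7200.0}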
profit_closed_ratio_fromstart = profit_closed_coin_sum / starting_balance profit_all_ratio_fromstart = profit_all_coin_sum / starting_balance - profit_factor = winning_profit / abs(losing_profit) if losing_profit else float('inf') + profit_factor = winning_profit / abs(losing_profit) if losing_profit else float("inf") winrate = (winning_trades / closed_trade_count) if closed_trade_count > 0 else 0 - trades_df = DataFrame([{'close_date': format_date(trade.close_date), - 'close_date_dt': trade.close_date, - 'profit_abs': trade.close_profit_abs} - for trade in trades if not trade.is_open and trade.close_date]) + trades_df = DataFrame( + [ + { + "close_date": format_date(trade.close_date), + "close_date_dt": trade.close_date, + "profit_abs": trade.close_profit_abs, + } + for trade in trades + if not trade.is_open and trade.close_date + ] + ) expectancy, expectancy_ratio = calculate_expectancy(trades_df) - max_drawdown_abs = 0.0 - max_drawdown = 0.0 - drawdown_start: Optional[datetime] = None - drawdown_end: Optional[datetime] = None - dd_high_val = dd_low_val = 0.0 + drawdown = DrawDownResult() if len(trades_df) > 0: try: - (max_drawdown_abs, drawdown_start, drawdown_end, dd_high_val, dd_low_val, - max_drawdown) = calculate_max_drawdown( - trades_df, value_col='profit_abs', date_col='close_date_dt', - starting_balance=starting_balance) + drawdown = calculate_max_drawdown( + trades_df, + value_col="profit_abs", + date_col="close_date_dt", + starting_balance=starting_balance, + ) except ValueError: # ValueError if no losing trade. pass - profit_all_fiat = self._fiat_converter.convert_amount( - profit_all_coin_sum, - stake_currency, - fiat_display_currency - ) if self._fiat_converter else 0 + profit_all_fiat = ( + self._fiat_converter.convert_amount( + profit_all_coin_sum, stake_currency, fiat_display_currency + ) + if self._fiat_converter + else 0 + ) first_date = trades[0].open_date_utc if trades else None last_date = trades[-1].open_date_utc if trades else None num = float(len(durations) or 1) bot_start = KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME) return { - 'profit_closed_coin': profit_closed_coin_sum, - 'profit_closed_percent_mean': round(profit_closed_ratio_mean * 100, 2), - 'profit_closed_ratio_mean': profit_closed_ratio_mean, - 'profit_closed_percent_sum': round(profit_closed_ratio_sum * 100, 2), - 'profit_closed_ratio_sum': profit_closed_ratio_sum, - 'profit_closed_ratio': profit_closed_ratio_fromstart, - 'profit_closed_percent': round(profit_closed_ratio_fromstart * 100, 2), - 'profit_closed_fiat': profit_closed_fiat, - 'profit_all_coin': profit_all_coin_sum, - 'profit_all_percent_mean': round(profit_all_ratio_mean * 100, 2), - 'profit_all_ratio_mean': profit_all_ratio_mean, - 'profit_all_percent_sum': round(profit_all_ratio_sum * 100, 2), - 'profit_all_ratio_sum': profit_all_ratio_sum, - 'profit_all_ratio': profit_all_ratio_fromstart, - 'profit_all_percent': round(profit_all_ratio_fromstart * 100, 2), - 'profit_all_fiat': profit_all_fiat, - 'trade_count': len(trades), - 'closed_trade_count': closed_trade_count, - 'first_trade_date': format_date(first_date), - 'first_trade_humanized': dt_humanize_delta(first_date) if first_date else '', - 'first_trade_timestamp': dt_ts_def(first_date, 0), - 'latest_trade_date': format_date(last_date), - 'latest_trade_humanized': dt_humanize_delta(last_date) if last_date else '', - 'latest_trade_timestamp': dt_ts_def(last_date, 0), - 'avg_duration': str(timedelta(seconds=sum(durations) / num)).split('.')[0], - 'best_pair': best_pair[0] 
if best_pair else '', - 'best_rate': round(best_pair[1] * 100, 2) if best_pair else 0, # Deprecated - 'best_pair_profit_ratio': best_pair[1] if best_pair else 0, - 'winning_trades': winning_trades, - 'losing_trades': losing_trades, - 'profit_factor': profit_factor, - 'winrate': winrate, - 'expectancy': expectancy, - 'expectancy_ratio': expectancy_ratio, - 'max_drawdown': max_drawdown, - 'max_drawdown_abs': max_drawdown_abs, - 'max_drawdown_start': format_date(drawdown_start), - 'max_drawdown_start_timestamp': dt_ts_def(drawdown_start), - 'max_drawdown_end': format_date(drawdown_end), - 'max_drawdown_end_timestamp': dt_ts_def(drawdown_end), - 'drawdown_high': dd_high_val, - 'drawdown_low': dd_low_val, - 'trading_volume': trading_volume, - 'bot_start_timestamp': dt_ts_def(bot_start, 0), - 'bot_start_date': format_date(bot_start), + "profit_closed_coin": profit_closed_coin_sum, + "profit_closed_percent_mean": round(profit_closed_ratio_mean * 100, 2), + "profit_closed_ratio_mean": profit_closed_ratio_mean, + "profit_closed_percent_sum": round(profit_closed_ratio_sum * 100, 2), + "profit_closed_ratio_sum": profit_closed_ratio_sum, + "profit_closed_ratio": profit_closed_ratio_fromstart, + "profit_closed_percent": round(profit_closed_ratio_fromstart * 100, 2), + "profit_closed_fiat": profit_closed_fiat, + "profit_all_coin": profit_all_coin_sum, + "profit_all_percent_mean": round(profit_all_ratio_mean * 100, 2), + "profit_all_ratio_mean": profit_all_ratio_mean, + "profit_all_percent_sum": round(profit_all_ratio_sum * 100, 2), + "profit_all_ratio_sum": profit_all_ratio_sum, + "profit_all_ratio": profit_all_ratio_fromstart, + "profit_all_percent": round(profit_all_ratio_fromstart * 100, 2), + "profit_all_fiat": profit_all_fiat, + "trade_count": len(trades), + "closed_trade_count": closed_trade_count, + "first_trade_date": format_date(first_date), + "first_trade_humanized": dt_humanize_delta(first_date) if first_date else "", + "first_trade_timestamp": dt_ts_def(first_date, 0), + "latest_trade_date": format_date(last_date), + "latest_trade_humanized": dt_humanize_delta(last_date) if last_date else "", + "latest_trade_timestamp": dt_ts_def(last_date, 0), + "avg_duration": str(timedelta(seconds=sum(durations) / num)).split(".")[0], + "best_pair": best_pair[0] if best_pair else "", + "best_rate": round(best_pair[1] * 100, 2) if best_pair else 0, # Deprecated + "best_pair_profit_ratio": best_pair[1] if best_pair else 0, + "winning_trades": winning_trades, + "losing_trades": losing_trades, + "profit_factor": profit_factor, + "winrate": winrate, + "expectancy": expectancy, + "expectancy_ratio": expectancy_ratio, + "max_drawdown": drawdown.relative_account_drawdown, + "max_drawdown_abs": drawdown.drawdown_abs, + "max_drawdown_start": format_date(drawdown.high_date), + "max_drawdown_start_timestamp": dt_ts_def(drawdown.high_date), + "max_drawdown_end": format_date(drawdown.low_date), + "max_drawdown_end_timestamp": dt_ts_def(drawdown.low_date), + "drawdown_high": drawdown.high_value, + "drawdown_low": drawdown.low_value, + "trading_volume": trading_volume, + "bot_start_timestamp": dt_ts_def(bot_start, 0), + "bot_start_date": format_date(bot_start), } def __balance_get_est_stake( - self, coin: str, stake_currency: str, amount: float, - balance: Wallet, tickers) -> Tuple[float, float]: + self, coin: str, stake_currency: str, amount: float, balance: Wallet, tickers + ) -> Tuple[float, float]: est_stake = 0.0 est_bot_stake = 0.0 if coin == stake_currency: est_stake = balance.total - if 
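The drawdown refactor above replaces a six-element tuple with a result object, so the stats dict can read named fields and the ValueError branch can fall back to a default instance. A hedged sketch of that shape (field names mirror the attributes accessed in the diff; the real DrawDownResult lives in freqtrade.data.metrics and may differ):

from dataclasses import dataclass
from datetime import datetime
from typing import Optional


@dataclass
class DrawDownResult:
    drawdown_abs: float = 0.0
    high_date: Optional[datetime] = None
    low_date: Optional[datetime] = None
    high_value: float = 0.0
    low_value: float = 0.0
    relative_account_drawdown: float = 0.0


# Safe defaults when there are no losing trades and calculate_max_drawdown raises ValueError.
drawdown = DrawDownResult()
print(drawdown.drawdown_abs, drawdown.relative_account_drawdown)  # 0.0 0.0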
self._config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT: + if self._config.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT: # in Futures, "total" includes the locked stake, and therefore all positions est_stake = balance.free est_bot_stake = amount else: pair = self._freqtrade.exchange.get_valid_pair_combination(coin, stake_currency) - rate: Optional[float] = tickers.get(pair, {}).get('last', None) + rate: Optional[float] = tickers.get(pair, {}).get("last", None) if rate: if pair.startswith(stake_currency) and not pair.endswith(stake_currency): rate = 1.0 / rate @@ -650,21 +688,26 @@ class RPC: return est_stake, est_bot_stake def _rpc_balance(self, stake_currency: str, fiat_display_currency: str) -> Dict: - """ Returns current account balance per crypto """ + """Returns current account balance per crypto""" currencies: List[Dict] = [] total = 0.0 total_bot = 0.0 try: tickers: Tickers = self._freqtrade.exchange.get_tickers(cached=True) - except (ExchangeError): - raise RPCException('Error getting current tickers.') + except ExchangeError: + raise RPCException("Error getting current tickers.") open_trades: List[Trade] = Trade.get_open_trades() open_assets: Dict[str, Trade] = {t.safe_base_currency: t for t in open_trades} self._freqtrade.wallets.update(require_update=False) starting_capital = self._freqtrade.wallets.get_starting_balance() - starting_cap_fiat = self._fiat_converter.convert_amount( - starting_capital, stake_currency, fiat_display_currency) if self._fiat_converter else 0 + starting_cap_fiat = ( + self._fiat_converter.convert_amount( + starting_capital, stake_currency, fiat_display_currency + ) + if self._fiat_converter + else 0 + ) coin: str balance: Wallet for coin, balance in self._freqtrade.wallets.get_all_balances().items(): @@ -679,7 +722,8 @@ class RPC: try: est_stake, est_stake_bot = self.__balance_get_est_stake( - coin, stake_currency, trade_amount, balance, tickers) + coin, stake_currency, trade_amount, balance, tickers + ) except ValueError: continue @@ -687,89 +731,99 @@ class RPC: if is_bot_managed: total_bot += est_stake_bot - currencies.append({ - 'currency': coin, - 'free': balance.free, - 'balance': balance.total, - 'used': balance.used, - 'bot_owned': trade_amount, - 'est_stake': est_stake or 0, - 'est_stake_bot': est_stake_bot if is_bot_managed else 0, - 'stake': stake_currency, - 'side': 'long', - 'leverage': 1, - 'position': 0, - 'is_bot_managed': is_bot_managed, - 'is_position': False, - }) + currencies.append( + { + "currency": coin, + "free": balance.free, + "balance": balance.total, + "used": balance.used, + "bot_owned": trade_amount, + "est_stake": est_stake or 0, + "est_stake_bot": est_stake_bot if is_bot_managed else 0, + "stake": stake_currency, + "side": "long", + "leverage": 1, + "position": 0, + "is_bot_managed": is_bot_managed, + "is_position": False, + } + ) symbol: str position: PositionWallet for symbol, position in self._freqtrade.wallets.get_all_positions().items(): total += position.collateral total_bot += position.collateral - currencies.append({ - 'currency': symbol, - 'free': 0, - 'balance': 0, - 'used': 0, - 'position': position.position, - 'est_stake': position.collateral, - 'est_stake_bot': position.collateral, - 'stake': stake_currency, - 'leverage': position.leverage, - 'side': position.side, - 'is_bot_managed': True, - 'is_position': True - }) + currencies.append( + { + "currency": symbol, + "free": 0, + "balance": 0, + "used": 0, + "position": position.position, + "est_stake": position.collateral, + 
"est_stake_bot": position.collateral, + "stake": stake_currency, + "leverage": position.leverage, + "side": position.side, + "is_bot_managed": True, + "is_position": True, + } + ) - value = self._fiat_converter.convert_amount( - total, stake_currency, fiat_display_currency) if self._fiat_converter else 0 - value_bot = self._fiat_converter.convert_amount( - total_bot, stake_currency, fiat_display_currency) if self._fiat_converter else 0 + value = ( + self._fiat_converter.convert_amount(total, stake_currency, fiat_display_currency) + if self._fiat_converter + else 0 + ) + value_bot = ( + self._fiat_converter.convert_amount(total_bot, stake_currency, fiat_display_currency) + if self._fiat_converter + else 0 + ) trade_count = len(Trade.get_trades_proxy()) starting_capital_ratio = (total_bot / starting_capital) - 1 if starting_capital else 0.0 starting_cap_fiat_ratio = (value_bot / starting_cap_fiat) - 1 if starting_cap_fiat else 0.0 return { - 'currencies': currencies, - 'total': total, - 'total_bot': total_bot, - 'symbol': fiat_display_currency, - 'value': value, - 'value_bot': value_bot, - 'stake': stake_currency, - 'starting_capital': starting_capital, - 'starting_capital_ratio': starting_capital_ratio, - 'starting_capital_pct': round(starting_capital_ratio * 100, 2), - 'starting_capital_fiat': starting_cap_fiat, - 'starting_capital_fiat_ratio': starting_cap_fiat_ratio, - 'starting_capital_fiat_pct': round(starting_cap_fiat_ratio * 100, 2), - 'trade_count': trade_count, - 'note': 'Simulated balances' if self._freqtrade.config['dry_run'] else '' + "currencies": currencies, + "total": total, + "total_bot": total_bot, + "symbol": fiat_display_currency, + "value": value, + "value_bot": value_bot, + "stake": stake_currency, + "starting_capital": starting_capital, + "starting_capital_ratio": starting_capital_ratio, + "starting_capital_pct": round(starting_capital_ratio * 100, 2), + "starting_capital_fiat": starting_cap_fiat, + "starting_capital_fiat_ratio": starting_cap_fiat_ratio, + "starting_capital_fiat_pct": round(starting_cap_fiat_ratio * 100, 2), + "trade_count": trade_count, + "note": "Simulated balances" if self._freqtrade.config["dry_run"] else "", } def _rpc_start(self) -> Dict[str, str]: - """ Handler for start """ + """Handler for start""" if self._freqtrade.state == State.RUNNING: - return {'status': 'already running'} + return {"status": "already running"} self._freqtrade.state = State.RUNNING - return {'status': 'starting trader ...'} + return {"status": "starting trader ..."} def _rpc_stop(self) -> Dict[str, str]: - """ Handler for stop """ + """Handler for stop""" if self._freqtrade.state == State.RUNNING: self._freqtrade.state = State.STOPPED - return {'status': 'stopping trader ...'} + return {"status": "stopping trader ..."} - return {'status': 'already stopped'} + return {"status": "already stopped"} def _rpc_reload_config(self) -> Dict[str, str]: - """ Handler for reload_config. """ + """Handler for reload_config.""" self._freqtrade.state = State.RELOAD_CONFIG - return {'status': 'Reloading config ...'} + return {"status": "Reloading config ..."} def _rpc_stopentry(self) -> Dict[str, str]: """ @@ -777,10 +831,10 @@ class RPC: """ if self._freqtrade.state == State.RUNNING: # Set 'max_open_trades' to 0 - self._freqtrade.config['max_open_trades'] = 0 + self._freqtrade.config["max_open_trades"] = 0 self._freqtrade.strategy.max_open_trades = 0 - return {'status': 'No more entries will occur from now. 
Run /reload_config to reset.'} + return {"status": "No more entries will occur from now. Run /reload_config to reset."} def _rpc_reload_trade_from_exchange(self, trade_id: int) -> Dict[str, str]: """ @@ -792,112 +846,126 @@ class RPC: raise RPCException(f"Could not find trade with id {trade_id}.") self._freqtrade.handle_onexchange_order(trade) - return {'status': 'Reloaded from orders from exchange'} + return {"status": "Reloaded from orders from exchange"} - def __exec_force_exit(self, trade: Trade, ordertype: Optional[str], - amount: Optional[float] = None) -> bool: + def __exec_force_exit( + self, trade: Trade, ordertype: Optional[str], amount: Optional[float] = None + ) -> bool: # Check if there is there are open orders trade_entry_cancelation_registry = [] for oo in trade.open_orders: - trade_entry_cancelation_res = {'order_id': oo.order_id, 'cancel_state': False} + trade_entry_cancelation_res = {"order_id": oo.order_id, "cancel_state": False} order = self._freqtrade.exchange.fetch_order(oo.order_id, trade.pair) - if order['side'] == trade.entry_side: + if order["side"] == trade.entry_side: fully_canceled = self._freqtrade.handle_cancel_enter( - trade, order, oo, CANCEL_REASON['FORCE_EXIT']) - trade_entry_cancelation_res['cancel_state'] = fully_canceled + trade, order, oo, CANCEL_REASON["FORCE_EXIT"] + ) + trade_entry_cancelation_res["cancel_state"] = fully_canceled trade_entry_cancelation_registry.append(trade_entry_cancelation_res) - if order['side'] == trade.exit_side: + if order["side"] == trade.exit_side: # Cancel order - so it is placed anew with a fresh price. - self._freqtrade.handle_cancel_exit( - trade, order, oo, CANCEL_REASON['FORCE_EXIT']) + self._freqtrade.handle_cancel_exit(trade, order, oo, CANCEL_REASON["FORCE_EXIT"]) - if all(tocr['cancel_state'] is False for tocr in trade_entry_cancelation_registry): + if all(tocr["cancel_state"] is False for tocr in trade_entry_cancelation_registry): if trade.has_open_orders: # Order cancellation failed, so we can't exit. return False # Get current rate and execute sell current_rate = self._freqtrade.exchange.get_rate( - trade.pair, side='exit', is_short=trade.is_short, refresh=True) + trade.pair, side="exit", is_short=trade.is_short, refresh=True + ) exit_check = ExitCheckTuple(exit_type=ExitType.FORCE_EXIT) order_type = ordertype or self._freqtrade.strategy.order_types.get( - "force_exit", self._freqtrade.strategy.order_types["exit"]) + "force_exit", self._freqtrade.strategy.order_types["exit"] + ) sub_amount: Optional[float] = None if amount and amount < trade.amount: # Partial exit ... min_exit_stake = self._freqtrade.exchange.get_min_pair_stake_amount( - trade.pair, current_rate, trade.stop_loss_pct) + trade.pair, current_rate, trade.stop_loss_pct + ) remaining = (trade.amount - amount) * current_rate if remaining < min_exit_stake: - raise RPCException(f'Remaining amount of {remaining} would be too small.') + raise RPCException(f"Remaining amount of {remaining} would be too small.") sub_amount = amount self._freqtrade.execute_trade_exit( - trade, current_rate, exit_check, ordertype=order_type, - sub_trade_amt=sub_amount) + trade, current_rate, exit_check, ordertype=order_type, sub_trade_amt=sub_amount + ) return True return False - def _rpc_force_exit(self, trade_id: str, ordertype: Optional[str] = None, *, - amount: Optional[float] = None) -> Dict[str, str]: + def _rpc_force_exit( + self, trade_id: str, ordertype: Optional[str] = None, *, amount: Optional[float] = None + ) -> Dict[str, str]: """ Handler for forceexit . 
Sells the given trade at current price """ if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') + raise RPCException("trader is not running") with self._freqtrade._exit_lock: - if trade_id == 'all': + if trade_id == "all": # Execute exit for all open orders for trade in Trade.get_open_trades(): self.__exec_force_exit(trade, ordertype) Trade.commit() self._freqtrade.wallets.update() - return {'result': 'Created exit orders for all open trades.'} + return {"result": "Created exit orders for all open trades."} # Query for trade trade = Trade.get_trades( - trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True), ] + trade_filter=[ + Trade.id == trade_id, + Trade.is_open.is_(True), + ] ).first() if not trade: - logger.warning('force_exit: Invalid argument received') - raise RPCException('invalid argument') + logger.warning("force_exit: Invalid argument received") + raise RPCException("invalid argument") result = self.__exec_force_exit(trade, ordertype, amount) Trade.commit() self._freqtrade.wallets.update() if not result: - raise RPCException('Failed to exit trade.') - return {'result': f'Created exit order for trade {trade_id}.'} + raise RPCException("Failed to exit trade.") + return {"result": f"Created exit order for trade {trade_id}."} def _force_entry_validations(self, pair: str, order_side: SignalDirection): - if not self._freqtrade.config.get('force_entry_enable', False): - raise RPCException('Force_entry not enabled.') + if not self._freqtrade.config.get("force_entry_enable", False): + raise RPCException("Force_entry not enabled.") if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') + raise RPCException("trader is not running") if order_side == SignalDirection.SHORT and self._freqtrade.trading_mode == TradingMode.SPOT: raise RPCException("Can't go short on Spot markets.") if pair not in self._freqtrade.exchange.get_markets(tradable_only=True): - raise RPCException('Symbol does not exist or market is not active.') + raise RPCException("Symbol does not exist or market is not active.") # Check if pair quote currency equals to the stake currency. - stake_currency = self._freqtrade.config.get('stake_currency') + stake_currency = self._freqtrade.config.get("stake_currency") if not self._freqtrade.exchange.get_pair_quote_currency(pair) == stake_currency: raise RPCException( - f'Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed.') + f"Wrong pair selected. Only pairs with stake-currency {stake_currency} allowed." 
+ ) - def _rpc_force_entry(self, pair: str, price: Optional[float], *, - order_type: Optional[str] = None, - order_side: SignalDirection = SignalDirection.LONG, - stake_amount: Optional[float] = None, - enter_tag: Optional[str] = 'force_entry', - leverage: Optional[float] = None) -> Optional[Trade]: + def _rpc_force_entry( + self, + pair: str, + price: Optional[float], + *, + order_type: Optional[str] = None, + order_side: SignalDirection = SignalDirection.LONG, + stake_amount: Optional[float] = None, + enter_tag: Optional[str] = "force_entry", + leverage: Optional[float] = None, + ) -> Optional[Trade]: """ Handler for forcebuy Buys a pair trade at the given or current price @@ -908,56 +976,68 @@ class RPC: # check if pair already has an open pair trade: Optional[Trade] = Trade.get_trades( - [Trade.is_open.is_(True), Trade.pair == pair]).first() - is_short = (order_side == SignalDirection.SHORT) + [Trade.is_open.is_(True), Trade.pair == pair] + ).first() + is_short = order_side == SignalDirection.SHORT if trade: is_short = trade.is_short if not self._freqtrade.strategy.position_adjustment_enable: raise RPCException(f"position for {pair} already open - id: {trade.id}") if trade.has_open_orders: - raise RPCException(f"position for {pair} already open - id: {trade.id} " - f"and has open order {','.join(trade.open_orders_ids)}") + raise RPCException( + f"position for {pair} already open - id: {trade.id} " + f"and has open order {','.join(trade.open_orders_ids)}" + ) else: - if Trade.get_open_trade_count() >= self._config['max_open_trades']: + if Trade.get_open_trade_count() >= self._config["max_open_trades"]: raise RPCException("Maximum number of trades is reached.") if not stake_amount: # gen stake amount stake_amount = self._freqtrade.wallets.get_trade_stake_amount( - pair, self._config['max_open_trades']) + pair, self._config["max_open_trades"] + ) # execute buy if not order_type: order_type = self._freqtrade.strategy.order_types.get( - 'force_entry', self._freqtrade.strategy.order_types['entry']) + "force_entry", self._freqtrade.strategy.order_types["entry"] + ) with self._freqtrade._exit_lock: - if self._freqtrade.execute_entry(pair, stake_amount, price, - ordertype=order_type, trade=trade, - is_short=is_short, - enter_tag=enter_tag, - leverage_=leverage, - mode='pos_adjust' if trade else 'initial' - ): + if self._freqtrade.execute_entry( + pair, + stake_amount, + price, + ordertype=order_type, + trade=trade, + is_short=is_short, + enter_tag=enter_tag, + leverage_=leverage, + mode="pos_adjust" if trade else "initial", + ): Trade.commit() trade = Trade.get_trades([Trade.is_open.is_(True), Trade.pair == pair]).first() return trade else: - raise RPCException(f'Failed to enter position for {pair}.') + raise RPCException(f"Failed to enter position for {pair}.") def _rpc_cancel_open_order(self, trade_id: int): if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') + raise RPCException("trader is not running") with self._freqtrade._exit_lock: # Query for trade trade = Trade.get_trades( - trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True), ] + trade_filter=[ + Trade.id == trade_id, + Trade.is_open.is_(True), + ] ).first() if not trade: - logger.warning('cancel_open_order: Invalid trade_id received.') - raise RPCException('Invalid trade_id.') + logger.warning("cancel_open_order: Invalid trade_id received.") + raise RPCException("Invalid trade_id.") if not trade.has_open_orders: - logger.warning('cancel_open_order: No open order for trade_id.') - raise 
RPCException('No open order for trade_id.') + logger.warning("cancel_open_order: No open order for trade_id.") + raise RPCException("No open order for trade_id.") for open_order in trade.open_orders: try: @@ -966,7 +1046,8 @@ class RPC: logger.info(f"Cannot query order for {trade} due to {e}.", exc_info=True) raise RPCException("Order not found.") self._freqtrade.handle_cancel_order( - order, open_order, trade, CANCEL_REASON['USER_CANCEL']) + order, open_order, trade, CANCEL_REASON["USER_CANCEL"] + ) Trade.commit() def _rpc_delete(self, trade_id: int) -> Dict[str, Union[str, int]]: @@ -978,35 +1059,36 @@ class RPC: c_count = 0 trade = Trade.get_trades(trade_filter=[Trade.id == trade_id]).first() if not trade: - logger.warning('delete trade: Invalid argument received') - raise RPCException('invalid argument') + logger.warning("delete trade: Invalid argument received") + raise RPCException("invalid argument") # Try cancelling regular order if that exists for open_order in trade.open_orders: try: self._freqtrade.exchange.cancel_order(open_order.order_id, trade.pair) c_count += 1 - except (ExchangeError): + except ExchangeError: pass # cancel stoploss on exchange orders ... - if (self._freqtrade.strategy.order_types.get('stoploss_on_exchange') - and trade.has_open_sl_orders): - + if ( + self._freqtrade.strategy.order_types.get("stoploss_on_exchange") + and trade.has_open_sl_orders + ): for oslo in trade.open_sl_orders: try: self._freqtrade.exchange.cancel_stoploss_order(oslo.order_id, trade.pair) c_count += 1 - except (ExchangeError): + except ExchangeError: pass trade.delete() self._freqtrade.wallets.update() return { - 'result': 'success', - 'trade_id': trade_id, - 'result_msg': f'Deleted trade {trade_id}. Closed {c_count} open orders.', - 'cancel_order_count': c_count, + "result": "success", + "trade_id": trade_id, + "result_msg": f"Deleted trade {trade_id}. 
Closed {c_count} open orders.", + "cancel_order_count": c_count, } def _rpc_list_custom_data(self, trade_id: int, key: Optional[str]) -> List[Dict[str, Any]]: @@ -1024,13 +1106,13 @@ class RPC: custom_data = trade.get_all_custom_data() return [ { - 'id': data_entry.id, - 'ft_trade_id': data_entry.ft_trade_id, - 'cd_key': data_entry.cd_key, - 'cd_type': data_entry.cd_type, - 'cd_value': data_entry.cd_value, - 'created_at': data_entry.created_at, - 'updated_at': data_entry.updated_at + "id": data_entry.id, + "ft_trade_id": data_entry.ft_trade_id, + "cd_key": data_entry.cd_key, + "cd_type": data_entry.cd_type, + "cd_value": data_entry.cd_value, + "created_at": data_entry.created_at, + "updated_at": data_entry.updated_at, } for data_entry in custom_data ] @@ -1068,30 +1150,31 @@ class RPC: return mix_tags def _rpc_count(self) -> Dict[str, float]: - """ Returns the number of trades running """ + """Returns the number of trades running""" if self._freqtrade.state != State.RUNNING: - raise RPCException('trader is not running') + raise RPCException("trader is not running") trades = Trade.get_open_trades() return { - 'current': len(trades), - 'max': (int(self._freqtrade.config['max_open_trades']) - if self._freqtrade.config['max_open_trades'] != float('inf') else -1), - 'total_stake': sum((trade.open_rate * trade.amount) for trade in trades) + "current": len(trades), + "max": ( + int(self._freqtrade.config["max_open_trades"]) + if self._freqtrade.config["max_open_trades"] != float("inf") + else -1 + ), + "total_stake": sum((trade.open_rate * trade.amount) for trade in trades), } def _rpc_locks(self) -> Dict[str, Any]: - """ Returns the current locks """ + """Returns the current locks""" locks = PairLocks.get_pair_locks(None) - return { - 'lock_count': len(locks), - 'locks': [lock.to_json() for lock in locks] - } + return {"lock_count": len(locks), "locks": [lock.to_json() for lock in locks]} - def _rpc_delete_lock(self, lockid: Optional[int] = None, - pair: Optional[str] = None) -> Dict[str, Any]: - """ Delete specific lock(s) """ + def _rpc_delete_lock( + self, lockid: Optional[int] = None, pair: Optional[str] = None + ) -> Dict[str, Any]: + """Delete specific lock(s)""" locks: Sequence[PairLock] = [] if pair: @@ -1108,7 +1191,8 @@ class RPC: return self._rpc_locks() def _rpc_add_lock( - self, pair: str, until: datetime, reason: Optional[str], side: str) -> PairLock: + self, pair: str, until: datetime, reason: Optional[str], side: str + ) -> PairLock: lock = PairLocks.lock_pair( pair=pair, until=until, @@ -1118,29 +1202,28 @@ class RPC: return lock def _rpc_whitelist(self) -> Dict: - """ Returns the currently active whitelist""" - res = {'method': self._freqtrade.pairlists.name_list, - 'length': len(self._freqtrade.active_pair_whitelist), - 'whitelist': self._freqtrade.active_pair_whitelist - } + """Returns the currently active whitelist""" + res = { + "method": self._freqtrade.pairlists.name_list, + "length": len(self._freqtrade.active_pair_whitelist), + "whitelist": self._freqtrade.active_pair_whitelist, + } return res def _rpc_blacklist_delete(self, delete: List[str]) -> Dict: - """ Removes pairs from currently active blacklist """ + """Removes pairs from currently active blacklist""" errors = {} for pair in delete: if pair in self._freqtrade.pairlists.blacklist: self._freqtrade.pairlists.blacklist.remove(pair) else: - errors[pair] = { - 'error_msg': f"Pair {pair} is not in the current blacklist." 
- } + errors[pair] = {"error_msg": f"Pair {pair} is not in the current blacklist."} resp = self._rpc_blacklist() - resp['errors'] = errors + resp["errors"] = errors return resp def _rpc_blacklist(self, add: Optional[List[str]] = None) -> Dict: - """ Returns the currently active blacklist""" + """Returns the currently active blacklist""" errors = {} if add: for pair in add: @@ -1150,18 +1233,17 @@ class RPC: self._freqtrade.pairlists.blacklist.append(pair) except ValueError: - errors[pair] = { - 'error_msg': f'Pair {pair} is not a valid wildcard.'} + errors[pair] = {"error_msg": f"Pair {pair} is not a valid wildcard."} else: - errors[pair] = { - 'error_msg': f'Pair {pair} already in pairlist.'} + errors[pair] = {"error_msg": f"Pair {pair} already in pairlist."} - res = {'method': self._freqtrade.pairlists.name_list, - 'length': len(self._freqtrade.pairlists.blacklist), - 'blacklist': self._freqtrade.pairlists.blacklist, - 'blacklist_expanded': self._freqtrade.pairlists.expanded_blacklist, - 'errors': errors, - } + res = { + "method": self._freqtrade.pairlists.name_list, + "length": len(self._freqtrade.pairlists.blacklist), + "blacklist": self._freqtrade.pairlists.blacklist, + "blacklist_expanded": self._freqtrade.pairlists.expanded_blacklist, + "errors": errors, + } return res @staticmethod @@ -1171,47 +1253,65 @@ class RPC: buffer = bufferHandler.buffer[-limit:] else: buffer = bufferHandler.buffer - records = [[format_date(datetime.fromtimestamp(r.created)), - r.created * 1000, r.name, r.levelname, - r.message + ('\n' + r.exc_text if r.exc_text else '')] - for r in buffer] + records = [ + [ + format_date(datetime.fromtimestamp(r.created)), + r.created * 1000, + r.name, + r.levelname, + r.message + ("\n" + r.exc_text if r.exc_text else ""), + ] + for r in buffer + ] # Log format: # [logtime-formatted, logepoch, logger-name, loglevel, message \n + exception] # e.g. 
["2020-08-27 11:35:01", 1598520901097.9397, # "freqtrade.worker", "INFO", "Starting worker develop"] - return {'log_count': len(records), 'logs': records} + return {"log_count": len(records), "logs": records} def _rpc_edge(self) -> List[Dict[str, Any]]: - """ Returns information related to Edge """ + """Returns information related to Edge""" if not self._freqtrade.edge: - raise RPCException('Edge is not enabled.') + raise RPCException("Edge is not enabled.") return self._freqtrade.edge.accepted_pairs() @staticmethod - def _convert_dataframe_to_dict(strategy: str, pair: str, timeframe: str, dataframe: DataFrame, - last_analyzed: datetime) -> Dict[str, Any]: + def _convert_dataframe_to_dict( + strategy: str, + pair: str, + timeframe: str, + dataframe: DataFrame, + last_analyzed: datetime, + selected_cols: Optional[List[str]], + ) -> Dict[str, Any]: has_content = len(dataframe) != 0 + dataframe_columns = list(dataframe.columns) signals = { - 'enter_long': 0, - 'exit_long': 0, - 'enter_short': 0, - 'exit_short': 0, + "enter_long": 0, + "exit_long": 0, + "enter_short": 0, + "exit_short": 0, } if has_content: + if selected_cols is not None: + # Ensure OHLCV columns are always present + cols_set = set(DEFAULT_DATAFRAME_COLUMNS + list(signals.keys()) + selected_cols) + df_cols = [col for col in dataframe_columns if col in cols_set] + dataframe = dataframe.loc[:, df_cols] - dataframe.loc[:, '__date_ts'] = dataframe.loc[:, 'date'].astype(int64) // 1000 // 1000 + dataframe.loc[:, "__date_ts"] = dataframe.loc[:, "date"].astype(int64) // 1000 // 1000 # Move signal close to separate column when signal for easy plotting for sig_type in signals.keys(): if sig_type in dataframe.columns: - mask = (dataframe[sig_type] == 1) + mask = dataframe[sig_type] == 1 signals[sig_type] = int(mask.sum()) - dataframe.loc[mask, f'_{sig_type}_signal_close'] = dataframe.loc[mask, 'close'] + dataframe.loc[mask, f"_{sig_type}_signal_close"] = dataframe.loc[mask, "close"] # band-aid until this is fixed: # https://github.com/pandas-dev/pandas/issues/45836 - datetime_types = ['datetime', 'datetime64', 'datetime64[ns, UTC]'] + datetime_types = ["datetime", "datetime64", "datetime64[ns, UTC]"] date_columns = dataframe.select_dtypes(include=datetime_types) for date_column in date_columns: # replace NaT with `None` @@ -1220,48 +1320,50 @@ class RPC: dataframe = dataframe.replace({inf: None, -inf: None, NAN: None}) res = { - 'pair': pair, - 'timeframe': timeframe, - 'timeframe_ms': timeframe_to_msecs(timeframe), - 'strategy': strategy, - 'columns': list(dataframe.columns), - 'data': dataframe.values.tolist(), - 'length': len(dataframe), - 'buy_signals': signals['enter_long'], # Deprecated - 'sell_signals': signals['exit_long'], # Deprecated - 'enter_long_signals': signals['enter_long'], - 'exit_long_signals': signals['exit_long'], - 'enter_short_signals': signals['enter_short'], - 'exit_short_signals': signals['exit_short'], - 'last_analyzed': last_analyzed, - 'last_analyzed_ts': int(last_analyzed.timestamp()), - 'data_start': '', - 'data_start_ts': 0, - 'data_stop': '', - 'data_stop_ts': 0, + "pair": pair, + "timeframe": timeframe, + "timeframe_ms": timeframe_to_msecs(timeframe), + "strategy": strategy, + "all_columns": dataframe_columns, + "columns": list(dataframe.columns), + "data": dataframe.values.tolist(), + "length": len(dataframe), + "buy_signals": signals["enter_long"], # Deprecated + "sell_signals": signals["exit_long"], # Deprecated + "enter_long_signals": signals["enter_long"], + "exit_long_signals": 
signals["exit_long"], + "enter_short_signals": signals["enter_short"], + "exit_short_signals": signals["exit_short"], + "last_analyzed": last_analyzed, + "last_analyzed_ts": int(last_analyzed.timestamp()), + "data_start": "", + "data_start_ts": 0, + "data_stop": "", + "data_stop_ts": 0, } if has_content: - res.update({ - 'data_start': str(dataframe.iloc[0]['date']), - 'data_start_ts': int(dataframe.iloc[0]['__date_ts']), - 'data_stop': str(dataframe.iloc[-1]['date']), - 'data_stop_ts': int(dataframe.iloc[-1]['__date_ts']), - }) + res.update( + { + "data_start": str(dataframe.iloc[0]["date"]), + "data_start_ts": int(dataframe.iloc[0]["__date_ts"]), + "data_stop": str(dataframe.iloc[-1]["date"]), + "data_stop_ts": int(dataframe.iloc[-1]["__date_ts"]), + } + ) return res - def _rpc_analysed_dataframe(self, pair: str, timeframe: str, - limit: Optional[int]) -> Dict[str, Any]: - """ Analyzed dataframe in Dict form """ + def _rpc_analysed_dataframe( + self, pair: str, timeframe: str, limit: Optional[int], selected_cols: Optional[List[str]] + ) -> Dict[str, Any]: + """Analyzed dataframe in Dict form""" _data, last_analyzed = self.__rpc_analysed_dataframe_raw(pair, timeframe, limit) - return RPC._convert_dataframe_to_dict(self._freqtrade.config['strategy'], - pair, timeframe, _data, last_analyzed) + return RPC._convert_dataframe_to_dict( + self._freqtrade.config["strategy"], pair, timeframe, _data, last_analyzed, selected_cols + ) def __rpc_analysed_dataframe_raw( - self, - pair: str, - timeframe: str, - limit: Optional[int] + self, pair: str, timeframe: str, limit: Optional[int] ) -> Tuple[DataFrame, datetime]: """ Get the dataframe and last analyze from the dataprovider @@ -1270,8 +1372,7 @@ class RPC: :param timeframe: The timeframe of data to get :param limit: The amount of candles in the dataframe """ - _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe( - pair, timeframe) + _data, last_analyzed = self._freqtrade.dataprovider.get_analyzed_dataframe(pair, timeframe) _data = _data.copy() if limit: @@ -1280,9 +1381,7 @@ class RPC: return _data, last_analyzed def _ws_all_analysed_dataframes( - self, - pairlist: List[str], - limit: Optional[int] + self, pairlist: List[str], limit: Optional[int] ) -> Generator[Dict[str, Any], None, None]: """ Get the analysed dataframes of each pair in the pairlist. 
@@ -1294,36 +1393,29 @@ class RPC: If a list of string date times, only returns those candles :returns: A generator of dictionaries with the key, dataframe, and last analyzed timestamp """ - timeframe = self._freqtrade.config['timeframe'] - candle_type = self._freqtrade.config.get('candle_type_def', CandleType.SPOT) + timeframe = self._freqtrade.config["timeframe"] + candle_type = self._freqtrade.config.get("candle_type_def", CandleType.SPOT) for pair in pairlist: dataframe, last_analyzed = self.__rpc_analysed_dataframe_raw(pair, timeframe, limit) - yield { - "key": (pair, timeframe, candle_type), - "df": dataframe, - "la": last_analyzed - } + yield {"key": (pair, timeframe, candle_type), "df": dataframe, "la": last_analyzed} - def _ws_request_analyzed_df( - self, - limit: Optional[int] = None, - pair: Optional[str] = None - ): - """ Historical Analyzed Dataframes for WebSocket """ + def _ws_request_analyzed_df(self, limit: Optional[int] = None, pair: Optional[str] = None): + """Historical Analyzed Dataframes for WebSocket""" pairlist = [pair] if pair else self._freqtrade.active_pair_whitelist return self._ws_all_analysed_dataframes(pairlist, limit) def _ws_request_whitelist(self): - """ Whitelist data for WebSocket """ + """Whitelist data for WebSocket""" return self._freqtrade.active_pair_whitelist @staticmethod - def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str, - exchange) -> Dict[str, Any]: - timerange_parsed = TimeRange.parse_timerange(config.get('timerange')) + def _rpc_analysed_history_full( + config: Config, pair: str, timeframe: str, exchange, selected_cols: Optional[List[str]] + ) -> Dict[str, Any]: + timerange_parsed = TimeRange.parse_timerange(config.get("timerange")) from freqtrade.data.converter import trim_dataframe from freqtrade.data.dataprovider import DataProvider @@ -1337,44 +1429,53 @@ class RPC: pairs=[pair], timeframe=timeframe, timerange=timerange_parsed, - data_format=config['dataformat_ohlcv'], - candle_type=config.get('candle_type_def', CandleType.SPOT), + data_format=config["dataformat_ohlcv"], + candle_type=config.get("candle_type_def", CandleType.SPOT), startup_candles=startup_candles, ) if pair not in _data: raise RPCException( - f"No data for {pair}, {timeframe} in {config.get('timerange')} found.") + f"No data for {pair}, {timeframe} in {config.get('timerange')} found." 
+ ) strategy.dp = DataProvider(config, exchange=exchange, pairlists=None) strategy.ft_bot_start() - df_analyzed = strategy.analyze_ticker(_data[pair], {'pair': pair}) + df_analyzed = strategy.analyze_ticker(_data[pair], {"pair": pair}) df_analyzed = trim_dataframe(df_analyzed, timerange_parsed, startup_candles=startup_candles) - return RPC._convert_dataframe_to_dict(strategy.get_strategy_name(), pair, timeframe, - df_analyzed.copy(), dt_now()) + return RPC._convert_dataframe_to_dict( + strategy.get_strategy_name(), + pair, + timeframe, + df_analyzed.copy(), + dt_now(), + selected_cols, + ) def _rpc_plot_config(self) -> Dict[str, Any]: - if (self._freqtrade.strategy.plot_config and - 'subplots' not in self._freqtrade.strategy.plot_config): - self._freqtrade.strategy.plot_config['subplots'] = {} + if ( + self._freqtrade.strategy.plot_config + and "subplots" not in self._freqtrade.strategy.plot_config + ): + self._freqtrade.strategy.plot_config["subplots"] = {} return self._freqtrade.strategy.plot_config @staticmethod def _rpc_plot_config_with_strategy(config: Config) -> Dict[str, Any]: - from freqtrade.resolvers.strategy_resolver import StrategyResolver + strategy = StrategyResolver.load_strategy(config) - if (strategy.plot_config and 'subplots' not in strategy.plot_config): - strategy.plot_config['subplots'] = {} + if strategy.plot_config and "subplots" not in strategy.plot_config: + strategy.plot_config["subplots"] = {} return strategy.plot_config @staticmethod def _rpc_sysinfo() -> Dict[str, Any]: return { "cpu_pct": psutil.cpu_percent(interval=1, percpu=True), - "ram_pct": psutil.virtual_memory().percent + "ram_pct": psutil.virtual_memory().percent, } def health(self) -> Dict[str, Optional[Union[str, int]]]: @@ -1392,24 +1493,30 @@ class RPC: } if last_p is not None: - res.update({ - "last_process": str(last_p), - "last_process_loc": format_date(last_p.astimezone(tzlocal())), - "last_process_ts": int(last_p.timestamp()), - }) + res.update( + { + "last_process": str(last_p), + "last_process_loc": format_date(last_p.astimezone(tzlocal())), + "last_process_ts": int(last_p.timestamp()), + } + ) - if (bot_start := KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME)): - res.update({ - "bot_start": str(bot_start), - "bot_start_loc": format_date(bot_start.astimezone(tzlocal())), - "bot_start_ts": int(bot_start.timestamp()), - }) - if (bot_startup := KeyValueStore.get_datetime_value(KeyStoreKeys.STARTUP_TIME)): - res.update({ - "bot_startup": str(bot_startup), - "bot_startup_loc": format_date(bot_startup.astimezone(tzlocal())), - "bot_startup_ts": int(bot_startup.timestamp()), - }) + if bot_start := KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME): + res.update( + { + "bot_start": str(bot_start), + "bot_start_loc": format_date(bot_start.astimezone(tzlocal())), + "bot_start_ts": int(bot_start.timestamp()), + } + ) + if bot_startup := KeyValueStore.get_datetime_value(KeyStoreKeys.STARTUP_TIME): + res.update( + { + "bot_startup": str(bot_startup), + "bot_startup_loc": format_date(bot_startup.astimezone(tzlocal())), + "bot_startup_ts": int(bot_startup.timestamp()), + } + ) return res diff --git a/freqtrade/rpc/rpc_manager.py b/freqtrade/rpc/rpc_manager.py index 1972ad6e5..f62feea3e 100644 --- a/freqtrade/rpc/rpc_manager.py +++ b/freqtrade/rpc/rpc_manager.py @@ -1,6 +1,7 @@ """ This module contains class to manage RPC communications (Telegram, API, ...) 
""" + import logging from collections import deque from typing import List @@ -20,42 +21,46 @@ class RPCManager: """ def __init__(self, freqtrade) -> None: - """ Initializes all enabled rpc modules """ + """Initializes all enabled rpc modules""" self.registered_modules: List[RPCHandler] = [] self._rpc = RPC(freqtrade) config = freqtrade.config # Enable telegram - if config.get('telegram', {}).get('enabled', False): - logger.info('Enabling rpc.telegram ...') + if config.get("telegram", {}).get("enabled", False): + logger.info("Enabling rpc.telegram ...") from freqtrade.rpc.telegram import Telegram + self.registered_modules.append(Telegram(self._rpc, config)) # Enable discord - if config.get('discord', {}).get('enabled', False): - logger.info('Enabling rpc.discord ...') + if config.get("discord", {}).get("enabled", False): + logger.info("Enabling rpc.discord ...") from freqtrade.rpc.discord import Discord + self.registered_modules.append(Discord(self._rpc, config)) # Enable Webhook - if config.get('webhook', {}).get('enabled', False): - logger.info('Enabling rpc.webhook ...') + if config.get("webhook", {}).get("enabled", False): + logger.info("Enabling rpc.webhook ...") from freqtrade.rpc.webhook import Webhook + self.registered_modules.append(Webhook(self._rpc, config)) # Enable local rest api server for cmd line control - if config.get('api_server', {}).get('enabled', False): - logger.info('Enabling rpc.api_server') + if config.get("api_server", {}).get("enabled", False): + logger.info("Enabling rpc.api_server") from freqtrade.rpc.api_server import ApiServer + apiserver = ApiServer(config) apiserver.add_rpc_handler(self._rpc) self.registered_modules.append(apiserver) def cleanup(self) -> None: - """ Stops all enabled rpc modules """ - logger.info('Cleaning up rpc modules ...') + """Stops all enabled rpc modules""" + logger.info("Cleaning up rpc modules ...") while self.registered_modules: mod = self.registered_modules.pop() - logger.info('Cleaning up rpc.%s ...', mod.name) + logger.info("Cleaning up rpc.%s ...", mod.name) mod.cleanup() del mod @@ -68,16 +73,16 @@ class RPCManager: 'status': 'stopping bot' } """ - if msg.get('type') not in NO_ECHO_MESSAGES: - logger.info('Sending rpc message: %s', msg) + if msg.get("type") not in NO_ECHO_MESSAGES: + logger.info("Sending rpc message: %s", msg) for mod in self.registered_modules: - logger.debug('Forwarding message to rpc.%s', mod.name) + logger.debug("Forwarding message to rpc.%s", mod.name) try: mod.send_msg(msg) except NotImplementedError: logger.error(f"Message type '{msg['type']}' not implemented by handler {mod.name}.") except Exception: - logger.exception('Exception occurred within RPC module %s', mod.name) + logger.exception("Exception occurred within RPC module %s", mod.name) def process_msg_queue(self, queue: deque) -> None: """ @@ -85,47 +90,54 @@ class RPCManager: """ while queue: msg = queue.popleft() - logger.info('Sending rpc strategy_msg: %s', msg) + logger.info("Sending rpc strategy_msg: %s", msg) for mod in self.registered_modules: - if mod._config.get(mod.name, {}).get('allow_custom_messages', False): - mod.send_msg({ - 'type': RPCMessageType.STRATEGY_MSG, - 'msg': msg, - }) + if mod._config.get(mod.name, {}).get("allow_custom_messages", False): + mod.send_msg( + { + "type": RPCMessageType.STRATEGY_MSG, + "msg": msg, + } + ) def startup_messages(self, config: Config, pairlist, protections) -> None: - if config['dry_run']: - self.send_msg({ - 'type': RPCMessageType.WARNING, - 'status': 'Dry run is enabled. 
All trades are simulated.' - }) - stake_currency = config['stake_currency'] - stake_amount = config['stake_amount'] - minimal_roi = config['minimal_roi'] - stoploss = config['stoploss'] - trailing_stop = config['trailing_stop'] - timeframe = config['timeframe'] - exchange_name = config['exchange']['name'] - strategy_name = config.get('strategy', '') - pos_adjust_enabled = 'On' if config['position_adjustment_enable'] else 'Off' - self.send_msg({ - 'type': RPCMessageType.STARTUP, - 'status': f'*Exchange:* `{exchange_name}`\n' - f'*Stake per trade:* `{stake_amount} {stake_currency}`\n' - f'*Minimum ROI:* `{minimal_roi}`\n' - f'*{"Trailing " if trailing_stop else ""}Stoploss:* `{stoploss}`\n' - f'*Position adjustment:* `{pos_adjust_enabled}`\n' - f'*Timeframe:* `{timeframe}`\n' - f'*Strategy:* `{strategy_name}`' - }) - self.send_msg({ - 'type': RPCMessageType.STARTUP, - 'status': f'Searching for {stake_currency} pairs to buy and sell ' - f'based on {pairlist.short_desc()}' - }) + if config["dry_run"]: + self.send_msg( + { + "type": RPCMessageType.WARNING, + "status": "Dry run is enabled. All trades are simulated.", + } + ) + stake_currency = config["stake_currency"] + stake_amount = config["stake_amount"] + minimal_roi = config["minimal_roi"] + stoploss = config["stoploss"] + trailing_stop = config["trailing_stop"] + timeframe = config["timeframe"] + exchange_name = config["exchange"]["name"] + strategy_name = config.get("strategy", "") + pos_adjust_enabled = "On" if config["position_adjustment_enable"] else "Off" + self.send_msg( + { + "type": RPCMessageType.STARTUP, + "status": f"*Exchange:* `{exchange_name}`\n" + f"*Stake per trade:* `{stake_amount} {stake_currency}`\n" + f"*Minimum ROI:* `{minimal_roi}`\n" + f"*{'Trailing ' if trailing_stop else ''}Stoploss:* `{stoploss}`\n" + f"*Position adjustment:* `{pos_adjust_enabled}`\n" + f"*Timeframe:* `{timeframe}`\n" + f"*Strategy:* `{strategy_name}`", + } + ) + self.send_msg( + { + "type": RPCMessageType.STARTUP, + "status": f"Searching for {stake_currency} pairs to buy and sell " + f"based on {pairlist.short_desc()}", + } + ) if len(protections.name_list) > 0: - prots = '\n'.join([p for prot in protections.short_desc() for k, p in prot.items()]) - self.send_msg({ - 'type': RPCMessageType.STARTUP, - 'status': f'Using Protections: \n{prots}' - }) + prots = "\n".join([p for prot in protections.short_desc() for k, p in prot.items()]) + self.send_msg( + {"type": RPCMessageType.STARTUP, "status": f"Using Protections: \n{prots}"} + ) diff --git a/freqtrade/rpc/rpc_types.py b/freqtrade/rpc/rpc_types.py index 72a382f48..e5f4f93c9 100644 --- a/freqtrade/rpc/rpc_types.py +++ b/freqtrade/rpc/rpc_types.py @@ -15,12 +15,14 @@ class RPCSendMsgBase(TypedDict): class RPCStatusMsg(RPCSendMsgBase): """Used for Status, Startup and Warning messages""" + type: Literal[RPCMessageType.STATUS, RPCMessageType.STARTUP, RPCMessageType.WARNING] status: str class RPCStrategyMsg(RPCSendMsgBase): """Used for Status, Startup and Warning messages""" + type: Literal[RPCMessageType.STRATEGY_MSG] msg: str @@ -108,12 +110,14 @@ class _AnalyzedDFData(TypedDict): class RPCAnalyzedDFMsg(RPCSendMsgBase): """New Analyzed dataframe message""" + type: Literal[RPCMessageType.ANALYZED_DF] data: _AnalyzedDFData class RPCNewCandleMsg(RPCSendMsgBase): """New candle ping message, issued once per new candle/pair""" + type: Literal[RPCMessageType.NEW_CANDLE] data: PairWithTimeframe @@ -131,5 +135,5 @@ RPCSendMsg = Union[ RPCExitMsg, RPCExitCancelMsg, RPCAnalyzedDFMsg, - RPCNewCandleMsg - ] + 
RPCNewCandleMsg, +] diff --git a/freqtrade/rpc/telegram.py b/freqtrade/rpc/telegram.py index 030075c64..39137b605 100644 --- a/freqtrade/rpc/telegram.py +++ b/freqtrade/rpc/telegram.py @@ -3,6 +3,7 @@ """ This module manage Telegram communication """ + import asyncio import json import logging @@ -18,8 +19,14 @@ from threading import Thread from typing import Any, Callable, Coroutine, Dict, List, Literal, Optional, Union from tabulate import tabulate -from telegram import (CallbackQuery, InlineKeyboardButton, InlineKeyboardMarkup, KeyboardButton, - ReplyKeyboardMarkup, Update) +from telegram import ( + CallbackQuery, + InlineKeyboardButton, + InlineKeyboardMarkup, + KeyboardButton, + ReplyKeyboardMarkup, + Update, +) from telegram.constants import MessageLimit, ParseMode from telegram.error import BadRequest, NetworkError, TelegramError from telegram.ext import Application, CallbackContext, CallbackQueryHandler, CommandHandler @@ -41,7 +48,7 @@ MAX_MESSAGE_LENGTH = MessageLimit.MAX_TEXT_LENGTH logger = logging.getLogger(__name__) -logger.debug('Included module rpc.telegram ...') +logger.debug("Included module rpc.telegram ...") def safe_async_db(func: Callable[..., Any]): @@ -50,9 +57,10 @@ def safe_async_db(func: Callable[..., Any]): :param func: function to decorate :return: decorated function """ + @wraps(func) def wrapper(*args, **kwargs): - """ Decorator logic """ + """Decorator logic""" try: return func(*args, **kwargs) finally: @@ -80,8 +88,8 @@ def authorized_only(command_handler: Callable[..., Coroutine[Any, Any, None]]): @wraps(command_handler) async def wrapper(self, *args, **kwargs): - """ Decorator logic """ - update = kwargs.get('update') or args[0] + """Decorator logic""" + update = kwargs.get("update") or args[0] # Reject unauthorized messages if update.callback_query: @@ -89,23 +97,19 @@ def authorized_only(command_handler: Callable[..., Coroutine[Any, Any, None]]): else: cchat_id = int(update.message.chat_id) - chat_id = int(self._config['telegram']['chat_id']) + chat_id = int(self._config["telegram"]["chat_id"]) if cchat_id != chat_id: - logger.info(f'Rejected unauthorized message from: {update.message.chat_id}') + logger.info(f"Rejected unauthorized message from: {update.message.chat_id}") return wrapper # Rollback session to avoid getting data stored in a transaction. Trade.rollback() - logger.debug( - 'Executing handler: %s for chat_id: %s', - command_handler.__name__, - chat_id - ) + logger.debug("Executing handler: %s for chat_id: %s", command_handler.__name__, chat_id) try: return await command_handler(self, *args, **kwargs) except RPCException as e: await self._send_msg(str(e)) except BaseException: - logger.exception('Exception occurred within Telegram module') + logger.exception("Exception occurred within Telegram module") finally: Trade.session.remove() @@ -113,7 +117,7 @@ def authorized_only(command_handler: Callable[..., Coroutine[Any, Any, None]]): class Telegram(RPCHandler): - """ This class handles all telegram communication """ + """This class handles all telegram communication""" def __init__(self, rpc: RPC, config: Config) -> None: """ @@ -133,7 +137,7 @@ class Telegram(RPCHandler): """ Creates and starts the polling thread """ - self._thread = Thread(target=self._init, name='FTTelegram') + self._thread = Thread(target=self._init, name="FTTelegram") self._thread.start() def _init_keyboard(self) -> None: @@ -142,51 +146,83 @@ class Telegram(RPCHandler): section. 
""" self._keyboard: List[List[Union[str, KeyboardButton]]] = [ - ['/daily', '/profit', '/balance'], - ['/status', '/status table', '/performance'], - ['/count', '/start', '/stop', '/help'] + ["/daily", "/profit", "/balance"], + ["/status", "/status table", "/performance"], + ["/count", "/start", "/stop", "/help"], ] # do not allow commands with mandatory arguments and critical cmds # TODO: DRY! - its not good to list all valid cmds here. But otherwise # this needs refactoring of the whole telegram module (same # problem in _help()). valid_keys: List[str] = [ - r'/start$', r'/stop$', r'/status$', r'/status table$', - r'/trades$', r'/performance$', r'/buys', r'/entries', - r'/sells', r'/exits', r'/mix_tags', - r'/daily$', r'/daily \d+$', r'/profit$', r'/profit \d+', - r'/stats$', r'/count$', r'/locks$', r'/balance$', - r'/stopbuy$', r'/stopentry$', r'/reload_config$', r'/show_config$', - r'/logs$', r'/whitelist$', r'/whitelist(\ssorted|\sbaseonly)+$', - r'/blacklist$', r'/bl_delete$', - r'/weekly$', r'/weekly \d+$', r'/monthly$', r'/monthly \d+$', - r'/forcebuy$', r'/forcelong$', r'/forceshort$', - r'/forcesell$', r'/forceexit$', - r'/edge$', r'/health$', r'/help$', r'/version$', r'/marketdir (long|short|even|none)$', - r'/marketdir$' + r"/start$", + r"/stop$", + r"/status$", + r"/status table$", + r"/trades$", + r"/performance$", + r"/buys", + r"/entries", + r"/sells", + r"/exits", + r"/mix_tags", + r"/daily$", + r"/daily \d+$", + r"/profit$", + r"/profit \d+", + r"/stats$", + r"/count$", + r"/locks$", + r"/balance$", + r"/stopbuy$", + r"/stopentry$", + r"/reload_config$", + r"/show_config$", + r"/logs$", + r"/whitelist$", + r"/whitelist(\ssorted|\sbaseonly)+$", + r"/blacklist$", + r"/bl_delete$", + r"/weekly$", + r"/weekly \d+$", + r"/monthly$", + r"/monthly \d+$", + r"/forcebuy$", + r"/forcelong$", + r"/forceshort$", + r"/forcesell$", + r"/forceexit$", + r"/edge$", + r"/health$", + r"/help$", + r"/version$", + r"/marketdir (long|short|even|none)$", + r"/marketdir$", ] # Create keys for generation - valid_keys_print = [k.replace('$', '') for k in valid_keys] + valid_keys_print = [k.replace("$", "") for k in valid_keys] # custom keyboard specified in config.json - cust_keyboard = self._config['telegram'].get('keyboard', []) + cust_keyboard = self._config["telegram"].get("keyboard", []) if cust_keyboard: combined = "(" + ")|(".join(valid_keys) + ")" # check for valid shortcuts - invalid_keys = [b for b in chain.from_iterable(cust_keyboard) - if not re.match(combined, b)] + invalid_keys = [ + b for b in chain.from_iterable(cust_keyboard) if not re.match(combined, b) + ] if len(invalid_keys): - err_msg = ('config.telegram.keyboard: Invalid commands for ' - f'custom Telegram keyboard: {invalid_keys}' - f'\nvalid commands are: {valid_keys_print}') + err_msg = ( + "config.telegram.keyboard: Invalid commands for " + f"custom Telegram keyboard: {invalid_keys}" + f"\nvalid commands are: {valid_keys_print}" + ) raise OperationalException(err_msg) else: self._keyboard = cust_keyboard - logger.info('using custom keyboard from ' - f'config.json: {self._keyboard}') + logger.info(f"using custom keyboard from config.json: {self._keyboard}") def _init_telegram_app(self): - return Application.builder().token(self._config['telegram']['token']).build() + return Application.builder().token(self._config["telegram"]["token"]).build() def _init(self) -> None: """ @@ -205,60 +241,65 @@ class Telegram(RPCHandler): # Register command handler and start telegram message polling handles = [ - CommandHandler('status', 
self._status), - CommandHandler('profit', self._profit), - CommandHandler('balance', self._balance), - CommandHandler('start', self._start), - CommandHandler('stop', self._stop), - CommandHandler(['forcesell', 'forceexit', 'fx'], self._force_exit), - CommandHandler(['forcebuy', 'forcelong'], partial( - self._force_enter, order_side=SignalDirection.LONG)), - CommandHandler('forceshort', partial( - self._force_enter, order_side=SignalDirection.SHORT)), - CommandHandler('reload_trade', self._reload_trade_from_exchange), - CommandHandler('trades', self._trades), - CommandHandler('delete', self._delete_trade), - CommandHandler(['coo', 'cancel_open_order'], self._cancel_open_order), - CommandHandler('performance', self._performance), - CommandHandler(['buys', 'entries'], self._enter_tag_performance), - CommandHandler(['sells', 'exits'], self._exit_reason_performance), - CommandHandler('mix_tags', self._mix_tag_performance), - CommandHandler('stats', self._stats), - CommandHandler('daily', self._daily), - CommandHandler('weekly', self._weekly), - CommandHandler('monthly', self._monthly), - CommandHandler('count', self._count), - CommandHandler('locks', self._locks), - CommandHandler(['unlock', 'delete_locks'], self._delete_locks), - CommandHandler(['reload_config', 'reload_conf'], self._reload_config), - CommandHandler(['show_config', 'show_conf'], self._show_config), - CommandHandler(['stopbuy', 'stopentry'], self._stopentry), - CommandHandler('whitelist', self._whitelist), - CommandHandler('blacklist', self._blacklist), - CommandHandler(['blacklist_delete', 'bl_delete'], self._blacklist_delete), - CommandHandler('logs', self._logs), - CommandHandler('edge', self._edge), - CommandHandler('health', self._health), - CommandHandler('help', self._help), - CommandHandler('version', self._version), - CommandHandler('marketdir', self._changemarketdir), - CommandHandler('order', self._order), - CommandHandler('list_custom_data', self._list_custom_data), + CommandHandler("status", self._status), + CommandHandler("profit", self._profit), + CommandHandler("balance", self._balance), + CommandHandler("start", self._start), + CommandHandler("stop", self._stop), + CommandHandler(["forcesell", "forceexit", "fx"], self._force_exit), + CommandHandler( + ["forcebuy", "forcelong"], + partial(self._force_enter, order_side=SignalDirection.LONG), + ), + CommandHandler( + "forceshort", partial(self._force_enter, order_side=SignalDirection.SHORT) + ), + CommandHandler("reload_trade", self._reload_trade_from_exchange), + CommandHandler("trades", self._trades), + CommandHandler("delete", self._delete_trade), + CommandHandler(["coo", "cancel_open_order"], self._cancel_open_order), + CommandHandler("performance", self._performance), + CommandHandler(["buys", "entries"], self._enter_tag_performance), + CommandHandler(["sells", "exits"], self._exit_reason_performance), + CommandHandler("mix_tags", self._mix_tag_performance), + CommandHandler("stats", self._stats), + CommandHandler("daily", self._daily), + CommandHandler("weekly", self._weekly), + CommandHandler("monthly", self._monthly), + CommandHandler("count", self._count), + CommandHandler("locks", self._locks), + CommandHandler(["unlock", "delete_locks"], self._delete_locks), + CommandHandler(["reload_config", "reload_conf"], self._reload_config), + CommandHandler(["show_config", "show_conf"], self._show_config), + CommandHandler(["stopbuy", "stopentry"], self._stopentry), + CommandHandler("whitelist", self._whitelist), + CommandHandler("blacklist", self._blacklist), + 
CommandHandler(["blacklist_delete", "bl_delete"], self._blacklist_delete), + CommandHandler("logs", self._logs), + CommandHandler("edge", self._edge), + CommandHandler("health", self._health), + CommandHandler("help", self._help), + CommandHandler("version", self._version), + CommandHandler("marketdir", self._changemarketdir), + CommandHandler("order", self._order), + CommandHandler("list_custom_data", self._list_custom_data), ] callbacks = [ - CallbackQueryHandler(self._status_table, pattern='update_status_table'), - CallbackQueryHandler(self._daily, pattern='update_daily'), - CallbackQueryHandler(self._weekly, pattern='update_weekly'), - CallbackQueryHandler(self._monthly, pattern='update_monthly'), - CallbackQueryHandler(self._profit, pattern='update_profit'), - CallbackQueryHandler(self._balance, pattern='update_balance'), - CallbackQueryHandler(self._performance, pattern='update_performance'), - CallbackQueryHandler(self._enter_tag_performance, - pattern='update_enter_tag_performance'), - CallbackQueryHandler(self._exit_reason_performance, - pattern='update_exit_reason_performance'), - CallbackQueryHandler(self._mix_tag_performance, pattern='update_mix_tag_performance'), - CallbackQueryHandler(self._count, pattern='update_count'), + CallbackQueryHandler(self._status_table, pattern="update_status_table"), + CallbackQueryHandler(self._daily, pattern="update_daily"), + CallbackQueryHandler(self._weekly, pattern="update_weekly"), + CallbackQueryHandler(self._monthly, pattern="update_monthly"), + CallbackQueryHandler(self._profit, pattern="update_profit"), + CallbackQueryHandler(self._balance, pattern="update_balance"), + CallbackQueryHandler(self._performance, pattern="update_performance"), + CallbackQueryHandler( + self._enter_tag_performance, pattern="update_enter_tag_performance" + ), + CallbackQueryHandler( + self._exit_reason_performance, pattern="update_exit_reason_performance" + ), + CallbackQueryHandler(self._mix_tag_performance, pattern="update_mix_tag_performance"), + CallbackQueryHandler(self._count, pattern="update_count"), CallbackQueryHandler(self._force_exit_inline, pattern=r"force_exit__\S+"), CallbackQueryHandler(self._force_enter_inline, pattern=r"force_enter__\S+"), ] @@ -269,8 +310,8 @@ class Telegram(RPCHandler): self._app.add_handler(callback) logger.info( - 'rpc.telegram is listening for following commands: %s', - [[x for x in sorted(h.commands)] for h in handles] + "rpc.telegram is listening for following commands: %s", + [[x for x in sorted(h.commands)] for h in handles], ) self._loop.run_until_complete(self._startup_telegram()) @@ -314,12 +355,14 @@ class Telegram(RPCHandler): return f"{msg['exchange']}{' (dry)' if self._config['dry_run'] else ''}" def _add_analyzed_candle(self, pair: str) -> str: - candle_val = self._config['telegram'].get( - 'notification_settings', {}).get('show_candle', 'off') - if candle_val != 'off': - if candle_val == 'ohlc': + candle_val = ( + self._config["telegram"].get("notification_settings", {}).get("show_candle", "off") + ) + if candle_val != "off": + if candle_val == "ohlc": analyzed_df, _ = self._rpc._freqtrade.dataprovider.get_analyzed_dataframe( - pair, self._config['timeframe']) + pair, self._config["timeframe"] + ) candle = analyzed_df.iloc[-1].squeeze() if len(analyzed_df) > 0 else None if candle is not None: return ( @@ -327,18 +370,17 @@ class Telegram(RPCHandler): f"{candle['low']}, {candle['close']}`\n" ) - return '' + return "" def _format_entry_msg(self, msg: RPCEntryMsg) -> str: - - is_fill = msg['type'] in 
[RPCMessageType.ENTRY_FILL] - emoji = '\N{CHECK MARK}' if is_fill else '\N{LARGE BLUE CIRCLE}' + is_fill = msg["type"] in [RPCMessageType.ENTRY_FILL] + emoji = "\N{CHECK MARK}" if is_fill else "\N{LARGE BLUE CIRCLE}" terminology = { - '1_enter': 'New Trade', - '1_entered': 'New Trade filled', - 'x_enter': 'Increasing position', - 'x_entered': 'Position increase filled', + "1_enter": "New Trade", + "1_entered": "New Trade filled", + "x_enter": "Increasing position", + "x_entered": "Position increase filled", } key = f"{'x' if msg['sub_trade'] else '1'}_{'entered' if is_fill else 'enter'}" @@ -349,65 +391,69 @@ class Telegram(RPCHandler): f" {wording} (#{msg['trade_id']})\n" f"*Pair:* `{msg['pair']}`\n" ) - message += self._add_analyzed_candle(msg['pair']) - message += f"*Enter Tag:* `{msg['enter_tag']}`\n" if msg.get('enter_tag') else "" + message += self._add_analyzed_candle(msg["pair"]) + message += f"*Enter Tag:* `{msg['enter_tag']}`\n" if msg.get("enter_tag") else "" message += f"*Amount:* `{round_value(msg['amount'], 8)}`\n" message += f"*Direction:* `{msg['direction']}" - if msg.get('leverage') and msg.get('leverage', 1.0) != 1.0: + if msg.get("leverage") and msg.get("leverage", 1.0) != 1.0: message += f" ({msg['leverage']:.3g}x)" message += "`\n" message += f"*Open Rate:* `{round_value(msg['open_rate'], 8)} {msg['quote_currency']}`\n" - if msg['type'] == RPCMessageType.ENTRY and msg['current_rate']: + if msg["type"] == RPCMessageType.ENTRY and msg["current_rate"]: message += ( f"*Current Rate:* `{round_value(msg['current_rate'], 8)} {msg['quote_currency']}`\n" ) - profit_fiat_extra = self.__format_profit_fiat(msg, 'stake_amount') # type: ignore - total = fmt_coin(msg['stake_amount'], msg['quote_currency']) + profit_fiat_extra = self.__format_profit_fiat(msg, "stake_amount") # type: ignore + total = fmt_coin(msg["stake_amount"], msg["quote_currency"]) message += f"*{'New ' if msg['sub_trade'] else ''}Total:* `{total}{profit_fiat_extra}`" return message def _format_exit_msg(self, msg: RPCExitMsg) -> str: - duration = msg['close_date'].replace( - microsecond=0) - msg['open_date'].replace(microsecond=0) + duration = msg["close_date"].replace(microsecond=0) - msg["open_date"].replace( + microsecond=0 + ) duration_min = duration.total_seconds() / 60 - leverage_text = (f" ({msg['leverage']:.3g}x)" - if msg.get('leverage') and msg.get('leverage', 1.0) != 1.0 - else "") + leverage_text = ( + f" ({msg['leverage']:.3g}x)" + if msg.get("leverage") and msg.get("leverage", 1.0) != 1.0 + else "" + ) - profit_fiat_extra = self.__format_profit_fiat(msg, 'profit_amount') + profit_fiat_extra = self.__format_profit_fiat(msg, "profit_amount") profit_extra = ( f" ({msg['gain']}: {fmt_coin(msg['profit_amount'], msg['quote_currency'])}" - f"{profit_fiat_extra})") + f"{profit_fiat_extra})" + ) - is_fill = msg['type'] == RPCMessageType.EXIT_FILL - is_sub_trade = msg.get('sub_trade') - is_sub_profit = msg['profit_amount'] != msg.get('cumulative_profit') - is_final_exit = msg.get('is_final_exit', False) and is_sub_profit - profit_prefix = 'Sub ' if is_sub_trade else '' - cp_extra = '' - exit_wording = 'Exited' if is_fill else 'Exiting' + is_fill = msg["type"] == RPCMessageType.EXIT_FILL + is_sub_trade = msg.get("sub_trade") + is_sub_profit = msg["profit_amount"] != msg.get("cumulative_profit") + is_final_exit = msg.get("is_final_exit", False) and is_sub_profit + profit_prefix = "Sub " if is_sub_trade else "" + cp_extra = "" + exit_wording = "Exited" if is_fill else "Exiting" if is_sub_trade or is_final_exit: - 
cp_fiat = self.__format_profit_fiat(msg, 'cumulative_profit') + cp_fiat = self.__format_profit_fiat(msg, "cumulative_profit") if is_final_exit: - profit_prefix = 'Sub ' + profit_prefix = "Sub " cp_extra = ( f"*Final Profit:* `{msg['final_profit_ratio']:.2%} " f"({msg['cumulative_profit']:.8f} {msg['quote_currency']}{cp_fiat})`\n" ) else: exit_wording = f"Partially {exit_wording.lower()}" - if msg['cumulative_profit']: + if msg["cumulative_profit"]: cp_extra = ( f"*Cumulative Profit:* `" f"{fmt_coin(msg['cumulative_profit'], msg['stake_currency'])}{cp_fiat}`\n" ) - enter_tag = f"*Enter Tag:* `{msg['enter_tag']}`\n" if msg.get('enter_tag') else "" + enter_tag = f"*Enter Tag:* `{msg['enter_tag']}`\n" if msg.get("enter_tag") else "" message = ( f"{self._get_exit_emoji(msg)} *{self._exchange_from_msg(msg)}:* " f"{exit_wording} {msg['pair']} (#{msg['trade_id']})\n" @@ -422,104 +468,108 @@ class Telegram(RPCHandler): f"*Amount:* `{round_value(msg['amount'], 8)}`\n" f"*Open Rate:* `{fmt_coin(msg['open_rate'], msg['quote_currency'])}`\n" ) - if msg['type'] == RPCMessageType.EXIT and msg['current_rate']: + if msg["type"] == RPCMessageType.EXIT and msg["current_rate"]: message += f"*Current Rate:* `{fmt_coin(msg['current_rate'], msg['quote_currency'])}`\n" - if msg['order_rate']: + if msg["order_rate"]: message += f"*Exit Rate:* `{fmt_coin(msg['order_rate'], msg['quote_currency'])}`" - elif msg['type'] == RPCMessageType.EXIT_FILL: + elif msg["type"] == RPCMessageType.EXIT_FILL: message += f"*Exit Rate:* `{fmt_coin(msg['close_rate'], msg['quote_currency'])}`" if is_sub_trade: - stake_amount_fiat = self.__format_profit_fiat(msg, 'stake_amount') + stake_amount_fiat = self.__format_profit_fiat(msg, "stake_amount") - rem = fmt_coin(msg['stake_amount'], msg['quote_currency']) + rem = fmt_coin(msg["stake_amount"], msg["quote_currency"]) message += f"\n*Remaining:* `{rem}{stake_amount_fiat}`" else: message += f"\n*Duration:* `{duration} ({duration_min:.1f} min)`" return message def __format_profit_fiat( - self, - msg: RPCExitMsg, - key: Literal['stake_amount', 'profit_amount', 'cumulative_profit'] + self, msg: RPCExitMsg, key: Literal["stake_amount", "profit_amount", "cumulative_profit"] ) -> str: """ Format Fiat currency to append to regular profit output """ - profit_fiat_extra = '' - if self._rpc._fiat_converter and (fiat_currency := msg.get('fiat_currency')): + profit_fiat_extra = "" + if self._rpc._fiat_converter and (fiat_currency := msg.get("fiat_currency")): profit_fiat = self._rpc._fiat_converter.convert_amount( - msg[key], msg['stake_currency'], fiat_currency) + msg[key], msg["stake_currency"], fiat_currency + ) profit_fiat_extra = f" / {profit_fiat:.3f} {fiat_currency}" return profit_fiat_extra def compose_message(self, msg: RPCSendMsg) -> Optional[str]: - if msg['type'] == RPCMessageType.ENTRY or msg['type'] == RPCMessageType.ENTRY_FILL: + if msg["type"] == RPCMessageType.ENTRY or msg["type"] == RPCMessageType.ENTRY_FILL: message = self._format_entry_msg(msg) - elif msg['type'] == RPCMessageType.EXIT or msg['type'] == RPCMessageType.EXIT_FILL: + elif msg["type"] == RPCMessageType.EXIT or msg["type"] == RPCMessageType.EXIT_FILL: message = self._format_exit_msg(msg) elif ( - msg['type'] == RPCMessageType.ENTRY_CANCEL - or msg['type'] == RPCMessageType.EXIT_CANCEL + msg["type"] == RPCMessageType.ENTRY_CANCEL or msg["type"] == RPCMessageType.EXIT_CANCEL ): - message_side = 'enter' if msg['type'] == RPCMessageType.ENTRY_CANCEL else 'exit' - message = (f"\N{WARNING SIGN} 
*{self._exchange_from_msg(msg)}:* " - f"Cancelling {'partial ' if msg.get('sub_trade') else ''}" - f"{message_side} Order for {msg['pair']} " - f"(#{msg['trade_id']}). Reason: {msg['reason']}.") + message_side = "enter" if msg["type"] == RPCMessageType.ENTRY_CANCEL else "exit" + message = ( + f"\N{WARNING SIGN} *{self._exchange_from_msg(msg)}:* " + f"Cancelling {'partial ' if msg.get('sub_trade') else ''}" + f"{message_side} Order for {msg['pair']} " + f"(#{msg['trade_id']}). Reason: {msg['reason']}." + ) - elif msg['type'] == RPCMessageType.PROTECTION_TRIGGER: + elif msg["type"] == RPCMessageType.PROTECTION_TRIGGER: message = ( f"*Protection* triggered due to {msg['reason']}. " f"`{msg['pair']}` will be locked until `{msg['lock_end_time']}`." ) - elif msg['type'] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL: + elif msg["type"] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL: message = ( f"*Protection* triggered due to {msg['reason']}. " f"*All pairs* will be locked until `{msg['lock_end_time']}`." ) - elif msg['type'] == RPCMessageType.STATUS: + elif msg["type"] == RPCMessageType.STATUS: message = f"*Status:* `{msg['status']}`" - elif msg['type'] == RPCMessageType.WARNING: + elif msg["type"] == RPCMessageType.WARNING: message = f"\N{WARNING SIGN} *Warning:* `{msg['status']}`" - elif msg['type'] == RPCMessageType.EXCEPTION: + elif msg["type"] == RPCMessageType.EXCEPTION: # Errors will contain exceptions, which are wrapped in triple ticks. message = f"\N{WARNING SIGN} *ERROR:* \n {msg['status']}" - elif msg['type'] == RPCMessageType.STARTUP: + elif msg["type"] == RPCMessageType.STARTUP: message = f"{msg['status']}" - elif msg['type'] == RPCMessageType.STRATEGY_MSG: + elif msg["type"] == RPCMessageType.STRATEGY_MSG: message = f"{msg['msg']}" else: - logger.debug("Unknown message type: %s", msg['type']) + logger.debug("Unknown message type: %s", msg["type"]) return None return message def send_msg(self, msg: RPCSendMsg) -> None: - """ Send a message to telegram channel """ + """Send a message to telegram channel""" - default_noti = 'on' + default_noti = "on" - msg_type = msg['type'] - noti = '' - if msg['type'] == RPCMessageType.EXIT: - sell_noti = self._config['telegram'] \ - .get('notification_settings', {}).get(str(msg_type), {}) + msg_type = msg["type"] + noti = "" + if msg["type"] == RPCMessageType.EXIT: + sell_noti = ( + self._config["telegram"].get("notification_settings", {}).get(str(msg_type), {}) + ) # For backward compatibility sell still can be string if isinstance(sell_noti, str): noti = sell_noti else: - noti = sell_noti.get(str(msg['exit_reason']), default_noti) + noti = sell_noti.get(str(msg["exit_reason"]), default_noti) else: - noti = self._config['telegram'] \ - .get('notification_settings', {}).get(str(msg_type), default_noti) + noti = ( + self._config["telegram"] + .get("notification_settings", {}) + .get(str(msg_type), default_noti) + ) - if noti == 'off': + if noti == "off": logger.info(f"Notification '{msg_type}' not sent.") # Notification disabled return @@ -527,19 +577,19 @@ class Telegram(RPCHandler): message = self.compose_message(deepcopy(msg)) if message: asyncio.run_coroutine_threadsafe( - self._send_msg(message, disable_notification=(noti == 'silent')), - self._loop) + self._send_msg(message, disable_notification=(noti == "silent")), self._loop + ) def _get_exit_emoji(self, msg): """ Get emoji for exit-messages """ - if float(msg['profit_ratio']) >= 0.05: + if float(msg["profit_ratio"]) >= 0.05: return "\N{ROCKET}" - elif float(msg['profit_ratio']) >= 0.0: + elif 
float(msg["profit_ratio"]) >= 0.0: return "\N{EIGHT SPOKED ASTERISK}" - elif msg['exit_reason'] == "stop_loss": + elif msg["exit_reason"] == "stop_loss": return "\N{WARNING SIGN}" else: return "\N{CROSS MARK}" @@ -554,10 +604,10 @@ class Telegram(RPCHandler): order_nr = 0 for order in filled_orders: lines: List[str] = [] - if order['is_open'] is True: + if order["is_open"] is True: continue order_nr += 1 - wording = 'Entry' if order['ft_is_entry'] else 'Exit' + wording = "Entry" if order["ft_is_entry"] else "Exit" cur_entry_amount = order["filled"] or order["amount"] cur_entry_average = order["safe_price"] @@ -571,13 +621,17 @@ class Telegram(RPCHandler): lines.append(f"*Average Price:* {round_value(cur_entry_average, 8)}") else: # TODO: This calculation ignores fees. - price_to_1st_entry = ((cur_entry_average - first_avg) / first_avg) + price_to_1st_entry = (cur_entry_average - first_avg) / first_avg if is_open: lines.append("({})".format(dt_humanize_delta(order["order_filled_date"]))) - lines.append(f"*Amount:* {round_value(cur_entry_amount, 8)} " - f"({fmt_coin(order['cost'], quote_currency)})") - lines.append(f"*Average {wording} Price:* {round_value(cur_entry_average, 8)} " - f"({price_to_1st_entry:.2%} from 1st entry rate)") + lines.append( + f"*Amount:* {round_value(cur_entry_amount, 8)} " + f"({fmt_coin(order['cost'], quote_currency)})" + ) + lines.append( + f"*Average {wording} Price:* {round_value(cur_entry_average, 8)} " + f"({price_to_1st_entry:.2%} from 1st entry rate)" + ) lines.append(f"*Order Filled:* {order['order_filled_date']}") lines_detail.append("\n".join(lines)) @@ -600,12 +654,11 @@ class Telegram(RPCHandler): results = self._rpc._rpc_trade_status(trade_ids=trade_ids) for r in results: - lines = [ - "*Order List for Trade #*`{trade_id}`" - ] + lines = ["*Order List for Trade #*`{trade_id}`"] lines_detail = self._prepare_order_details( - r['orders'], r['quote_currency'], r['is_open']) + r["orders"], r["quote_currency"], r["is_open"] + ) lines.extend(lines_detail if lines_detail else "") await self.__send_order_msg(lines, r) @@ -613,15 +666,15 @@ class Telegram(RPCHandler): """ Send status message. 
""" - msg = '' + msg = "" for line in lines: if line: if (len(msg) + len(line) + 1) < MAX_MESSAGE_LENGTH: - msg += line + '\n' + msg += line + "\n" else: await self._send_msg(msg.format(**r)) - msg = "*Order List for Trade #*`{trade_id}` - continued\n" + line + '\n' + msg = "*Order List for Trade #*`{trade_id}` - continued\n" + line + "\n" await self._send_msg(msg.format(**r)) @@ -635,7 +688,7 @@ class Telegram(RPCHandler): :return: None """ - if context.args and 'table' in context.args: + if context.args and "table" in context.args: await self._status_table(update, context) return else: @@ -653,74 +706,102 @@ class Telegram(RPCHandler): trade_ids = [int(i) for i in context.args if i.isnumeric()] results = self._rpc._rpc_trade_status(trade_ids=trade_ids) - position_adjust = self._config.get('position_adjustment_enable', False) - max_entries = self._config.get('max_entry_position_adjustment', -1) + position_adjust = self._config.get("position_adjustment_enable", False) + max_entries = self._config.get("max_entry_position_adjustment", -1) for r in results: - r['open_date_hum'] = dt_humanize_delta(r['open_date']) - r['num_entries'] = len([o for o in r['orders'] if o['ft_is_entry']]) - r['num_exits'] = len([o for o in r['orders'] if not o['ft_is_entry'] - and not o['ft_order_side'] == 'stoploss']) - r['exit_reason'] = r.get('exit_reason', "") - r['stake_amount_r'] = fmt_coin(r['stake_amount'], r['quote_currency']) - r['max_stake_amount_r'] = fmt_coin( - r['max_stake_amount'] or r['stake_amount'], r['quote_currency']) - r['profit_abs_r'] = fmt_coin(r['profit_abs'], r['quote_currency']) - r['realized_profit_r'] = fmt_coin(r['realized_profit'], r['quote_currency']) - r['total_profit_abs_r'] = fmt_coin( - r['total_profit_abs'], r['quote_currency']) + r["open_date_hum"] = dt_humanize_delta(r["open_date"]) + r["num_entries"] = len([o for o in r["orders"] if o["ft_is_entry"]]) + r["num_exits"] = len( + [ + o + for o in r["orders"] + if not o["ft_is_entry"] and not o["ft_order_side"] == "stoploss" + ] + ) + r["exit_reason"] = r.get("exit_reason", "") + r["stake_amount_r"] = fmt_coin(r["stake_amount"], r["quote_currency"]) + r["max_stake_amount_r"] = fmt_coin( + r["max_stake_amount"] or r["stake_amount"], r["quote_currency"] + ) + r["profit_abs_r"] = fmt_coin(r["profit_abs"], r["quote_currency"]) + r["realized_profit_r"] = fmt_coin(r["realized_profit"], r["quote_currency"]) + r["total_profit_abs_r"] = fmt_coin(r["total_profit_abs"], r["quote_currency"]) lines = [ - "*Trade ID:* `{trade_id}`" + - (" `(since {open_date_hum})`" if r['is_open'] else ""), + "*Trade ID:* `{trade_id}`" + (" `(since {open_date_hum})`" if r["is_open"] else ""), "*Current Pair:* {pair}", - f"*Direction:* {'`Short`' if r.get('is_short') else '`Long`'}" - + " ` ({leverage}x)`" if r.get('leverage') else "", + ( + f"*Direction:* {'`Short`' if r.get('is_short') else '`Long`'}" + + " ` ({leverage}x)`" + if r.get("leverage") + else "" + ), "*Amount:* `{amount} ({stake_amount_r})`", "*Total invested:* `{max_stake_amount_r}`" if position_adjust else "", - "*Enter Tag:* `{enter_tag}`" if r['enter_tag'] else "", - "*Exit Reason:* `{exit_reason}`" if r['exit_reason'] else "", + "*Enter Tag:* `{enter_tag}`" if r["enter_tag"] else "", + "*Exit Reason:* `{exit_reason}`" if r["exit_reason"] else "", ] if position_adjust: - max_buy_str = (f"/{max_entries + 1}" if (max_entries > 0) else "") - lines.extend([ - "*Number of Entries:* `{num_entries}" + max_buy_str + "`", - "*Number of Exits:* `{num_exits}`" - ]) + max_buy_str = f"/{max_entries + 1}" 
if (max_entries > 0) else "" + lines.extend( + [ + "*Number of Entries:* `{num_entries}" + max_buy_str + "`", + "*Number of Exits:* `{num_exits}`", + ] + ) - lines.extend([ - f"*Open Rate:* `{round_value(r['open_rate'], 8)}`", - f"*Close Rate:* `{round_value(r['close_rate'], 8)}`" if r['close_rate'] else "", - "*Open Date:* `{open_date}`", - "*Close Date:* `{close_date}`" if r['close_date'] else "", - f" \n*Current Rate:* `{round_value(r['current_rate'], 8)}`" if r['is_open'] else "", - ("*Unrealized Profit:* " if r['is_open'] else "*Close Profit: *") - + "`{profit_ratio:.2%}` `({profit_abs_r})`", - ]) + lines.extend( + [ + f"*Open Rate:* `{round_value(r['open_rate'], 8)}`", + f"*Close Rate:* `{round_value(r['close_rate'], 8)}`" if r["close_rate"] else "", + "*Open Date:* `{open_date}`", + "*Close Date:* `{close_date}`" if r["close_date"] else "", + ( + f" \n*Current Rate:* `{round_value(r['current_rate'], 8)}`" + if r["is_open"] + else "" + ), + ("*Unrealized Profit:* " if r["is_open"] else "*Close Profit: *") + + "`{profit_ratio:.2%}` `({profit_abs_r})`", + ] + ) - if r['is_open']: - if r.get('realized_profit'): - lines.extend([ - "*Realized Profit:* `{realized_profit_ratio:.2%} ({realized_profit_r})`", - "*Total Profit:* `{total_profit_ratio:.2%} ({total_profit_abs_r})`" - ]) + if r["is_open"]: + if r.get("realized_profit"): + lines.extend( + [ + "*Realized Profit:* `{realized_profit_ratio:.2%} " + "({realized_profit_r})`", + "*Total Profit:* `{total_profit_ratio:.2%} ({total_profit_abs_r})`", + ] + ) # Append empty line to improve readability lines.append(" ") - if (r['stop_loss_abs'] != r['initial_stop_loss_abs'] - and r['initial_stop_loss_ratio'] is not None): + if ( + r["stop_loss_abs"] != r["initial_stop_loss_abs"] + and r["initial_stop_loss_ratio"] is not None + ): # Adding initial stoploss only if it is different from stoploss - lines.append("*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " - "`({initial_stop_loss_ratio:.2%})`") + lines.append( + "*Initial Stoploss:* `{initial_stop_loss_abs:.8f}` " + "`({initial_stop_loss_ratio:.2%})`" + ) # Adding stoploss and stoploss percentage only if it is not None - lines.append(f"*Stoploss:* `{round_value(r['stop_loss_abs'], 8)}` " + - ("`({stop_loss_ratio:.2%})`" if r['stop_loss_ratio'] else "")) - lines.append(f"*Stoploss distance:* `{round_value(r['stoploss_current_dist'], 8)}` " - "`({stoploss_current_dist_ratio:.2%})`") - if r.get('open_orders'): + lines.append( + f"*Stoploss:* `{round_value(r['stop_loss_abs'], 8)}` " + + ("`({stop_loss_ratio:.2%})`" if r["stop_loss_ratio"] else "") + ) + lines.append( + f"*Stoploss distance:* `{round_value(r['stoploss_current_dist'], 8)}` " + "`({stoploss_current_dist_ratio:.2%})`" + ) + if r.get("open_orders"): lines.append( "*Open Order:* `{open_orders}`" - + ("- `{exit_order_status}`" if r['exit_order_status'] else "")) + + ("- `{exit_order_status}`" if r["exit_order_status"] else "") + ) await self.__send_status_msg(lines, r) @@ -728,15 +809,15 @@ class Telegram(RPCHandler): """ Send status message. 
""" - msg = '' + msg = "" for line in lines: if line: if (len(msg) + len(line) + 1) < MAX_MESSAGE_LENGTH: - msg += line + '\n' + msg += line + "\n" else: await self._send_msg(msg.format(**r)) - msg = "*Trade ID:* `{trade_id}` - continued\n" + line + '\n' + msg = "*Trade ID:* `{trade_id}` - continued\n" + line + "\n" await self._send_msg(msg.format(**r)) @@ -749,9 +830,10 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - fiat_currency = self._config.get('fiat_display_currency', '') + fiat_currency = self._config.get("fiat_display_currency", "") statlist, head, fiat_profit_sum = self._rpc._rpc_status_table( - self._config['stake_currency'], fiat_currency) + self._config["stake_currency"], fiat_currency + ) show_total = not isnan(fiat_profit_sum) and len(statlist) > 1 max_trades_per_msg = 50 @@ -762,21 +844,23 @@ class Telegram(RPCHandler): """ messages_count = max(int(len(statlist) / max_trades_per_msg + 0.99), 1) for i in range(0, messages_count): - trades = statlist[i * max_trades_per_msg:(i + 1) * max_trades_per_msg] + trades = statlist[i * max_trades_per_msg : (i + 1) * max_trades_per_msg] if show_total and i == messages_count - 1: # append total line trades.append(["Total", "", "", f"{fiat_profit_sum:.2f} {fiat_currency}"]) - message = tabulate(trades, - headers=head, - tablefmt='simple') + message = tabulate(trades, headers=head, tablefmt="simple") if show_total and i == messages_count - 1: # insert separators line between Total lines = message.split("\n") message = "\n".join(lines[:-1] + [lines[1]] + [lines[-1]]) - await self._send_msg(f"
<pre>{message}</pre>
", parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_status_table", - query=update.callback_query) + await self._send_msg( + f"
<pre>{message}</pre>
", + parse_mode=ParseMode.HTML, + reload_able=True, + callback_path="update_status_table", + query=update.callback_query, + ) async def _timeunit_stats(self, update: Update, context: CallbackContext, unit: str) -> None: """ @@ -788,45 +872,51 @@ class Telegram(RPCHandler): """ vals = { - 'days': TimeunitMappings('Day', 'Daily', 'days', 'update_daily', 7, '%Y-%m-%d'), - 'weeks': TimeunitMappings('Monday', 'Weekly', 'weeks (starting from Monday)', - 'update_weekly', 8, '%Y-%m-%d'), - 'months': TimeunitMappings('Month', 'Monthly', 'months', 'update_monthly', 6, '%Y-%m'), + "days": TimeunitMappings("Day", "Daily", "days", "update_daily", 7, "%Y-%m-%d"), + "weeks": TimeunitMappings( + "Monday", "Weekly", "weeks (starting from Monday)", "update_weekly", 8, "%Y-%m-%d" + ), + "months": TimeunitMappings("Month", "Monthly", "months", "update_monthly", 6, "%Y-%m"), } val = vals[unit] - stake_cur = self._config['stake_currency'] - fiat_disp_cur = self._config.get('fiat_display_currency', '') + stake_cur = self._config["stake_currency"] + fiat_disp_cur = self._config.get("fiat_display_currency", "") try: timescale = int(context.args[0]) if context.args else val.default except (TypeError, ValueError, IndexError): timescale = val.default - stats = self._rpc._rpc_timeunit_profit( - timescale, - stake_cur, - fiat_disp_cur, - unit - ) + stats = self._rpc._rpc_timeunit_profit(timescale, stake_cur, fiat_disp_cur, unit) stats_tab = tabulate( - [[f"{period['date']:{val.dateformat}} ({period['trade_count']})", - f"{fmt_coin(period['abs_profit'], stats['stake_currency'])}", - f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", - f"{period['rel_profit']:.2%}", - ] for period in stats['data']], + [ + [ + f"{period['date']:{val.dateformat}} ({period['trade_count']})", + f"{fmt_coin(period['abs_profit'], stats['stake_currency'])}", + f"{period['fiat_value']:.2f} {stats['fiat_display_currency']}", + f"{period['rel_profit']:.2%}", + ] + for period in stats["data"] + ], headers=[ f"{val.header} (count)", - f'{stake_cur}', - f'{fiat_disp_cur}', - 'Profit %', - 'Trades', + f"{stake_cur}", + f"{fiat_disp_cur}", + "Profit %", + "Trades", ], - tablefmt='simple') - message = ( - f'{val.message} Profit over the last {timescale} {val.message2}:\n' - f'
<pre>{stats_tab}</pre>
' + tablefmt="simple", + ) + message = ( + f"{val.message} Profit over the last {timescale} {val.message2}:\n" + f"
<pre>{stats_tab}</pre>
" + ) + await self._send_msg( + message, + parse_mode=ParseMode.HTML, + reload_able=True, + callback_path=val.callback, + query=update.callback_query, ) - await self._send_msg(message, parse_mode=ParseMode.HTML, reload_able=True, - callback_path=val.callback, query=update.callback_query) @authorized_only async def _daily(self, update: Update, context: CallbackContext) -> None: @@ -837,7 +927,7 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - await self._timeunit_stats(update, context, 'days') + await self._timeunit_stats(update, context, "days") @authorized_only async def _weekly(self, update: Update, context: CallbackContext) -> None: @@ -848,7 +938,7 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - await self._timeunit_stats(update, context, 'weeks') + await self._timeunit_stats(update, context, "weeks") @authorized_only async def _monthly(self, update: Update, context: CallbackContext) -> None: @@ -859,7 +949,7 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - await self._timeunit_stats(update, context, 'months') + await self._timeunit_stats(update, context, "months") @authorized_only async def _profit(self, update: Update, context: CallbackContext) -> None: @@ -870,8 +960,8 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - stake_cur = self._config['stake_currency'] - fiat_disp_cur = self._config.get('fiat_display_currency', '') + stake_cur = self._config["stake_currency"] + fiat_disp_cur = self._config.get("fiat_display_currency", "") start_date = datetime.fromtimestamp(0) timescale = None @@ -883,47 +973,48 @@ class Telegram(RPCHandler): except (TypeError, ValueError, IndexError): pass - stats = self._rpc._rpc_trade_statistics( - stake_cur, - fiat_disp_cur, - start_date) - profit_closed_coin = stats['profit_closed_coin'] - profit_closed_ratio_mean = stats['profit_closed_ratio_mean'] - profit_closed_percent = stats['profit_closed_percent'] - profit_closed_fiat = stats['profit_closed_fiat'] - profit_all_coin = stats['profit_all_coin'] - profit_all_ratio_mean = stats['profit_all_ratio_mean'] - profit_all_percent = stats['profit_all_percent'] - profit_all_fiat = stats['profit_all_fiat'] - trade_count = stats['trade_count'] + stats = self._rpc._rpc_trade_statistics(stake_cur, fiat_disp_cur, start_date) + profit_closed_coin = stats["profit_closed_coin"] + profit_closed_ratio_mean = stats["profit_closed_ratio_mean"] + profit_closed_percent = stats["profit_closed_percent"] + profit_closed_fiat = stats["profit_closed_fiat"] + profit_all_coin = stats["profit_all_coin"] + profit_all_ratio_mean = stats["profit_all_ratio_mean"] + profit_all_percent = stats["profit_all_percent"] + profit_all_fiat = stats["profit_all_fiat"] + trade_count = stats["trade_count"] first_trade_date = f"{stats['first_trade_humanized']} ({stats['first_trade_date']})" latest_trade_date = f"{stats['latest_trade_humanized']} ({stats['latest_trade_date']})" - avg_duration = stats['avg_duration'] - best_pair = stats['best_pair'] - best_pair_profit_ratio = stats['best_pair_profit_ratio'] - winrate = stats['winrate'] - expectancy = stats['expectancy'] - expectancy_ratio = stats['expectancy_ratio'] + avg_duration = stats["avg_duration"] + best_pair = stats["best_pair"] + best_pair_profit_ratio = stats["best_pair_profit_ratio"] + winrate = stats["winrate"] + expectancy = stats["expectancy"] + expectancy_ratio = stats["expectancy_ratio"] - if stats['trade_count'] == 0: + if stats["trade_count"] == 0: 
markdown_msg = f"No trades yet.\n*Bot started:* `{stats['bot_start_date']}`" else: # Message to display - if stats['closed_trade_count'] > 0: - markdown_msg = ("*ROI:* Closed trades\n" - f"∙ `{fmt_coin(profit_closed_coin, stake_cur)} " - f"({profit_closed_ratio_mean:.2%}) " - f"({profit_closed_percent} \N{GREEK CAPITAL LETTER SIGMA}%)`\n" - f"∙ `{fmt_coin(profit_closed_fiat, fiat_disp_cur)}`\n") + if stats["closed_trade_count"] > 0: + markdown_msg = ( + "*ROI:* Closed trades\n" + f"∙ `{fmt_coin(profit_closed_coin, stake_cur)} " + f"({profit_closed_ratio_mean:.2%}) " + f"({profit_closed_percent} \N{GREEK CAPITAL LETTER SIGMA}%)`\n" + f"∙ `{fmt_coin(profit_closed_fiat, fiat_disp_cur)}`\n" + ) else: markdown_msg = "`No closed trade` \n" - + fiat_all_trades = ( + f"∙ `{fmt_coin(profit_all_fiat, fiat_disp_cur)}`\n" if fiat_disp_cur else "" + ) markdown_msg += ( f"*ROI:* All trades\n" f"∙ `{fmt_coin(profit_all_coin, stake_cur)} " f"({profit_all_ratio_mean:.2%}) " f"({profit_all_percent} \N{GREEK CAPITAL LETTER SIGMA}%)`\n" - f"∙ `{fmt_coin(profit_all_fiat, fiat_disp_cur)}`\n" + f"{fiat_all_trades}" f"*Total Trade Count:* `{trade_count}`\n" f"*Bot started:* `{stats['bot_start_date']}`\n" f"*{'First Trade opened' if not timescale else 'Showing Profit since'}:* " @@ -933,7 +1024,7 @@ class Telegram(RPCHandler): f"*Winrate:* `{winrate:.2%}`\n" f"*Expectancy (Ratio):* `{expectancy:.2f} ({expectancy_ratio:.2f})`" ) - if stats['closed_trade_count'] > 0: + if stats["closed_trade_count"] > 0: markdown_msg += ( f"\n*Avg. Duration:* `{avg_duration}`\n" f"*Best Performing:* `{best_pair}: {best_pair_profit_ratio:.2%}`\n" @@ -946,8 +1037,12 @@ class Telegram(RPCHandler): f" to `{stats['max_drawdown_end']} " f"({fmt_coin(stats['drawdown_low'], stake_cur)})`\n" ) - await self._send_msg(markdown_msg, reload_able=True, callback_path="update_profit", - query=update.callback_query) + await self._send_msg( + markdown_msg, + reload_able=True, + callback_path="update_profit", + query=update.callback_query, + ) @authorized_only async def _stats(self, update: Update, context: CallbackContext) -> None: @@ -958,86 +1053,94 @@ class Telegram(RPCHandler): stats = self._rpc._rpc_stats() reason_map = { - 'roi': 'ROI', - 'stop_loss': 'Stoploss', - 'trailing_stop_loss': 'Trail. Stop', - 'stoploss_on_exchange': 'Stoploss', - 'exit_signal': 'Exit Signal', - 'force_exit': 'Force Exit', - 'emergency_exit': 'Emergency Exit', + "roi": "ROI", + "stop_loss": "Stoploss", + "trailing_stop_loss": "Trail. Stop", + "stoploss_on_exchange": "Stoploss", + "exit_signal": "Exit Signal", + "force_exit": "Force Exit", + "emergency_exit": "Emergency Exit", } exit_reasons_tabulate = [ - [ - reason_map.get(reason, reason), - sum(count.values()), - count['wins'], - count['losses'] - ] for reason, count in stats['exit_reasons'].items() + [reason_map.get(reason, reason), sum(count.values()), count["wins"], count["losses"]] + for reason, count in stats["exit_reasons"].items() ] - exit_reasons_msg = 'No trades yet.' + exit_reasons_msg = "No trades yet." 
for reason in chunks(exit_reasons_tabulate, 25): - exit_reasons_msg = tabulate( - reason, - headers=['Exit Reason', 'Exits', 'Wins', 'Losses'] - ) + exit_reasons_msg = tabulate(reason, headers=["Exit Reason", "Exits", "Wins", "Losses"]) if len(exit_reasons_tabulate) > 25: await self._send_msg(f"```\n{exit_reasons_msg}```", ParseMode.MARKDOWN) - exit_reasons_msg = '' + exit_reasons_msg = "" - durations = stats['durations'] + durations = stats["durations"] duration_msg = tabulate( [ - ['Wins', str(timedelta(seconds=durations['wins'])) - if durations['wins'] is not None else 'N/A'], - ['Losses', str(timedelta(seconds=durations['losses'])) - if durations['losses'] is not None else 'N/A'] + [ + "Wins", + ( + str(timedelta(seconds=durations["wins"])) + if durations["wins"] is not None + else "N/A" + ), + ], + [ + "Losses", + ( + str(timedelta(seconds=durations["losses"])) + if durations["losses"] is not None + else "N/A" + ), + ], ], - headers=['', 'Avg. Duration'] + headers=["", "Avg. Duration"], ) - msg = (f"""```\n{exit_reasons_msg}```\n```\n{duration_msg}```""") + msg = f"""```\n{exit_reasons_msg}```\n```\n{duration_msg}```""" await self._send_msg(msg, ParseMode.MARKDOWN) @authorized_only async def _balance(self, update: Update, context: CallbackContext) -> None: - """ Handler for /balance """ - full_result = context.args and 'full' in context.args - result = self._rpc._rpc_balance(self._config['stake_currency'], - self._config.get('fiat_display_currency', '')) + """Handler for /balance""" + full_result = context.args and "full" in context.args + result = self._rpc._rpc_balance( + self._config["stake_currency"], self._config.get("fiat_display_currency", "") + ) - balance_dust_level = self._config['telegram'].get('balance_dust_level', 0.0) + balance_dust_level = self._config["telegram"].get("balance_dust_level", 0.0) if not balance_dust_level: - balance_dust_level = DUST_PER_COIN.get(self._config['stake_currency'], 1.0) + balance_dust_level = DUST_PER_COIN.get(self._config["stake_currency"], 1.0) - output = '' - if self._config['dry_run']: + output = "" + if self._config["dry_run"]: output += "*Warning:* Simulated balances in Dry Mode.\n" - starting_cap = fmt_coin(result['starting_capital'], self._config['stake_currency']) + starting_cap = fmt_coin(result["starting_capital"], self._config["stake_currency"]) output += f"Starting capital: `{starting_cap}`" - starting_cap_fiat = fmt_coin( - result['starting_capital_fiat'], self._config['fiat_display_currency'] - ) if result['starting_capital_fiat'] > 0 else '' - output += (f" `, {starting_cap_fiat}`.\n" - ) if result['starting_capital_fiat'] > 0 else '.\n' + starting_cap_fiat = ( + fmt_coin(result["starting_capital_fiat"], self._config["fiat_display_currency"]) + if result["starting_capital_fiat"] > 0 + else "" + ) + output += (f" `, {starting_cap_fiat}`.\n") if result["starting_capital_fiat"] > 0 else ".\n" total_dust_balance = 0 total_dust_currencies = 0 - for curr in result['currencies']: - curr_output = '' - if ( - (curr['is_position'] or curr['est_stake'] > balance_dust_level) - and (full_result or curr['is_bot_managed']) + for curr in result["currencies"]: + curr_output = "" + if (curr["is_position"] or curr["est_stake"] > balance_dust_level) and ( + full_result or curr["is_bot_managed"] ): - if curr['is_position']: + if curr["is_position"]: curr_output = ( f"*{curr['currency']}:*\n" f"\t`{curr['side']}: {curr['position']:.8f}`\n" f"\t`Leverage: {curr['leverage']:.1f}`\n" f"\t`Est. 
{curr['stake']}: " - f"{fmt_coin(curr['est_stake'], curr['stake'], False)}`\n") + f"{fmt_coin(curr['est_stake'], curr['stake'], False)}`\n" + ) else: est_stake = fmt_coin( - curr['est_stake' if full_result else 'est_stake_bot'], curr['stake'], False) + curr["est_stake" if full_result else "est_stake_bot"], curr["stake"], False + ) curr_output = ( f"*{curr['currency']}:*\n" @@ -1045,10 +1148,11 @@ class Telegram(RPCHandler): f"\t`Balance: {curr['balance']:.8f}`\n" f"\t`Pending: {curr['used']:.8f}`\n" f"\t`Bot Owned: {curr['bot_owned']:.8f}`\n" - f"\t`Est. {curr['stake']}: {est_stake}`\n") + f"\t`Est. {curr['stake']}: {est_stake}`\n" + ) - elif curr['est_stake'] <= balance_dust_level: - total_dust_balance += curr['est_stake'] + elif curr["est_stake"] <= balance_dust_level: + total_dust_balance += curr["est_stake"] total_dust_currencies += 1 # Handle overflowing message length @@ -1064,21 +1168,23 @@ class Telegram(RPCHandler): f"{plural(total_dust_currencies, 'Currency', 'Currencies')} " f"(< {balance_dust_level} {result['stake']}):*\n" f"\t`Est. {result['stake']}: " - f"{fmt_coin(total_dust_balance, result['stake'], False)}`\n") - tc = result['trade_count'] > 0 - stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else '' - fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else '' - value = fmt_coin( - result['value' if full_result else 'value_bot'], result['symbol'], False) + f"{fmt_coin(total_dust_balance, result['stake'], False)}`\n" + ) + tc = result["trade_count"] > 0 + stake_improve = f" `({result['starting_capital_ratio']:.2%})`" if tc else "" + fiat_val = f" `({result['starting_capital_fiat_ratio']:.2%})`" if tc else "" + value = fmt_coin(result["value" if full_result else "value_bot"], result["symbol"], False) total_stake = fmt_coin( - result['total' if full_result else 'total_bot'], result['stake'], False) + result["total" if full_result else "total_bot"], result["stake"], False + ) output += ( f"\n*Estimated Value{' (Bot managed assets only)' if not full_result else ''}*:\n" f"\t`{result['stake']}: {total_stake}`{stake_improve}\n" f"\t`{result['symbol']}: {value}`{fiat_val}\n" ) - await self._send_msg(output, reload_able=True, callback_path="update_balance", - query=update.callback_query) + await self._send_msg( + output, reload_able=True, callback_path="update_balance", query=update.callback_query + ) @authorized_only async def _start(self, update: Update, context: CallbackContext) -> None: @@ -1153,12 +1259,13 @@ class Telegram(RPCHandler): trade_id = context.args[0] await self._force_exit_action(trade_id) else: - fiat_currency = self._config.get('fiat_display_currency', '') + fiat_currency = self._config.get("fiat_display_currency", "") try: statlist, _, _ = self._rpc._rpc_status_table( - self._config['stake_currency'], fiat_currency) + self._config["stake_currency"], fiat_currency + ) except RPCException: - await self._send_msg(msg='No open trade found.') + await self._send_msg(msg="No open trade found.") return trades = [] for trade in statlist: @@ -1166,15 +1273,17 @@ class Telegram(RPCHandler): trade_buttons = [ InlineKeyboardButton(text=trade[1], callback_data=f"force_exit__{trade[0]}") - for trade in trades] + for trade in trades + ] buttons_aligned = self._layout_inline_keyboard_onecol(trade_buttons) - buttons_aligned.append([InlineKeyboardButton( - text='Cancel', callback_data='force_exit__cancel')]) + buttons_aligned.append( + [InlineKeyboardButton(text="Cancel", callback_data="force_exit__cancel")] + ) await 
self._send_msg(msg="Which trade?", keyboard=buttons_aligned) async def _force_exit_action(self, trade_id: str): - if trade_id != 'cancel': + if trade_id != "cancel": try: loop = asyncio.get_running_loop() # Workaround to avoid nested loops @@ -1185,10 +1294,10 @@ class Telegram(RPCHandler): async def _force_exit_inline(self, update: Update, _: CallbackContext) -> None: if update.callback_query: query = update.callback_query - if query.data and '__' in query.data: + if query.data and "__" in query.data: # Input data is "force_exit__" - trade_id = query.data.split("__")[1].split(' ')[0] - if trade_id == 'cancel': + trade_id = query.data.split("__")[1].split(" ")[0] + if trade_id == "cancel": await query.answer() await query.edit_message_text(text="Force exit canceled.") return @@ -1196,17 +1305,20 @@ class Telegram(RPCHandler): await query.answer() if trade: await query.edit_message_text( - text=f"Manually exiting Trade #{trade_id}, {trade.pair}") + text=f"Manually exiting Trade #{trade_id}, {trade.pair}" + ) await self._force_exit_action(trade_id) else: await query.edit_message_text(text=f"Trade {trade_id} not found.") async def _force_enter_action(self, pair, price: Optional[float], order_side: SignalDirection): - if pair != 'cancel': + if pair != "cancel": try: + @safe_async_db def _force_enter(): self._rpc._rpc_force_entry(pair, price, order_side=order_side) + loop = asyncio.get_running_loop() # Workaround to avoid nested loops await loop.run_in_executor(None, _force_enter) @@ -1217,15 +1329,15 @@ class Telegram(RPCHandler): async def _force_enter_inline(self, update: Update, _: CallbackContext) -> None: if update.callback_query: query = update.callback_query - if query.data and '__' in query.data: + if query.data and "__" in query.data: # Input data is "force_enter___" payload = query.data.split("__")[1] - if payload == 'cancel': + if payload == "cancel": await query.answer() await query.edit_message_text(text="Force enter canceled.") return - if payload and '_||_' in payload: - pair, side = payload.split('_||_') + if payload and "_||_" in payload: + pair, side = payload.split("_||_") order_side = SignalDirection(side) await query.answer() await query.edit_message_text(text=f"Manually entering {order_side} for {pair}") @@ -1233,17 +1345,20 @@ class Telegram(RPCHandler): @staticmethod def _layout_inline_keyboard( - buttons: List[InlineKeyboardButton], cols=3) -> List[List[InlineKeyboardButton]]: - return [buttons[i:i + cols] for i in range(0, len(buttons), cols)] + buttons: List[InlineKeyboardButton], cols=3 + ) -> List[List[InlineKeyboardButton]]: + return [buttons[i : i + cols] for i in range(0, len(buttons), cols)] @staticmethod def _layout_inline_keyboard_onecol( - buttons: List[InlineKeyboardButton], cols=1) -> List[List[InlineKeyboardButton]]: - return [buttons[i:i + cols] for i in range(0, len(buttons), cols)] + buttons: List[InlineKeyboardButton], cols=1 + ) -> List[List[InlineKeyboardButton]]: + return [buttons[i : i + cols] for i in range(0, len(buttons), cols)] @authorized_only async def _force_enter( - self, update: Update, context: CallbackContext, order_side: SignalDirection) -> None: + self, update: Update, context: CallbackContext, order_side: SignalDirection + ) -> None: """ Handler for /forcelong and `/forceshort Buys a pair trade at the given or current price @@ -1256,19 +1371,21 @@ class Telegram(RPCHandler): price = float(context.args[1]) if len(context.args) > 1 else None await self._force_enter_action(pair, price, order_side) else: - whitelist = 
self._rpc._rpc_whitelist()['whitelist'] + whitelist = self._rpc._rpc_whitelist()["whitelist"] pair_buttons = [ InlineKeyboardButton( text=pair, callback_data=f"force_enter__{pair}_||_{order_side}" - ) for pair in sorted(whitelist) + ) + for pair in sorted(whitelist) ] buttons_aligned = self._layout_inline_keyboard(pair_buttons) - buttons_aligned.append([InlineKeyboardButton(text='Cancel', - callback_data='force_enter__cancel')]) - await self._send_msg(msg="Which pair?", - keyboard=buttons_aligned, - query=update.callback_query) + buttons_aligned.append( + [InlineKeyboardButton(text="Cancel", callback_data="force_enter__cancel")] + ) + await self._send_msg( + msg="Which pair?", keyboard=buttons_aligned, query=update.callback_query + ) @authorized_only async def _trades(self, update: Update, context: CallbackContext) -> None: @@ -1279,27 +1396,31 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - stake_cur = self._config['stake_currency'] + stake_cur = self._config["stake_currency"] try: nrecent = int(context.args[0]) if context.args else 10 except (TypeError, ValueError, IndexError): nrecent = 10 - trades = self._rpc._rpc_trade_history( - nrecent - ) + trades = self._rpc._rpc_trade_history(nrecent) trades_tab = tabulate( - [[dt_humanize_delta(dt_from_ts(trade['close_timestamp'])), - trade['pair'] + " (#" + str(trade['trade_id']) + ")", - f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})"] - for trade in trades['trades']], - headers=[ - 'Close Date', - 'Pair (ID)', - f'Profit ({stake_cur})', + [ + [ + dt_humanize_delta(dt_from_ts(trade["close_timestamp"])), + trade["pair"] + " (#" + str(trade["trade_id"]) + ")", + f"{(trade['close_profit']):.2%} ({trade['close_profit_abs']})", + ] + for trade in trades["trades"] ], - tablefmt='simple') - message = (f"{min(trades['trades_count'], nrecent)} recent trades:\n" - + (f"
<pre>{trades_tab}</pre>
" if trades['trades_count'] > 0 else '')) + headers=[ + "Close Date", + "Pair (ID)", + f"Profit ({stake_cur})", + ], + tablefmt="simple", + ) + message = f"{min(trades['trades_count'], nrecent)} recent trades:\n" + ( + f"
<pre>{trades_tab}</pre>
" if trades["trades_count"] > 0 else "" + ) await self._send_msg(message, parse_mode=ParseMode.HTML) @authorized_only @@ -1317,7 +1438,7 @@ class Telegram(RPCHandler): msg = self._rpc._rpc_delete(trade_id) await self._send_msg( f"`{msg['result_msg']}`\n" - 'Please make sure to take care of this asset on the exchange manually.' + "Please make sure to take care of this asset on the exchange manually." ) @authorized_only @@ -1333,7 +1454,7 @@ class Telegram(RPCHandler): raise RPCException("Trade-id not set.") trade_id = int(context.args[0]) self._rpc._rpc_cancel_open_order(trade_id) - await self._send_msg('Open order canceled.') + await self._send_msg("Open order canceled.") @authorized_only async def _performance(self, update: Update, context: CallbackContext) -> None: @@ -1351,7 +1472,8 @@ class Telegram(RPCHandler): f"{i + 1}.\t {trade['pair']}\t" f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} " f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})\n") + f"({trade['count']})\n" + ) if len(output + stat_line) >= MAX_MESSAGE_LENGTH: await self._send_msg(output, parse_mode=ParseMode.HTML) @@ -1359,9 +1481,13 @@ class Telegram(RPCHandler): else: output += stat_line - await self._send_msg(output, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_performance", - query=update.callback_query) + await self._send_msg( + output, + parse_mode=ParseMode.HTML, + reload_able=True, + callback_path="update_performance", + query=update.callback_query, + ) @authorized_only async def _enter_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1383,7 +1509,8 @@ class Telegram(RPCHandler): f"{i + 1}.\t `{trade['enter_tag']}\t" f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} " f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})`\n") + f"({trade['count']})`\n" + ) if len(output + stat_line) >= MAX_MESSAGE_LENGTH: await self._send_msg(output, parse_mode=ParseMode.MARKDOWN) @@ -1391,9 +1518,13 @@ class Telegram(RPCHandler): else: output += stat_line - await self._send_msg(output, parse_mode=ParseMode.MARKDOWN, - reload_able=True, callback_path="update_enter_tag_performance", - query=update.callback_query) + await self._send_msg( + output, + parse_mode=ParseMode.MARKDOWN, + reload_able=True, + callback_path="update_enter_tag_performance", + query=update.callback_query, + ) @authorized_only async def _exit_reason_performance(self, update: Update, context: CallbackContext) -> None: @@ -1415,7 +1546,8 @@ class Telegram(RPCHandler): f"{i + 1}.\t `{trade['exit_reason']}\t" f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} " f"({trade['profit_ratio']:.2%}) " - f"({trade['count']})`\n") + f"({trade['count']})`\n" + ) if len(output + stat_line) >= MAX_MESSAGE_LENGTH: await self._send_msg(output, parse_mode=ParseMode.MARKDOWN) @@ -1423,9 +1555,13 @@ class Telegram(RPCHandler): else: output += stat_line - await self._send_msg(output, parse_mode=ParseMode.MARKDOWN, - reload_able=True, callback_path="update_exit_reason_performance", - query=update.callback_query) + await self._send_msg( + output, + parse_mode=ParseMode.MARKDOWN, + reload_able=True, + callback_path="update_exit_reason_performance", + query=update.callback_query, + ) @authorized_only async def _mix_tag_performance(self, update: Update, context: CallbackContext) -> None: @@ -1447,7 +1583,8 @@ class Telegram(RPCHandler): f"{i + 1}.\t `{trade['mix_tag']}\t" f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} " f"({trade['profit_ratio']:.2%}) " - 
f"({trade['count']})`\n") + f"({trade['count']})`\n" + ) if len(output + stat_line) >= MAX_MESSAGE_LENGTH: await self._send_msg(output, parse_mode=ParseMode.MARKDOWN) @@ -1455,9 +1592,13 @@ class Telegram(RPCHandler): else: output += stat_line - await self._send_msg(output, parse_mode=ParseMode.MARKDOWN, - reload_able=True, callback_path="update_mix_tag_performance", - query=update.callback_query) + await self._send_msg( + output, + parse_mode=ParseMode.MARKDOWN, + reload_able=True, + callback_path="update_mix_tag_performance", + query=update.callback_query, + ) @authorized_only async def _count(self, update: Update, context: CallbackContext) -> None: @@ -1469,14 +1610,20 @@ class Telegram(RPCHandler): :return: None """ counts = self._rpc._rpc_count() - message = tabulate({k: [v] for k, v in counts.items()}, - headers=['current', 'max', 'total stake'], - tablefmt='simple') + message = tabulate( + {k: [v] for k, v in counts.items()}, + headers=["current", "max", "total stake"], + tablefmt="simple", + ) message = f"
<pre>{message}</pre>
" logger.debug(message) - await self._send_msg(message, parse_mode=ParseMode.HTML, - reload_able=True, callback_path="update_count", - query=update.callback_query) + await self._send_msg( + message, + parse_mode=ParseMode.HTML, + reload_able=True, + callback_path="update_count", + query=update.callback_query, + ) @authorized_only async def _locks(self, update: Update, context: CallbackContext) -> None: @@ -1485,17 +1632,18 @@ class Telegram(RPCHandler): Returns the currently active locks """ rpc_locks = self._rpc._rpc_locks() - if not rpc_locks['locks']: - await self._send_msg('No active locks.', parse_mode=ParseMode.HTML) + if not rpc_locks["locks"]: + await self._send_msg("No active locks.", parse_mode=ParseMode.HTML) - for locks in chunks(rpc_locks['locks'], 25): - message = tabulate([[ - lock['id'], - lock['pair'], - lock['lock_end_time'], - lock['reason']] for lock in locks], - headers=['ID', 'Pair', 'Until', 'Reason'], - tablefmt='simple') + for locks in chunks(rpc_locks["locks"], 25): + message = tabulate( + [ + [lock["id"], lock["pair"], lock["lock_end_time"], lock["reason"]] + for lock in locks + ], + headers=["ID", "Pair", "Until", "Reason"], + tablefmt="simple", + ) message = f"
<pre>{escape(message)}</pre>
" logger.debug(message) await self._send_msg(message, parse_mode=ParseMode.HTML) @@ -1528,9 +1676,9 @@ class Telegram(RPCHandler): if context.args: if "sorted" in context.args: - whitelist['whitelist'] = sorted(whitelist['whitelist']) + whitelist["whitelist"] = sorted(whitelist["whitelist"]) if "baseonly" in context.args: - whitelist['whitelist'] = [pair.split("/")[0] for pair in whitelist['whitelist']] + whitelist["whitelist"] = [pair.split("/")[0] for pair in whitelist["whitelist"]] message = f"Using whitelist `{whitelist['method']}` with {whitelist['length']} pairs\n" message += f"`{', '.join(whitelist['whitelist'])}`" @@ -1548,10 +1696,10 @@ class Telegram(RPCHandler): async def send_blacklist_msg(self, blacklist: Dict): errmsgs = [] - for _, error in blacklist['errors'].items(): + for _, error in blacklist["errors"].items(): errmsgs.append(f"Error: {error['error_msg']}") if errmsgs: - await self._send_msg('\n'.join(errmsgs)) + await self._send_msg("\n".join(errmsgs)) message = f"Blacklist contains {blacklist['length']} pairs\n" message += f"`{', '.join(blacklist['blacklist'])}`" @@ -1577,21 +1725,23 @@ class Telegram(RPCHandler): limit = int(context.args[0]) if context.args else 10 except (TypeError, ValueError, IndexError): limit = 10 - logs = RPC._rpc_get_logs(limit)['logs'] - msgs = '' + logs = RPC._rpc_get_logs(limit)["logs"] + msgs = "" msg_template = "*{}* {}: {} \\- `{}`" for logrec in logs: - msg = msg_template.format(escape_markdown(logrec[0], version=2), - escape_markdown(logrec[2], version=2), - escape_markdown(logrec[3], version=2), - escape_markdown(logrec[4], version=2)) + msg = msg_template.format( + escape_markdown(logrec[0], version=2), + escape_markdown(logrec[2], version=2), + escape_markdown(logrec[3], version=2), + escape_markdown(logrec[4], version=2), + ) if len(msgs + msg) + 10 >= MAX_MESSAGE_LENGTH: # Send message immediately if it would become too long await self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) - msgs = msg + '\n' + msgs = msg + "\n" else: # Append message to messages to send - msgs += msg + '\n' + msgs += msg + "\n" if msgs: await self._send_msg(msgs, parse_mode=ParseMode.MARKDOWN_V2) @@ -1604,13 +1754,12 @@ class Telegram(RPCHandler): """ edge_pairs = self._rpc._rpc_edge() if not edge_pairs: - message = 'Edge only validated following pairs:' + message = "Edge only validated following pairs:" await self._send_msg(message, parse_mode=ParseMode.HTML) for chunk in chunks(edge_pairs, 25): - edge_pairs_tab = tabulate(chunk, headers='keys', tablefmt='simple') - message = (f'Edge only validated following pairs:\n' - f'
<pre>{edge_pairs_tab}</pre>
') + edge_pairs_tab = tabulate(chunk, headers="keys", tablefmt="simple") + message = f"Edge only validated following pairs:\n
<pre>{edge_pairs_tab}</pre>
" await self._send_msg(message, parse_mode=ParseMode.HTML) @@ -1623,14 +1772,17 @@ class Telegram(RPCHandler): :param update: message update :return: None """ - force_enter_text = ("*/forcelong []:* `Instantly buys the given pair. " - "Optionally takes a rate at which to buy " - "(only applies to limit orders).` \n" - ) + force_enter_text = ( + "*/forcelong []:* `Instantly buys the given pair. " + "Optionally takes a rate at which to buy " + "(only applies to limit orders).` \n" + ) if self._rpc._freqtrade.trading_mode != TradingMode.SPOT: - force_enter_text += ("*/forceshort []:* `Instantly shorts the given pair. " - "Optionally takes a rate at which to sell " - "(only applies to limit orders).` \n") + force_enter_text += ( + "*/forceshort []:* `Instantly shorts the given pair. " + "Optionally takes a rate at which to sell " + "(only applies to limit orders).` \n" + ) message = ( "_Bot Control_\n" "------------\n" @@ -1646,7 +1798,6 @@ class Telegram(RPCHandler): "*/cancel_open_order :* `Cancels open orders for trade. " "Only valid when the trade has open orders.`\n" "*/coo |all:* `Alias to /cancel_open_order`\n" - "*/whitelist [sorted] [baseonly]:* `Show current whitelist. Optionally in " "order and/or only displaying the base currency of each pairing.`\n" "*/blacklist [pair]:* `Show current blacklist, or adds one or more pairs " @@ -1655,7 +1806,6 @@ class Telegram(RPCHandler): "`Delete pair / pattern from blacklist. Will reset on reload_conf.` \n" "*/reload_config:* `Reload configuration file` \n" "*/unlock :* `Unlock this Pair (or this lock id if it's numeric)`\n" - "_Current state_\n" "------------\n" "*/show_config:* `Show running configuration` \n" @@ -1671,7 +1821,6 @@ class Telegram(RPCHandler): "`the currently set market direction will be output.` \n" "*/list_custom_data :* `List custom_data for Trade ID & Key combo.`\n" "`If no Key is supplied it will list all key-value pairs found for that Trade ID.`" - "_Statistics_\n" "------------\n" "*/status |[table]:* `Lists all open trades`\n" @@ -1694,7 +1843,7 @@ class Telegram(RPCHandler): "Avg. 
holding durations for buys and sells.`\n" "*/help:* `This help message`\n" "*/version:* `Show version`\n" - ) + ) await self._send_msg(message, parse_mode=ParseMode.MARKDOWN) @@ -1720,9 +1869,9 @@ class Telegram(RPCHandler): :return: None """ strategy_version = self._rpc._freqtrade.strategy.version() - version_string = f'*Version:* `{__version__}`' + version_string = f"*Version:* `{__version__}`" if strategy_version is not None: - version_string += f'\n*Strategy version: * `{strategy_version}`' + version_string += f"\n*Strategy version: * `{strategy_version}`" await self._send_msg(version_string) @@ -1737,7 +1886,7 @@ class Telegram(RPCHandler): """ val = RPC._rpc_show_config(self._config, self._rpc._freqtrade.state) - if val['trailing_stop']: + if val["trailing_stop"]: sl_info = ( f"*Initial Stoploss:* `{val['stoploss']}`\n" f"*Trailing stop positive:* `{val['trailing_stop_positive']}`\n" @@ -1748,7 +1897,7 @@ class Telegram(RPCHandler): else: sl_info = f"*Stoploss:* `{val['stoploss']}`\n" - if val['position_adjustment_enable']: + if val["position_adjustment_enable"]: pa_info = ( f"*Position adjustment:* On\n" f"*Max enter position adjustment:* `{val['max_entry_position_adjustment']}`\n" @@ -1790,9 +1939,7 @@ class Telegram(RPCHandler): results = self._rpc._rpc_list_custom_data(trade_id, key) messages = [] if len(results) > 0: - messages.append( - 'Found custom-data entr' + ('ies: ' if len(results) > 1 else 'y: ') - ) + messages.append("Found custom-data entr" + ("ies: " if len(results) > 1 else "y: ")) for result in results: lines = [ f"*Key:* `{result['cd_key']}`", @@ -1801,7 +1948,7 @@ class Telegram(RPCHandler): f"*Type:* `{result['cd_type']}`", f"*Value:* `{result['cd_value']}`", f"*Create Date:* `{format_date(result['created_at'])}`", - f"*Update Date:* `{format_date(result['updated_at'])}`" + f"*Update Date:* `{format_date(result['updated_at'])}`", ] # Filter empty lines using list-comprehension messages.append("\n".join([line for line in lines if line])) @@ -1819,12 +1966,20 @@ class Telegram(RPCHandler): except RPCException as e: await self._send_msg(str(e)) - async def _update_msg(self, query: CallbackQuery, msg: str, callback_path: str = "", - reload_able: bool = False, parse_mode: str = ParseMode.MARKDOWN) -> None: + async def _update_msg( + self, + query: CallbackQuery, + msg: str, + callback_path: str = "", + reload_able: bool = False, + parse_mode: str = ParseMode.MARKDOWN, + ) -> None: if reload_able: - reply_markup = InlineKeyboardMarkup([ - [InlineKeyboardButton("Refresh", callback_data=callback_path)], - ]) + reply_markup = InlineKeyboardMarkup( + [ + [InlineKeyboardButton("Refresh", callback_data=callback_path)], + ] + ) else: reply_markup = InlineKeyboardMarkup([[]]) msg += f"\nUpdated: {datetime.now().ctime()}" @@ -1833,24 +1988,26 @@ class Telegram(RPCHandler): try: await query.edit_message_text( - text=msg, - parse_mode=parse_mode, - reply_markup=reply_markup + text=msg, parse_mode=parse_mode, reply_markup=reply_markup ) except BadRequest as e: - if 'not modified' in e.message.lower(): + if "not modified" in e.message.lower(): pass else: - logger.warning('TelegramError: %s', e.message) + logger.warning("TelegramError: %s", e.message) except TelegramError as telegram_err: - logger.warning('TelegramError: %s! Giving up on that message.', telegram_err.message) + logger.warning("TelegramError: %s! 
Giving up on that message.", telegram_err.message) - async def _send_msg(self, msg: str, parse_mode: str = ParseMode.MARKDOWN, - disable_notification: bool = False, - keyboard: Optional[List[List[InlineKeyboardButton]]] = None, - callback_path: str = "", - reload_able: bool = False, - query: Optional[CallbackQuery] = None) -> None: + async def _send_msg( + self, + msg: str, + parse_mode: str = ParseMode.MARKDOWN, + disable_notification: bool = False, + keyboard: Optional[List[List[InlineKeyboardButton]]] = None, + callback_path: str = "", + reload_able: bool = False, + query: Optional[CallbackQuery] = None, + ) -> None: """ Send given markdown message :param msg: message @@ -1860,12 +2017,18 @@ class Telegram(RPCHandler): """ reply_markup: Union[InlineKeyboardMarkup, ReplyKeyboardMarkup] if query: - await self._update_msg(query=query, msg=msg, parse_mode=parse_mode, - callback_path=callback_path, reload_able=reload_able) + await self._update_msg( + query=query, + msg=msg, + parse_mode=parse_mode, + callback_path=callback_path, + reload_able=reload_able, + ) return - if reload_able and self._config['telegram'].get('reload', True): - reply_markup = InlineKeyboardMarkup([ - [InlineKeyboardButton("Refresh", callback_data=callback_path)]]) + if reload_able and self._config["telegram"].get("reload", True): + reply_markup = InlineKeyboardMarkup( + [[InlineKeyboardButton("Refresh", callback_data=callback_path)]] + ) else: if keyboard is not None: reply_markup = InlineKeyboardMarkup(keyboard) @@ -1874,7 +2037,7 @@ class Telegram(RPCHandler): try: try: await self._app.bot.send_message( - self._config['telegram']['chat_id'], + self._config["telegram"]["chat_id"], text=msg, parse_mode=parse_mode, reply_markup=reply_markup, @@ -1884,21 +2047,17 @@ class Telegram(RPCHandler): # Sometimes the telegram server resets the current connection, # if this is the case we send the message again. logger.warning( - 'Telegram NetworkError: %s! Trying one more time.', - network_err.message + "Telegram NetworkError: %s! Trying one more time.", network_err.message ) await self._app.bot.send_message( - self._config['telegram']['chat_id'], + self._config["telegram"]["chat_id"], text=msg, parse_mode=parse_mode, reply_markup=reply_markup, disable_notification=disable_notification, ) except TelegramError as telegram_err: - logger.warning( - 'TelegramError: %s! Giving up on that message.', - telegram_err.message - ) + logger.warning("TelegramError: %s! Giving up on that message.", telegram_err.message) @authorized_only async def _changemarketdir(self, update: Update, context: CallbackContext) -> None: @@ -1924,14 +2083,20 @@ class Telegram(RPCHandler): if new_market_dir is not None: self._rpc._update_market_direction(new_market_dir) - await self._send_msg("Successfully updated market direction" - f" from *{old_market_dir}* to *{new_market_dir}*.") + await self._send_msg( + "Successfully updated market direction" + f" from *{old_market_dir}* to *{new_market_dir}*." + ) else: - raise RPCException("Invalid market direction provided. \n" - "Valid market directions: *long, short, even, none*") + raise RPCException( + "Invalid market direction provided. \n" + "Valid market directions: *long, short, even, none*" + ) elif context.args is not None and len(context.args) == 0: old_market_dir = self._rpc._get_market_direction() await self._send_msg(f"Currently set market direction: *{old_market_dir}*") else: - raise RPCException("Invalid usage of command /marketdir. 
\n" - "Usage: */marketdir [short | long | even | none]*") + raise RPCException( + "Invalid usage of command /marketdir. \n" + "Usage: */marketdir [short | long | even | none]*" + ) diff --git a/freqtrade/rpc/webhook.py b/freqtrade/rpc/webhook.py index 9b12b7a21..d67d654f0 100644 --- a/freqtrade/rpc/webhook.py +++ b/freqtrade/rpc/webhook.py @@ -1,6 +1,7 @@ """ This module manages webhook communication """ + import logging import time from typing import Any, Dict, Optional @@ -15,11 +16,11 @@ from freqtrade.rpc.rpc_types import RPCSendMsg logger = logging.getLogger(__name__) -logger.debug('Included module rpc.webhook ...') +logger.debug("Included module rpc.webhook ...") class Webhook(RPCHandler): - """ This class handles all webhook communication """ + """This class handles all webhook communication""" def __init__(self, rpc: RPC, config: Config) -> None: """ @@ -30,11 +31,11 @@ class Webhook(RPCHandler): """ super().__init__(rpc, config) - self._url = self._config['webhook']['url'] - self._format = self._config['webhook'].get('format', 'form') - self._retries = self._config['webhook'].get('retries', 0) - self._retry_delay = self._config['webhook'].get('retry_delay', 0.1) - self._timeout = self._config['webhook'].get('timeout', 10) + self._url = self._config["webhook"]["url"] + self._format = self._config["webhook"].get("format", "form") + self._retries = self._config["webhook"].get("retries", 0) + self._retry_delay = self._config["webhook"].get("retry_delay", 0.1) + self._timeout = self._config["webhook"].get("timeout", 10) def cleanup(self) -> None: """ @@ -44,54 +45,59 @@ class Webhook(RPCHandler): pass def _get_value_dict(self, msg: RPCSendMsg) -> Optional[Dict[str, Any]]: - whconfig = self._config['webhook'] - if msg['type'].value in whconfig: + whconfig = self._config["webhook"] + if msg["type"].value in whconfig: # Explicit types should have priority - valuedict = whconfig.get(msg['type'].value) + valuedict = whconfig.get(msg["type"].value) # Deprecated 2022.10 - only keep generic method. 
- elif msg['type'] in [RPCMessageType.ENTRY]: - valuedict = whconfig.get('webhookentry') - elif msg['type'] in [RPCMessageType.ENTRY_CANCEL]: - valuedict = whconfig.get('webhookentrycancel') - elif msg['type'] in [RPCMessageType.ENTRY_FILL]: - valuedict = whconfig.get('webhookentryfill') - elif msg['type'] == RPCMessageType.EXIT: - valuedict = whconfig.get('webhookexit') - elif msg['type'] == RPCMessageType.EXIT_FILL: - valuedict = whconfig.get('webhookexitfill') - elif msg['type'] == RPCMessageType.EXIT_CANCEL: - valuedict = whconfig.get('webhookexitcancel') - elif msg['type'] in (RPCMessageType.STATUS, - RPCMessageType.STARTUP, - RPCMessageType.EXCEPTION, - RPCMessageType.WARNING): - valuedict = whconfig.get('webhookstatus') - elif msg['type'] in ( - RPCMessageType.PROTECTION_TRIGGER, - RPCMessageType.PROTECTION_TRIGGER_GLOBAL, - RPCMessageType.WHITELIST, - RPCMessageType.ANALYZED_DF, - RPCMessageType.NEW_CANDLE, - RPCMessageType.STRATEGY_MSG): + elif msg["type"] in [RPCMessageType.ENTRY]: + valuedict = whconfig.get("webhookentry") + elif msg["type"] in [RPCMessageType.ENTRY_CANCEL]: + valuedict = whconfig.get("webhookentrycancel") + elif msg["type"] in [RPCMessageType.ENTRY_FILL]: + valuedict = whconfig.get("webhookentryfill") + elif msg["type"] == RPCMessageType.EXIT: + valuedict = whconfig.get("webhookexit") + elif msg["type"] == RPCMessageType.EXIT_FILL: + valuedict = whconfig.get("webhookexitfill") + elif msg["type"] == RPCMessageType.EXIT_CANCEL: + valuedict = whconfig.get("webhookexitcancel") + elif msg["type"] in ( + RPCMessageType.STATUS, + RPCMessageType.STARTUP, + RPCMessageType.EXCEPTION, + RPCMessageType.WARNING, + ): + valuedict = whconfig.get("webhookstatus") + elif msg["type"] in ( + RPCMessageType.PROTECTION_TRIGGER, + RPCMessageType.PROTECTION_TRIGGER_GLOBAL, + RPCMessageType.WHITELIST, + RPCMessageType.ANALYZED_DF, + RPCMessageType.NEW_CANDLE, + RPCMessageType.STRATEGY_MSG, + ): # Don't fail for non-implemented types return None return valuedict def send_msg(self, msg: RPCSendMsg) -> None: - """ Send a message to telegram channel """ + """Send a message to telegram channel""" try: - valuedict = self._get_value_dict(msg) if not valuedict: - logger.debug("Message type '%s' not configured for webhooks", msg['type']) + logger.debug("Message type '%s' not configured for webhooks", msg["type"]) return payload = {key: value.format(**msg) for (key, value) in valuedict.items()} self._send_msg(payload) except KeyError as exc: - logger.exception("Problem calling Webhook. Please check your webhook configuration. " - "Exception: %s", exc) + logger.exception( + "Problem calling Webhook. Please check your webhook configuration. 
" + "Exception: %s", + exc, + ) def _send_msg(self, payload: dict) -> None: """do the actual call to the webhook""" @@ -107,16 +113,19 @@ class Webhook(RPCHandler): attempts += 1 try: - if self._format == 'form': + if self._format == "form": response = post(self._url, data=payload, timeout=self._timeout) - elif self._format == 'json': + elif self._format == "json": response = post(self._url, json=payload, timeout=self._timeout) - elif self._format == 'raw': - response = post(self._url, data=payload['data'], - headers={'Content-Type': 'text/plain'}, - timeout=self._timeout) + elif self._format == "raw": + response = post( + self._url, + data=payload["data"], + headers={"Content-Type": "text/plain"}, + timeout=self._timeout, + ) else: - raise NotImplementedError(f'Unknown format: {self._format}') + raise NotImplementedError(f"Unknown format: {self._format}") # Throw a RequestException if the post was not successful response.raise_for_status() diff --git a/freqtrade/strategy/__init__.py b/freqtrade/strategy/__init__.py index 2d23bcd4d..bb21100c4 100644 --- a/freqtrade/strategy/__init__.py +++ b/freqtrade/strategy/__init__.py @@ -1,9 +1,22 @@ # flake8: noqa: F401 -from freqtrade.exchange import (timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date, - timeframe_to_prev_date, timeframe_to_seconds) +from freqtrade.exchange import ( + timeframe_to_minutes, + timeframe_to_msecs, + timeframe_to_next_date, + timeframe_to_prev_date, + timeframe_to_seconds, +) from freqtrade.strategy.informative_decorator import informative from freqtrade.strategy.interface import IStrategy -from freqtrade.strategy.parameters import (BooleanParameter, CategoricalParameter, DecimalParameter, - IntParameter, RealParameter) -from freqtrade.strategy.strategy_helper import (merge_informative_pair, stoploss_from_absolute, - stoploss_from_open) +from freqtrade.strategy.parameters import ( + BooleanParameter, + CategoricalParameter, + DecimalParameter, + IntParameter, + RealParameter, +) +from freqtrade.strategy.strategy_helper import ( + merge_informative_pair, + stoploss_from_absolute, + stoploss_from_open, +) diff --git a/freqtrade/strategy/hyper.py b/freqtrade/strategy/hyper.py index d38110a2a..8362b754a 100644 --- a/freqtrade/strategy/hyper.py +++ b/freqtrade/strategy/hyper.py @@ -2,6 +2,7 @@ IHyperStrategy interface, hyperoptable Parameter class. This module defines a base class for auto-hyperoptable strategies. """ + import logging from pathlib import Path from typing import Any, Dict, Iterator, List, Optional, Tuple, Type, Union @@ -32,20 +33,22 @@ class HyperStrategyMixin: self.ft_protection_params: List[BaseParameter] = [] params = self.load_params_from_file() - params = params.get('params', {}) + params = params.get("params", {}) self._ft_params_from_file = params # Init/loading of parameters is done as part of ft_bot_start(). def enumerate_parameters( - self, category: Optional[str] = None) -> Iterator[Tuple[str, BaseParameter]]: + self, category: Optional[str] = None + ) -> Iterator[Tuple[str, BaseParameter]]: """ Find all optimizable parameters and return (name, attr) iterator. :param category: :return: """ - if category not in ('buy', 'sell', 'protection', None): + if category not in ("buy", "sell", "protection", None): raise OperationalException( - 'Category must be one of: "buy", "sell", "protection", None.') + 'Category must be one of: "buy", "sell", "protection", None.' 
+ ) if category is None: params = self.ft_buy_params + self.ft_sell_params + self.ft_protection_params @@ -57,15 +60,13 @@ class HyperStrategyMixin: @classmethod def detect_all_parameters(cls) -> Dict: - """ Detect all parameters and return them as a list""" + """Detect all parameters and return them as a list""" params: Dict[str, Any] = { - 'buy': list(detect_parameters(cls, 'buy')), - 'sell': list(detect_parameters(cls, 'sell')), - 'protection': list(detect_parameters(cls, 'protection')), + "buy": list(detect_parameters(cls, "buy")), + "sell": list(detect_parameters(cls, "sell")), + "protection": list(detect_parameters(cls, "protection")), } - params.update({ - 'count': len(params['buy'] + params['sell'] + params['protection']) - }) + params.update({"count": len(params["buy"] + params["sell"] + params["protection"])}) return params @@ -77,23 +78,28 @@ class HyperStrategyMixin: if self._ft_params_from_file: # Set parameters from Hyperopt results file params = self._ft_params_from_file - self.minimal_roi = params.get('roi', getattr(self, 'minimal_roi', {})) + self.minimal_roi = params.get("roi", getattr(self, "minimal_roi", {})) - self.stoploss = params.get('stoploss', {}).get( - 'stoploss', getattr(self, 'stoploss', -0.1)) - self.max_open_trades = params.get('max_open_trades', {}).get( - 'max_open_trades', getattr(self, 'max_open_trades', -1)) - trailing = params.get('trailing', {}) + self.stoploss = params.get("stoploss", {}).get( + "stoploss", getattr(self, "stoploss", -0.1) + ) + self.max_open_trades = params.get("max_open_trades", {}).get( + "max_open_trades", getattr(self, "max_open_trades", -1) + ) + trailing = params.get("trailing", {}) self.trailing_stop = trailing.get( - 'trailing_stop', getattr(self, 'trailing_stop', False)) + "trailing_stop", getattr(self, "trailing_stop", False) + ) self.trailing_stop_positive = trailing.get( - 'trailing_stop_positive', getattr(self, 'trailing_stop_positive', None)) + "trailing_stop_positive", getattr(self, "trailing_stop_positive", None) + ) self.trailing_stop_positive_offset = trailing.get( - 'trailing_stop_positive_offset', - getattr(self, 'trailing_stop_positive_offset', 0)) + "trailing_stop_positive_offset", getattr(self, "trailing_stop_positive_offset", 0) + ) self.trailing_only_offset_is_reached = trailing.get( - 'trailing_only_offset_is_reached', - getattr(self, 'trailing_only_offset_is_reached', 0.0)) + "trailing_only_offset_is_reached", + getattr(self, "trailing_only_offset_is_reached", 0.0), + ) def ft_load_hyper_params(self, hyperopt: bool = False) -> None: """ @@ -104,29 +110,32 @@ class HyperStrategyMixin: * Parameter defaults """ - buy_params = deep_merge_dicts(self._ft_params_from_file.get('buy', {}), - getattr(self, 'buy_params', {})) - sell_params = deep_merge_dicts(self._ft_params_from_file.get('sell', {}), - getattr(self, 'sell_params', {})) - protection_params = deep_merge_dicts(self._ft_params_from_file.get('protection', {}), - getattr(self, 'protection_params', {})) + buy_params = deep_merge_dicts( + self._ft_params_from_file.get("buy", {}), getattr(self, "buy_params", {}) + ) + sell_params = deep_merge_dicts( + self._ft_params_from_file.get("sell", {}), getattr(self, "sell_params", {}) + ) + protection_params = deep_merge_dicts( + self._ft_params_from_file.get("protection", {}), getattr(self, "protection_params", {}) + ) - self._ft_load_params(buy_params, 'buy', hyperopt) - self._ft_load_params(sell_params, 'sell', hyperopt) - self._ft_load_params(protection_params, 'protection', hyperopt) + 
self._ft_load_params(buy_params, "buy", hyperopt) + self._ft_load_params(sell_params, "sell", hyperopt) + self._ft_load_params(protection_params, "protection", hyperopt) def load_params_from_file(self) -> Dict: - filename_str = getattr(self, '__file__', '') + filename_str = getattr(self, "__file__", "") if not filename_str: return {} - filename = Path(filename_str).with_suffix('.json') + filename = Path(filename_str).with_suffix(".json") if filename.is_file(): logger.info(f"Loading parameters from file {filename}") try: params = HyperoptTools.load_params(filename) - if params.get('strategy_name') != self.__class__.__name__: - raise OperationalException('Invalid parameter file provided.') + if params.get("strategy_name") != self.__class__.__name__: + raise OperationalException("Invalid parameter file provided.") return params except ValueError: logger.warning("Invalid parameter file format.") @@ -155,21 +164,23 @@ class HyperStrategyMixin: if params and attr_name in params: if attr.load: attr.value = params[attr_name] - logger.info(f'Strategy Parameter: {attr_name} = {attr.value}') + logger.info(f"Strategy Parameter: {attr_name} = {attr.value}") else: - logger.warning(f'Parameter "{attr_name}" exists, but is disabled. ' - f'Default value "{attr.value}" used.') + logger.warning( + f'Parameter "{attr_name}" exists, but is disabled. ' + f'Default value "{attr.value}" used.' + ) else: - logger.info(f'Strategy Parameter(default): {attr_name} = {attr.value}') + logger.info(f"Strategy Parameter(default): {attr_name} = {attr.value}") def get_no_optimize_params(self) -> Dict[str, Dict]: """ Returns list of Parameters that are not part of the current optimize job """ params: Dict[str, Dict] = { - 'buy': {}, - 'sell': {}, - 'protection': {}, + "buy": {}, + "sell": {}, + "protection": {}, } for name, p in self.enumerate_parameters(): if p.category and (not p.optimize or not p.in_space): @@ -178,23 +189,27 @@ class HyperStrategyMixin: def detect_parameters( - obj: Union[HyperStrategyMixin, Type[HyperStrategyMixin]], - category: str - ) -> Iterator[Tuple[str, BaseParameter]]: + obj: Union[HyperStrategyMixin, Type[HyperStrategyMixin]], category: str +) -> Iterator[Tuple[str, BaseParameter]]: """ Detect all parameters for 'category' for "obj" :param obj: Strategy object or class :param category: category - usually `'buy', 'sell', 'protection',... """ for attr_name in dir(obj): - if not attr_name.startswith('__'): # Ignore internals, not strictly necessary. + if not attr_name.startswith("__"): # Ignore internals, not strictly necessary. attr = getattr(obj, attr_name) if issubclass(attr.__class__, BaseParameter): - if (attr_name.startswith(category + '_') - and attr.category is not None and attr.category != category): + if ( + attr_name.startswith(category + "_") + and attr.category is not None + and attr.category != category + ): raise OperationalException( - f'Inconclusive parameter name {attr_name}, category: {attr.category}.') + f"Inconclusive parameter name {attr_name}, category: {attr.category}." 
+ ) - if (category == attr.category or - (attr_name.startswith(category + '_') and attr.category is None)): + if category == attr.category or ( + attr_name.startswith(category + "_") and attr.category is None + ): yield attr_name, attr diff --git a/freqtrade/strategy/informative_decorator.py b/freqtrade/strategy/informative_decorator.py index 6e44a7e20..12f4281d2 100644 --- a/freqtrade/strategy/informative_decorator.py +++ b/freqtrade/strategy/informative_decorator.py @@ -20,11 +20,14 @@ class InformativeData: candle_type: Optional[CandleType] -def informative(timeframe: str, asset: str = '', - fmt: Optional[Union[str, Callable[[Any], str]]] = None, - *, - candle_type: Optional[Union[CandleType, str]] = None, - ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]: +def informative( + timeframe: str, + asset: str = "", + fmt: Optional[Union[str, Callable[[Any], str]]] = None, + *, + candle_type: Optional[Union[CandleType, str]] = None, + ffill: bool = True, +) -> Callable[[PopulateIndicators], PopulateIndicators]: """ A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to define informative indicators. @@ -62,38 +65,43 @@ def informative(timeframe: str, asset: str = '', _candle_type = CandleType.from_string(candle_type) if candle_type else None def decorator(fn: PopulateIndicators): - informative_pairs = getattr(fn, '_ft_informative', []) + informative_pairs = getattr(fn, "_ft_informative", []) informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill, _candle_type)) - setattr(fn, '_ft_informative', informative_pairs) # noqa: B010 + setattr(fn, "_ft_informative", informative_pairs) # noqa: B010 return fn + return decorator def __get_pair_formats(market: Optional[Dict[str, Any]]) -> Dict[str, str]: if not market: return {} - base = market['base'] - quote = market['quote'] + base = market["base"] + quote = market["quote"] return { - 'base': base.lower(), - 'BASE': base.upper(), - 'quote': quote.lower(), - 'QUOTE': quote.upper(), + "base": base.lower(), + "BASE": base.upper(), + "quote": quote.lower(), + "QUOTE": quote.upper(), } def _format_pair_name(config, pair: str, market: Optional[Dict[str, Any]] = None) -> str: return pair.format( - stake_currency=config['stake_currency'], - stake=config['stake_currency'], + stake_currency=config["stake_currency"], + stake=config["stake_currency"], **__get_pair_formats(market), ).upper() -def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict, - inf_data: InformativeData, - populate_indicators: PopulateIndicators): - asset = inf_data.asset or '' +def _create_and_merge_informative_pair( + strategy, + dataframe: DataFrame, + metadata: dict, + inf_data: InformativeData, + populate_indicators: PopulateIndicators, +): + asset = inf_data.asset or "" timeframe = inf_data.timeframe fmt = inf_data.fmt candle_type = inf_data.candle_type @@ -102,15 +110,15 @@ def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: if asset: # Insert stake currency if needed. - market1 = strategy.dp.market(metadata['pair']) + market1 = strategy.dp.market(metadata["pair"]) asset = _format_pair_name(config, asset, market1) else: # Not specifying an asset will define informative dataframe for current pair. 
- asset = metadata['pair'] + asset = metadata["pair"] market = strategy.dp.market(asset) if market is None: - raise OperationalException(f'Market {asset} is not available.') + raise OperationalException(f"Market {asset} is not available.") # Default format. This optimizes for the common case: informative pairs using same stake # currency. When quote currency matches stake currency, column name will omit base currency. @@ -118,33 +126,40 @@ def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: # where it is desired to keep quote currency in column name at all times user should specify # fmt='{base}_{quote}_{column}_{timeframe}' format or similar. if not fmt: - fmt = '{column}_{timeframe}' # Informatives of current pair + fmt = "{column}_{timeframe}" # Informatives of current pair if inf_data.asset: - fmt = '{base}_{quote}_' + fmt # Informatives of other pairs + fmt = "{base}_{quote}_" + fmt # Informatives of other pairs - inf_metadata = {'pair': asset, 'timeframe': timeframe} + inf_metadata = {"pair": asset, "timeframe": timeframe} inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe, candle_type) inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata) formatter: Any = None if callable(fmt): - formatter = fmt # A custom user-specified formatter function. + formatter = fmt # A custom user-specified formatter function. else: - formatter = fmt.format # A default string formatter. + formatter = fmt.format # A default string formatter. fmt_args = { **__get_pair_formats(market), - 'asset': asset, - 'timeframe': timeframe, + "asset": asset, + "timeframe": timeframe, } - inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args), - inplace=True) + inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args), inplace=True) - date_column = formatter(column='date', **fmt_args) + date_column = formatter(column="date", **fmt_args) if date_column in dataframe.columns: - raise OperationalException(f'Duplicate column name {date_column} exists in ' - f'dataframe! Ensure column names are unique!') - dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe, - ffill=inf_data.ffill, append_timeframe=False, - date_column=date_column) + raise OperationalException( + f"Duplicate column name {date_column} exists in " + f"dataframe! Ensure column names are unique!" 
+ ) + dataframe = merge_informative_pair( + dataframe, + inf_dataframe, + strategy.timeframe, + timeframe, + ffill=inf_data.ffill, + append_timeframe=False, + date_column=date_column, + ) return dataframe diff --git a/freqtrade/strategy/interface.py b/freqtrade/strategy/interface.py index f8a890d5d..de3d0f349 100644 --- a/freqtrade/strategy/interface.py +++ b/freqtrade/strategy/interface.py @@ -2,6 +2,7 @@ IStrategy interface This module defines the interface to apply for strategies """ + import logging from abc import ABC, abstractmethod from datetime import datetime, timedelta, timezone @@ -11,16 +12,28 @@ from pandas import DataFrame from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH, Config, IntOrInf, ListPairsWithTimeframes from freqtrade.data.dataprovider import DataProvider -from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, MarketDirection, RunMode, - SignalDirection, SignalTagType, SignalType, TradingMode) +from freqtrade.enums import ( + CandleType, + ExitCheckTuple, + ExitType, + MarketDirection, + RunMode, + SignalDirection, + SignalTagType, + SignalType, + TradingMode, +) from freqtrade.exceptions import OperationalException, StrategyError from freqtrade.exchange import timeframe_to_minutes, timeframe_to_next_date, timeframe_to_seconds from freqtrade.misc import remove_entry_exit_signals from freqtrade.persistence import Order, PairLocks, Trade from freqtrade.strategy.hyper import HyperStrategyMixin -from freqtrade.strategy.informative_decorator import (InformativeData, PopulateIndicators, - _create_and_merge_informative_pair, - _format_pair_name) +from freqtrade.strategy.informative_decorator import ( + InformativeData, + PopulateIndicators, + _create_and_merge_informative_pair, + _format_pair_name, +) from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper from freqtrade.util import dt_now from freqtrade.wallets import Wallets @@ -39,6 +52,7 @@ class IStrategy(ABC, HyperStrategyMixin): stoploss -> float: optimal stoploss designed for the strategy timeframe -> str: value of the timeframe to use with the strategy """ + # Strategy interface version # Default to version 2 # Version 1 is the initial interface without metadata dict - deprecated and no longer supported. @@ -54,7 +68,7 @@ class IStrategy(ABC, HyperStrategyMixin): stoploss: float # max open trades for the strategy - max_open_trades: IntOrInf + max_open_trades: IntOrInf # trailing stoploss trailing_stop: bool = False @@ -71,17 +85,17 @@ class IStrategy(ABC, HyperStrategyMixin): # Optional order types order_types: Dict = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': False, - 'stoploss_on_exchange_interval': 60, + "entry": "limit", + "exit": "limit", + "stoploss": "limit", + "stoploss_on_exchange": False, + "stoploss_on_exchange_interval": 60, } # Optional time in force order_time_in_force: Dict = { - 'entry': 'GTC', - 'exit': 'GTC', + "entry": "GTC", + "exit": "GTC", } # run "populate_indicators" only for new candle @@ -116,7 +130,7 @@ class IStrategy(ABC, HyperStrategyMixin): # Filled from configuration stake_currency: str # container variable for strategy source code - __source__: str = '' + __source__: str = "" # Definition of plot_config. See plotting documentation for more details. 
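# Sketch of how a strategy might use the @informative decorator whose formatting is
# adjusted above. The pair, indicator name and 20-candle window are illustrative only;
# methods are shown as they would appear inside an IStrategy subclass.
from pandas import DataFrame

from freqtrade.strategy import informative


class InformativeUsageFragment:
    timeframe = "5m"

    @informative("1h")
    def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Same-pair informative: columns are renamed with the default
        # "{column}_{timeframe}" format, so "sma" becomes "sma_1h".
        dataframe["sma"] = dataframe["close"].rolling(20).mean()
        return dataframe

    @informative("1d", "BTC/{stake}")
    def populate_indicators_btc_1d(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Other-pair informative: the default "{base}_{quote}_{column}_{timeframe}"
        # format is applied, e.g. "btc_usdt_sma_1d" with a USDT stake currency.
        dataframe["sma"] = dataframe["close"].rolling(20).mean()
        return dataframe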
plot_config: Dict = {} @@ -136,7 +150,7 @@ class IStrategy(ABC, HyperStrategyMixin): cls_method = getattr(self.__class__, attr_name) if not callable(cls_method): continue - informative_data_list = getattr(cls_method, '_ft_informative', None) + informative_data_list = getattr(cls_method, "_ft_informative", None) if not isinstance(informative_data_list, list): # Type check is required because mocker would return a mock object that evaluates to # True, confusing this code. @@ -144,22 +158,24 @@ class IStrategy(ABC, HyperStrategyMixin): strategy_timeframe_minutes = timeframe_to_minutes(self.timeframe) for informative_data in informative_data_list: if timeframe_to_minutes(informative_data.timeframe) < strategy_timeframe_minutes: - raise OperationalException('Informative timeframe must be equal or higher than ' - 'strategy timeframe!') + raise OperationalException( + "Informative timeframe must be equal or higher than strategy timeframe!" + ) if not informative_data.candle_type: - informative_data.candle_type = config['candle_type_def'] + informative_data.candle_type = config["candle_type_def"] self._ft_informative.append((informative_data, cls_method)) def load_freqAI_model(self) -> None: - if self.config.get('freqai', {}).get('enabled', False): + if self.config.get("freqai", {}).get("enabled", False): # Import here to avoid importing this if freqAI is disabled from freqtrade.freqai.utils import download_all_data_for_training from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver + self.freqai = FreqaiModelResolver.load_freqaimodel(self.config) self.freqai_info = self.config["freqai"] # download the desired data in dry/live - if self.config.get('runmode') in (RunMode.DRY_RUN, RunMode.LIVE): + if self.config.get("runmode") in (RunMode.DRY_RUN, RunMode.LIVE): logger.info( "Downloading all training data for all pairs in whitelist and " "corr_pairlist, this may take a while if the data is not " @@ -171,8 +187,9 @@ class IStrategy(ABC, HyperStrategyMixin): class DummyClass: def start(self, *args, **kwargs): raise OperationalException( - 'freqAI is not enabled. ' - 'Please enable it in your config to use this strategy.') + "freqAI is not enabled. " + "Please enable it in your config to use this strategy." + ) def shutdown(self, *args, **kwargs): pass @@ -188,7 +205,7 @@ class IStrategy(ABC, HyperStrategyMixin): strategy_safe_wrapper(self.bot_start)() - self.ft_load_hyper_params(self.config.get('runmode') == RunMode.HYPEROPT) + self.ft_load_hyper_params(self.config.get("runmode") == RunMode.HYPEROPT) def ft_bot_cleanup(self) -> None: """ @@ -260,15 +277,17 @@ class IStrategy(ABC, HyperStrategyMixin): """ pass - def check_buy_timeout(self, pair: str, trade: Trade, order: Order, - current_time: datetime, **kwargs) -> bool: + def check_buy_timeout( + self, pair: str, trade: Trade, order: Order, current_time: datetime, **kwargs + ) -> bool: """ DEPRECATED: Please use `check_entry_timeout` instead. """ return False - def check_entry_timeout(self, pair: str, trade: Trade, order: Order, - current_time: datetime, **kwargs) -> bool: + def check_entry_timeout( + self, pair: str, trade: Trade, order: Order, current_time: datetime, **kwargs + ) -> bool: """ Check entry timeout function callback. This method can be used to override the entry-timeout. @@ -286,17 +305,20 @@ class IStrategy(ABC, HyperStrategyMixin): :return bool: When True is returned, then the entry order is cancelled. 
""" return self.check_buy_timeout( - pair=pair, trade=trade, order=order, current_time=current_time) + pair=pair, trade=trade, order=order, current_time=current_time + ) - def check_sell_timeout(self, pair: str, trade: Trade, order: Order, - current_time: datetime, **kwargs) -> bool: + def check_sell_timeout( + self, pair: str, trade: Trade, order: Order, current_time: datetime, **kwargs + ) -> bool: """ DEPRECATED: Please use `check_exit_timeout` instead. """ return False - def check_exit_timeout(self, pair: str, trade: Trade, order: Order, - current_time: datetime, **kwargs) -> bool: + def check_exit_timeout( + self, pair: str, trade: Trade, order: Order, current_time: datetime, **kwargs + ) -> bool: """ Check exit timeout function callback. This method can be used to override the exit-timeout. @@ -314,11 +336,21 @@ class IStrategy(ABC, HyperStrategyMixin): :return bool: When True is returned, then the exit-order is cancelled. """ return self.check_sell_timeout( - pair=pair, trade=trade, order=order, current_time=current_time) + pair=pair, trade=trade, order=order, current_time=current_time + ) - def confirm_trade_entry(self, pair: str, order_type: str, amount: float, rate: float, - time_in_force: str, current_time: datetime, entry_tag: Optional[str], - side: str, **kwargs) -> bool: + def confirm_trade_entry( + self, + pair: str, + order_type: str, + amount: float, + rate: float, + time_in_force: str, + current_time: datetime, + entry_tag: Optional[str], + side: str, + **kwargs, + ) -> bool: """ Called right before placing a entry order. Timing for this function is critical, so avoid doing heavy computations or @@ -343,9 +375,18 @@ class IStrategy(ABC, HyperStrategyMixin): """ return True - def confirm_trade_exit(self, pair: str, trade: Trade, order_type: str, amount: float, - rate: float, time_in_force: str, exit_reason: str, - current_time: datetime, **kwargs) -> bool: + def confirm_trade_exit( + self, + pair: str, + trade: Trade, + order_type: str, + amount: float, + rate: float, + time_in_force: str, + exit_reason: str, + current_time: datetime, + **kwargs, + ) -> bool: """ Called right before placing a regular exit order. Timing for this function is critical, so avoid doing heavy computations or @@ -372,8 +413,9 @@ class IStrategy(ABC, HyperStrategyMixin): """ return True - def order_filled(self, pair: str, trade: Trade, order: Order, - current_time: datetime, **kwargs) -> None: + def order_filled( + self, pair: str, trade: Trade, order: Order, current_time: datetime, **kwargs + ) -> None: """ Called right after an order fills. Will be called for all order types (entry, exit, stoploss, position adjustment). @@ -385,8 +427,16 @@ class IStrategy(ABC, HyperStrategyMixin): """ pass - def custom_stoploss(self, pair: str, trade: Trade, current_time: datetime, current_rate: float, - current_profit: float, after_fill: bool, **kwargs) -> Optional[float]: + def custom_stoploss( + self, + pair: str, + trade: Trade, + current_time: datetime, + current_rate: float, + current_profit: float, + after_fill: bool, + **kwargs, + ) -> Optional[float]: """ Custom stoploss logic, returning the new distance relative to current_rate (as ratio). e.g. returning -0.05 would create a stoploss 5% below current_rate. 
@@ -408,9 +458,16 @@ class IStrategy(ABC, HyperStrategyMixin): """ return self.stoploss - def custom_entry_price(self, pair: str, trade: Optional[Trade], - current_time: datetime, proposed_rate: float, - entry_tag: Optional[str], side: str, **kwargs) -> float: + def custom_entry_price( + self, + pair: str, + trade: Optional[Trade], + current_time: datetime, + proposed_rate: float, + entry_tag: Optional[str], + side: str, + **kwargs, + ) -> float: """ Custom entry price logic, returning the new entry price. @@ -429,9 +486,16 @@ class IStrategy(ABC, HyperStrategyMixin): """ return proposed_rate - def custom_exit_price(self, pair: str, trade: Trade, - current_time: datetime, proposed_rate: float, - current_profit: float, exit_tag: Optional[str], **kwargs) -> float: + def custom_exit_price( + self, + pair: str, + trade: Trade, + current_time: datetime, + proposed_rate: float, + current_profit: float, + exit_tag: Optional[str], + **kwargs, + ) -> float: """ Custom exit price logic, returning the new exit price. @@ -450,8 +514,15 @@ class IStrategy(ABC, HyperStrategyMixin): """ return proposed_rate - def custom_sell(self, pair: str, trade: Trade, current_time: datetime, current_rate: float, - current_profit: float, **kwargs) -> Optional[Union[str, bool]]: + def custom_sell( + self, + pair: str, + trade: Trade, + current_time: datetime, + current_rate: float, + current_profit: float, + **kwargs, + ) -> Optional[Union[str, bool]]: """ DEPRECATED - please use custom_exit instead. Custom exit signal logic indicating that specified position should be sold. Returning a @@ -475,8 +546,15 @@ class IStrategy(ABC, HyperStrategyMixin): """ return None - def custom_exit(self, pair: str, trade: Trade, current_time: datetime, current_rate: float, - current_profit: float, **kwargs) -> Optional[Union[str, bool]]: + def custom_exit( + self, + pair: str, + trade: Trade, + current_time: datetime, + current_rate: float, + current_profit: float, + **kwargs, + ) -> Optional[Union[str, bool]]: """ Custom exit signal logic indicating that specified position should be sold. Returning a string or True from this method is equal to setting exit signal on a candle at specified @@ -499,10 +577,19 @@ class IStrategy(ABC, HyperStrategyMixin): """ return self.custom_sell(pair, trade, current_time, current_rate, current_profit, **kwargs) - def custom_stake_amount(self, pair: str, current_time: datetime, current_rate: float, - proposed_stake: float, min_stake: Optional[float], max_stake: float, - leverage: float, entry_tag: Optional[str], side: str, - **kwargs) -> float: + def custom_stake_amount( + self, + pair: str, + current_time: datetime, + current_rate: float, + proposed_stake: float, + min_stake: Optional[float], + max_stake: float, + leverage: float, + entry_tag: Optional[str], + side: str, + **kwargs, + ) -> float: """ Customize stake size for each new trade. 
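# Sketch of a custom_exit() callback with the signature re-wrapped above. Returning a
# string exits the trade and records it as the exit reason (trimmed to
# CUSTOM_TAG_MAX_LENGTH in should_exit() further down); the 12h/loss rule is arbitrary.
from datetime import datetime, timedelta
from typing import Optional, Union

from freqtrade.persistence import Trade


class CustomExitFragment:
    def custom_exit(
        self,
        pair: str,
        trade: Trade,
        current_time: datetime,
        current_rate: float,
        current_profit: float,
        **kwargs,
    ) -> Optional[Union[str, bool]]:
        if current_time - trade.open_date_utc > timedelta(hours=12) and current_profit < 0:
            return "stale_losing_trade"
        return None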
@@ -519,13 +606,20 @@ class IStrategy(ABC, HyperStrategyMixin): """ return proposed_stake - def adjust_trade_position(self, trade: Trade, current_time: datetime, - current_rate: float, current_profit: float, - min_stake: Optional[float], max_stake: float, - current_entry_rate: float, current_exit_rate: float, - current_entry_profit: float, current_exit_profit: float, - **kwargs - ) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]: + def adjust_trade_position( + self, + trade: Trade, + current_time: datetime, + current_rate: float, + current_profit: float, + min_stake: Optional[float], + max_stake: float, + current_entry_rate: float, + current_exit_rate: float, + current_entry_profit: float, + current_exit_profit: float, + **kwargs, + ) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]: """ Custom trade adjustment logic, returning the stake amount that a trade should be increased or decreased. @@ -555,9 +649,18 @@ class IStrategy(ABC, HyperStrategyMixin): """ return None - def adjust_entry_price(self, trade: Trade, order: Optional[Order], pair: str, - current_time: datetime, proposed_rate: float, current_order_rate: float, - entry_tag: Optional[str], side: str, **kwargs) -> float: + def adjust_entry_price( + self, + trade: Trade, + order: Optional[Order], + pair: str, + current_time: datetime, + proposed_rate: float, + current_order_rate: float, + entry_tag: Optional[str], + side: str, + **kwargs, + ) -> float: """ Entry price re-adjustment logic, returning the user desired limit price. This only executes when a order was already placed, still open (unfilled fully or partially) @@ -583,9 +686,17 @@ class IStrategy(ABC, HyperStrategyMixin): """ return current_order_rate - def leverage(self, pair: str, current_time: datetime, current_rate: float, - proposed_leverage: float, max_leverage: float, entry_tag: Optional[str], - side: str, **kwargs) -> float: + def leverage( + self, + pair: str, + current_time: datetime, + current_rate: float, + proposed_leverage: float, + max_leverage: float, + entry_tag: Optional[str], + side: str, + **kwargs, + ) -> float: """ Customize leverage for each new trade. This method is only called in futures mode. 
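# Sketch of adjust_trade_position() using the (stake_amount, order_tag) tuple return
# that _adjust_trade_position_internal() unpacks further down in the diff. The -5%
# trigger and single extra entry are arbitrary choices.
from datetime import datetime
from typing import Optional, Tuple, Union

from freqtrade.persistence import Trade


class PositionAdjustFragment:
    position_adjustment_enable = True

    def adjust_trade_position(
        self,
        trade: Trade,
        current_time: datetime,
        current_rate: float,
        current_profit: float,
        min_stake: Optional[float],
        max_stake: float,
        current_entry_rate: float,
        current_exit_rate: float,
        current_entry_profit: float,
        current_exit_profit: float,
        **kwargs,
    ) -> Union[Optional[float], Tuple[Optional[float], Optional[str]]]:
        # Average down once: add the initial stake again when 5% under water.
        if current_profit < -0.05 and trade.nr_of_successful_entries == 1:
            return trade.stake_amount, "dca_level_1"
        return None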
@@ -619,9 +730,14 @@ class IStrategy(ABC, HyperStrategyMixin): """ return None - def populate_any_indicators(self, pair: str, df: DataFrame, tf: str, - informative: Optional[DataFrame] = None, - set_generalized_indicators: bool = False) -> DataFrame: + def populate_any_indicators( + self, + pair: str, + df: DataFrame, + tf: str, + informative: Optional[DataFrame] = None, + set_generalized_indicators: bool = False, + ) -> DataFrame: """ DEPRECATED - USE FEATURE ENGINEERING FUNCTIONS INSTEAD Function designed to automatically generate, name and merge features @@ -636,8 +752,9 @@ class IStrategy(ABC, HyperStrategyMixin): """ return df - def feature_engineering_expand_all(self, dataframe: DataFrame, period: int, - metadata: Dict, **kwargs) -> DataFrame: + def feature_engineering_expand_all( + self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -664,7 +781,8 @@ class IStrategy(ABC, HyperStrategyMixin): return dataframe def feature_engineering_expand_basic( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -694,7 +812,8 @@ class IStrategy(ABC, HyperStrategyMixin): return dataframe def feature_engineering_standard( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This optional function will be called once with the dataframe of the base timeframe. 
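# Sketch of the FreqAI feature-engineering hooks whose signatures are re-wrapped above.
# Feature columns are expected to start with "%"; the expand_all features are expanded
# over the configured periods, timeframes and correlated pairs. The concrete features
# below are placeholders.
from typing import Dict

from pandas import DataFrame


class FreqaiFeatureFragment:
    def feature_engineering_expand_all(
        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
    ) -> DataFrame:
        dataframe["%-pct_change"] = dataframe["close"].pct_change(period)
        return dataframe

    def feature_engineering_standard(
        self, dataframe: DataFrame, metadata: Dict, **kwargs
    ) -> DataFrame:
        dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
        return dataframe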
@@ -734,38 +853,50 @@ class IStrategy(ABC, HyperStrategyMixin): """ return dataframe -### -# END - Intended to be overridden by strategy -### + ### + # END - Intended to be overridden by strategy + ### _ft_stop_uses_after_fill = False def _adjust_trade_position_internal( - self, trade: Trade, current_time: datetime, - current_rate: float, current_profit: float, - min_stake: Optional[float], max_stake: float, - current_entry_rate: float, current_exit_rate: float, - current_entry_profit: float, current_exit_profit: float, - **kwargs + self, + trade: Trade, + current_time: datetime, + current_rate: float, + current_profit: float, + min_stake: Optional[float], + max_stake: float, + current_entry_rate: float, + current_exit_rate: float, + current_entry_profit: float, + current_exit_profit: float, + **kwargs, ) -> Tuple[Optional[float], str]: """ wrapper around adjust_trade_position to handle the return value """ - resp = strategy_safe_wrapper(self.adjust_trade_position, - default_retval=(None, ''), supress_error=True)( - trade=trade, current_time=current_time, - current_rate=current_rate, current_profit=current_profit, - min_stake=min_stake, max_stake=max_stake, - current_entry_rate=current_entry_rate, current_exit_rate=current_exit_rate, - current_entry_profit=current_entry_profit, current_exit_profit=current_exit_profit, - **kwargs + resp = strategy_safe_wrapper( + self.adjust_trade_position, default_retval=(None, ""), supress_error=True + )( + trade=trade, + current_time=current_time, + current_rate=current_rate, + current_profit=current_profit, + min_stake=min_stake, + max_stake=max_stake, + current_entry_rate=current_entry_rate, + current_exit_rate=current_exit_rate, + current_entry_profit=current_entry_profit, + current_exit_profit=current_exit_profit, + **kwargs, ) - order_tag = '' + order_tag = "" if isinstance(resp, tuple): if len(resp) >= 1: stake_amount = resp[0] if len(resp) > 1: - order_tag = resp[1] or '' + order_tag = resp[1] or "" else: stake_amount = resp return stake_amount, order_tag @@ -774,9 +905,9 @@ class IStrategy(ABC, HyperStrategyMixin): """ Create informative-pairs needed for FreqAI """ - if self.config.get('freqai', {}).get('enabled', False): + if self.config.get("freqai", {}).get("enabled", False): whitelist_pairs = self.dp.current_whitelist() - candle_type = self.config.get('candle_type_def', CandleType.SPOT) + candle_type = self.config.get("candle_type_def", CandleType.SPOT) corr_pairs = self.config["freqai"]["feature_parameters"]["include_corr_pairlist"] informative_pairs = [] for tf in self.config["freqai"]["feature_parameters"]["include_timeframes"]: @@ -793,17 +924,27 @@ class IStrategy(ABC, HyperStrategyMixin): informative_pairs = self.informative_pairs() # Compatibility code for 2 tuple informative pairs informative_pairs = [ - (p[0], p[1], CandleType.from_string(p[2]) if len( - p) > 2 and p[2] != '' else self.config.get('candle_type_def', CandleType.SPOT)) - for p in informative_pairs] + ( + p[0], + p[1], + ( + CandleType.from_string(p[2]) + if len(p) > 2 and p[2] != "" + else self.config.get("candle_type_def", CandleType.SPOT) + ), + ) + for p in informative_pairs + ] for inf_data, _ in self._ft_informative: # Get default candle type if not provided explicitly. 
- candle_type = (inf_data.candle_type if inf_data.candle_type - else self.config.get('candle_type_def', CandleType.SPOT)) + candle_type = ( + inf_data.candle_type + if inf_data.candle_type + else self.config.get("candle_type_def", CandleType.SPOT) + ) if inf_data.asset: if any(s in inf_data.asset for s in ("{BASE}", "{base}")): for pair in self.dp.current_whitelist(): - pair_tf = ( _format_pair_name(self.config, inf_data.asset, self.dp.market(pair)), inf_data.timeframe, @@ -830,8 +971,9 @@ class IStrategy(ABC, HyperStrategyMixin): """ return self.__class__.__name__ - def lock_pair(self, pair: str, until: datetime, - reason: Optional[str] = None, side: str = '*') -> None: + def lock_pair( + self, pair: str, until: datetime, reason: Optional[str] = None, side: str = "*" + ) -> None: """ Locks pair until a given timestamp happens. Locked pairs are not analyzed, and are prevented from opening new trades. @@ -863,8 +1005,9 @@ class IStrategy(ABC, HyperStrategyMixin): """ PairLocks.unlock_reason(reason, datetime.now(timezone.utc)) - def is_pair_locked(self, pair: str, *, candle_date: Optional[datetime] = None, - side: str = '*') -> bool: + def is_pair_locked( + self, pair: str, *, candle_date: Optional[datetime] = None, side: str = "*" + ) -> bool: """ Checks if a pair is currently locked The 2nd, optional parameter ensures that locks are applied until the new candle arrives, @@ -907,19 +1050,18 @@ class IStrategy(ABC, HyperStrategyMixin): :param metadata: Metadata dictionary with additional data (e.g. 'pair') :return: DataFrame of candle (OHLCV) data with indicator data and signals added """ - pair = str(metadata.get('pair')) + pair = str(metadata.get("pair")) - new_candle = self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]['date'] + new_candle = self._last_candle_seen_per_pair.get(pair, None) != dataframe.iloc[-1]["date"] # Test if seen this pair and last candle before. # always run if process_only_new_candles is set to false if not self.process_only_new_candles or new_candle: - # Defs that only make change on new candle data. dataframe = self.analyze_ticker(dataframe, metadata) - self._last_candle_seen_per_pair[pair] = dataframe.iloc[-1]['date'] + self._last_candle_seen_per_pair[pair] = dataframe.iloc[-1]["date"] - candle_type = self.config.get('candle_type_def', CandleType.SPOT) + candle_type = self.config.get("candle_type_def", CandleType.SPOT) self.dp._set_cached_df(pair, self.timeframe, dataframe, candle_type=candle_type) self.dp._emit_df((pair, self.timeframe, candle_type), dataframe, new_candle) @@ -939,18 +1081,18 @@ class IStrategy(ABC, HyperStrategyMixin): :param pair: Pair to analyze. 
""" dataframe = self.dp.ohlcv( - pair, self.timeframe, candle_type=self.config.get('candle_type_def', CandleType.SPOT) + pair, self.timeframe, candle_type=self.config.get("candle_type_def", CandleType.SPOT) ) if not isinstance(dataframe, DataFrame) or dataframe.empty: - logger.warning('Empty candle (OHLCV) data for pair %s', pair) + logger.warning("Empty candle (OHLCV) data for pair %s", pair) return try: df_len, df_close, df_date = self.preserve_df(dataframe) - dataframe = strategy_safe_wrapper( - self._analyze_ticker_internal, message="" - )(dataframe, {'pair': pair}) + dataframe = strategy_safe_wrapper(self._analyze_ticker_internal, message="")( + dataframe, {"pair": pair} + ) self.assert_df(dataframe, df_len, df_close, df_date) except StrategyError as error: @@ -958,7 +1100,7 @@ class IStrategy(ABC, HyperStrategyMixin): return if dataframe.empty: - logger.warning('Empty dataframe for pair %s', pair) + logger.warning("Empty dataframe for pair %s", pair) return def analyze(self, pairs: List[str]) -> None: @@ -971,7 +1113,7 @@ class IStrategy(ABC, HyperStrategyMixin): @staticmethod def preserve_df(dataframe: DataFrame) -> Tuple[int, float, datetime]: - """ keep some data for dataframes """ + """keep some data for dataframes""" return len(dataframe), dataframe["close"].iloc[-1], dataframe["date"].iloc[-1] def assert_df(self, dataframe: DataFrame, df_len: int, df_close: float, df_date: datetime): @@ -982,7 +1124,7 @@ class IStrategy(ABC, HyperStrategyMixin): message = "" if dataframe is None: message = "No dataframe returned (return statement missing?)." - elif 'enter_long' not in dataframe: + elif "enter_long" not in dataframe: message = "enter_long/buy column not set." elif df_len != len(dataframe): message = message_template.format("length") @@ -1012,31 +1154,28 @@ class IStrategy(ABC, HyperStrategyMixin): :return: (None, None) or (Dataframe, latest_date) - corresponding to the last candle """ if not isinstance(dataframe, DataFrame) or dataframe.empty: - logger.warning(f'Empty candle (OHLCV) data for pair {pair}') + logger.warning(f"Empty candle (OHLCV) data for pair {pair}") return None, None - latest_date = dataframe['date'].max() - latest = dataframe.loc[dataframe['date'] == latest_date].iloc[-1] + latest_date = dataframe["date"].max() + latest = dataframe.loc[dataframe["date"] == latest_date].iloc[-1] # Explicitly convert to datetime object to ensure the below comparison does not fail latest_date = latest_date.to_pydatetime() # Check if dataframe is out of date timeframe_minutes = timeframe_to_minutes(timeframe) - offset = self.config.get('exchange', {}).get('outdated_offset', 5) + offset = self.config.get("exchange", {}).get("outdated_offset", 5) if latest_date < (dt_now() - timedelta(minutes=timeframe_minutes * 2 + offset)): logger.warning( - 'Outdated history for pair %s. Last tick is %s minutes old', - pair, int((dt_now() - latest_date).total_seconds() // 60) + "Outdated history for pair %s. 
Last tick is %s minutes old", + pair, + int((dt_now() - latest_date).total_seconds() // 60), ) return None, None return latest, latest_date def get_exit_signal( - self, - pair: str, - timeframe: str, - dataframe: DataFrame, - is_short: Optional[bool] = None + self, pair: str, timeframe: str, dataframe: DataFrame, is_short: Optional[bool] = None ) -> Tuple[bool, bool, Optional[str]]: """ Calculates current exit signal based based on the dataframe @@ -1062,10 +1201,9 @@ class IStrategy(ABC, HyperStrategyMixin): exit_ = latest.get(SignalType.EXIT_LONG.value, 0) == 1 exit_tag = latest.get(SignalTagType.EXIT_TAG.value, None) # Tags can be None, which does not resolve to False. - exit_tag = exit_tag if isinstance(exit_tag, str) and exit_tag != 'nan' else None + exit_tag = exit_tag if isinstance(exit_tag, str) and exit_tag != "nan" else None - logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) " - f"enter={enter} exit={exit_}") + logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) enter={enter} exit={exit_}") return enter, exit_, exit_tag @@ -1098,13 +1236,16 @@ class IStrategy(ABC, HyperStrategyMixin): if enter_long == 1 and not any([exit_long, enter_short]): enter_signal = SignalDirection.LONG enter_tag = latest.get(SignalTagType.ENTER_TAG.value, None) - if (self.config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT - and self.can_short - and enter_short == 1 and not any([exit_short, enter_long])): + if ( + self.config.get("trading_mode", TradingMode.SPOT) != TradingMode.SPOT + and self.can_short + and enter_short == 1 + and not any([exit_short, enter_long]) + ): enter_signal = SignalDirection.SHORT enter_tag = latest.get(SignalTagType.ENTER_TAG.value, None) - enter_tag = enter_tag if isinstance(enter_tag, str) and enter_tag != 'nan' else None + enter_tag = enter_tag if isinstance(enter_tag, str) and enter_tag != "nan" else None timeframe_seconds = timeframe_to_seconds(timeframe) @@ -1112,20 +1253,18 @@ class IStrategy(ABC, HyperStrategyMixin): latest_date=latest_date, current_time=dt_now(), timeframe_seconds=timeframe_seconds, - enter=bool(enter_signal) + enter=bool(enter_signal), ): return None, enter_tag - logger.debug(f"entry trigger: {latest['date']} (pair={pair}) " - f"enter={enter_long} enter_tag_value={enter_tag}") + logger.debug( + f"entry trigger: {latest['date']} (pair={pair}) " + f"enter={enter_long} enter_tag_value={enter_tag}" + ) return enter_signal, enter_tag def ignore_expired_candle( - self, - latest_date: datetime, - current_time: datetime, - timeframe_seconds: int, - enter: bool + self, latest_date: datetime, current_time: datetime, timeframe_seconds: int, enter: bool ): if self.ignore_buying_expired_candle_after and enter: time_delta = current_time - (latest_date + timedelta(seconds=timeframe_seconds)) @@ -1133,10 +1272,18 @@ class IStrategy(ABC, HyperStrategyMixin): else: return False - def should_exit(self, trade: Trade, rate: float, current_time: datetime, *, - enter: bool, exit_: bool, - low: Optional[float] = None, high: Optional[float] = None, - force_stoploss: float = 0) -> List[ExitCheckTuple]: + def should_exit( + self, + trade: Trade, + rate: float, + current_time: datetime, + *, + enter: bool, + exit_: bool, + low: Optional[float] = None, + high: Optional[float] = None, + force_stoploss: float = 0, + ) -> List[ExitCheckTuple]: """ This function evaluates if one of the conditions required to trigger an exit order has been reached, which can either be a stop-loss, ROI or exit-signal. 
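# Sketch of the entry/exit columns that get_entry_signal()/get_exit_signal() above read
# (enter_long/enter_short/exit_long/exit_short plus enter_tag/exit_tag). The SMA
# crossover condition is a placeholder.
from pandas import DataFrame


class SignalColumnsFragment:
    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        sma20 = dataframe["close"].rolling(20).mean()
        dataframe.loc[dataframe["close"] > sma20, ["enter_long", "enter_tag"]] = (1, "above_sma20")
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        sma20 = dataframe["close"].rolling(20).mean()
        dataframe.loc[dataframe["close"] < sma20, ["exit_long", "exit_tag"]] = (1, "below_sma20")
        return dataframe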
@@ -1156,45 +1303,57 @@ class IStrategy(ABC, HyperStrategyMixin): trade.adjust_min_max_rates(high or current_rate, low or current_rate) - stoplossflag = self.ft_stoploss_reached(current_rate=current_rate, trade=trade, - current_time=current_time, - current_profit=current_profit, - force_stoploss=force_stoploss, low=low, high=high) + stoplossflag = self.ft_stoploss_reached( + current_rate=current_rate, + trade=trade, + current_time=current_time, + current_profit=current_profit, + force_stoploss=force_stoploss, + low=low, + high=high, + ) # if enter signal and ignore_roi is set, we don't need to evaluate min_roi. - roi_reached = (not (enter and self.ignore_roi_if_entry_signal) - and self.min_roi_reached(trade=trade, current_profit=current_profit_best, - current_time=current_time)) + roi_reached = not (enter and self.ignore_roi_if_entry_signal) and self.min_roi_reached( + trade=trade, current_profit=current_profit_best, current_time=current_time + ) exit_signal = ExitType.NONE - custom_reason = '' + custom_reason = "" if self.use_exit_signal: if exit_ and not enter: exit_signal = ExitType.EXIT_SIGNAL else: reason_cust = strategy_safe_wrapper(self.custom_exit, default_retval=False)( - pair=trade.pair, trade=trade, current_time=current_time, - current_rate=current_rate, current_profit=current_profit) + pair=trade.pair, + trade=trade, + current_time=current_time, + current_rate=current_rate, + current_profit=current_profit, + ) if reason_cust: exit_signal = ExitType.CUSTOM_EXIT if isinstance(reason_cust, str): custom_reason = reason_cust if len(reason_cust) > CUSTOM_TAG_MAX_LENGTH: - logger.warning(f'Custom exit reason returned from ' - f'custom_exit is too long and was trimmed' - f'to {CUSTOM_TAG_MAX_LENGTH} characters.') + logger.warning( + f"Custom exit reason returned from " + f"custom_exit is too long and was trimmed" + f"to {CUSTOM_TAG_MAX_LENGTH} characters." + ) custom_reason = reason_cust[:CUSTOM_TAG_MAX_LENGTH] else: - custom_reason = '' - if ( - exit_signal == ExitType.CUSTOM_EXIT - or (exit_signal == ExitType.EXIT_SIGNAL - and (not self.exit_profit_only or current_profit > self.exit_profit_offset)) + custom_reason = "" + if exit_signal == ExitType.CUSTOM_EXIT or ( + exit_signal == ExitType.EXIT_SIGNAL + and (not self.exit_profit_only or current_profit > self.exit_profit_offset) ): - logger.debug(f"{trade.pair} - Sell signal received. " - f"exit_type=ExitType.{exit_signal.name}" + - (f", custom_reason={custom_reason}" if custom_reason else "")) + logger.debug( + f"{trade.pair} - Sell signal received. " + f"exit_type=ExitType.{exit_signal.name}" + + (f", custom_reason={custom_reason}" if custom_reason else "") + ) exits.append(ExitCheckTuple(exit_type=exit_signal, exit_reason=custom_reason)) # Sequence: @@ -1204,7 +1363,6 @@ class IStrategy(ABC, HyperStrategyMixin): # Trailing stoploss if stoplossflag.exit_type in (ExitType.STOP_LOSS, ExitType.LIQUIDATION): - logger.debug(f"{trade.pair} - Stoploss hit. 
exit_type={stoplossflag.exit_type}") exits.append(stoplossflag) @@ -1213,16 +1371,22 @@ class IStrategy(ABC, HyperStrategyMixin): exits.append(ExitCheckTuple(exit_type=ExitType.ROI)) if stoplossflag.exit_type == ExitType.TRAILING_STOP_LOSS: - logger.debug(f"{trade.pair} - Trailing stoploss hit.") exits.append(stoplossflag) return exits - def ft_stoploss_adjust(self, current_rate: float, trade: Trade, - current_time: datetime, current_profit: float, - force_stoploss: float, low: Optional[float] = None, - high: Optional[float] = None, after_fill: bool = False) -> None: + def ft_stoploss_adjust( + self, + current_rate: float, + trade: Trade, + current_time: datetime, + current_profit: float, + force_stoploss: float, + low: Optional[float] = None, + high: Optional[float] = None, + after_fill: bool = False, + ) -> None: """ Adjust stop-loss dynamically if configured to do so. :param current_profit: current profit as ratio @@ -1238,27 +1402,32 @@ class IStrategy(ABC, HyperStrategyMixin): # Initiate stoploss with open_rate. Does nothing if stoploss is already set. trade.adjust_stop_loss(trade.open_rate, stop_loss_value, initial=True) - dir_correct = (trade.stop_loss < (low or current_rate) - if not trade.is_short else - trade.stop_loss > (high or current_rate) - ) + dir_correct = ( + trade.stop_loss < (low or current_rate) + if not trade.is_short + else trade.stop_loss > (high or current_rate) + ) # Make sure current_profit is calculated using high for backtesting. - bound = (low if trade.is_short else high) + bound = low if trade.is_short else high bound_profit = current_profit if not bound else trade.calc_profit_ratio(bound) if self.use_custom_stoploss and dir_correct: stop_loss_value_custom = strategy_safe_wrapper( self.custom_stoploss, default_retval=None, supress_error=True - )(pair=trade.pair, trade=trade, - current_time=current_time, - current_rate=(bound or current_rate), - current_profit=bound_profit, - after_fill=after_fill) + )( + pair=trade.pair, + trade=trade, + current_time=current_time, + current_rate=(bound or current_rate), + current_profit=bound_profit, + after_fill=after_fill, + ) # Sanity check - error cases will return None if stop_loss_value_custom: stop_loss_value = stop_loss_value_custom - trade.adjust_stop_loss(bound or current_rate, stop_loss_value, - allow_refresh=after_fill) + trade.adjust_stop_loss( + bound or current_rate, stop_loss_value, allow_refresh=after_fill + ) else: logger.debug("CustomStoploss function did not return valid stoploss") @@ -1272,15 +1441,23 @@ class IStrategy(ABC, HyperStrategyMixin): # Specific handling for trailing_stop_positive if self.trailing_stop_positive is not None and bound_profit > sl_offset: stop_loss_value = self.trailing_stop_positive - logger.debug(f"{trade.pair} - Using positive stoploss: {stop_loss_value} " - f"offset: {sl_offset:.4g} profit: {bound_profit:.2%}") + logger.debug( + f"{trade.pair} - Using positive stoploss: {stop_loss_value} " + f"offset: {sl_offset:.4g} profit: {bound_profit:.2%}" + ) trade.adjust_stop_loss(bound or current_rate, stop_loss_value) - def ft_stoploss_reached(self, current_rate: float, trade: Trade, - current_time: datetime, current_profit: float, - force_stoploss: float, low: Optional[float] = None, - high: Optional[float] = None) -> ExitCheckTuple: + def ft_stoploss_reached( + self, + current_rate: float, + trade: Trade, + current_time: datetime, + current_profit: float, + force_stoploss: float, + low: Optional[float] = None, + high: Optional[float] = None, + ) -> ExitCheckTuple: """ Based on 
current profit of the trade and configured (trailing) stoploss, decides to exit or not @@ -1288,24 +1465,29 @@ class IStrategy(ABC, HyperStrategyMixin): :param low: Low value of this candle, only set in backtesting :param high: High value of this candle, only set in backtesting """ - self.ft_stoploss_adjust(current_rate, trade, current_time, current_profit, - force_stoploss, low, high) + self.ft_stoploss_adjust( + current_rate, trade, current_time, current_profit, force_stoploss, low, high + ) - sl_higher_long = (trade.stop_loss >= (low or current_rate) and not trade.is_short) - sl_lower_short = (trade.stop_loss <= (high or current_rate) and trade.is_short) - liq_higher_long = (trade.liquidation_price - and trade.liquidation_price >= (low or current_rate) - and not trade.is_short) - liq_lower_short = (trade.liquidation_price - and trade.liquidation_price <= (high or current_rate) - and trade.is_short) + sl_higher_long = trade.stop_loss >= (low or current_rate) and not trade.is_short + sl_lower_short = trade.stop_loss <= (high or current_rate) and trade.is_short + liq_higher_long = ( + trade.liquidation_price + and trade.liquidation_price >= (low or current_rate) + and not trade.is_short + ) + liq_lower_short = ( + trade.liquidation_price + and trade.liquidation_price <= (high or current_rate) + and trade.is_short + ) # evaluate if the stoploss was hit if stoploss is not on exchange # in Dry-Run, this handles stoploss logic as well, as the logic will not be different to # regular stoploss handling. - if ((sl_higher_long or sl_lower_short) and - (not self.order_types.get('stoploss_on_exchange') or self.config['dry_run'])): - + if (sl_higher_long or sl_lower_short) and ( + not self.order_types.get("stoploss_on_exchange") or self.config["dry_run"] + ): exit_type = ExitType.STOP_LOSS # If initial stoploss is not the same as current one then it is trailing. @@ -1316,11 +1498,12 @@ class IStrategy(ABC, HyperStrategyMixin): f"{((high if trade.is_short else low) or current_rate):.6f}, " f"stoploss is {trade.stop_loss:.6f}, " f"initial stoploss was at {trade.initial_stop_loss:.6f}, " - f"trade opened at {trade.open_rate:.6f}") + f"trade opened at {trade.open_rate:.6f}" + ) return ExitCheckTuple(exit_type=exit_type) - if (liq_higher_long or liq_lower_short): + if liq_higher_long or liq_lower_short: logger.debug(f"{trade.pair} - Liquidation price hit. exit_type=ExitType.LIQUIDATION") return ExitCheckTuple(exit_type=ExitType.LIQUIDATION) @@ -1354,29 +1537,30 @@ class IStrategy(ABC, HyperStrategyMixin): else: return current_profit > roi - def ft_check_timed_out(self, trade: Trade, order: Order, - current_time: datetime) -> bool: + def ft_check_timed_out(self, trade: Trade, order: Order, current_time: datetime) -> bool: """ FT Internal method. 
Check if timeout is active, and if the order is still open and timed out """ - side = 'entry' if order.ft_order_side == trade.entry_side else 'exit' + side = "entry" if order.ft_order_side == trade.entry_side else "exit" - timeout = self.config.get('unfilledtimeout', {}).get(side) + timeout = self.config.get("unfilledtimeout", {}).get(side) if timeout is not None: - timeout_unit = self.config.get('unfilledtimeout', {}).get('unit', 'minutes') + timeout_unit = self.config.get("unfilledtimeout", {}).get("unit", "minutes") timeout_kwargs = {timeout_unit: -timeout} timeout_threshold = current_time + timedelta(**timeout_kwargs) - timedout = (order.status == 'open' and order.order_date_utc < timeout_threshold) + timedout = order.status == "open" and order.order_date_utc < timeout_threshold if timedout: return True - time_method = (self.check_exit_timeout if order.ft_order_side == trade.exit_side - else self.check_entry_timeout) + time_method = ( + self.check_exit_timeout + if order.ft_order_side == trade.exit_side + else self.check_entry_timeout + ) - return strategy_safe_wrapper(time_method, - default_retval=False)( - pair=trade.pair, trade=trade, order=order, - current_time=current_time) + return strategy_safe_wrapper(time_method, default_retval=False)( + pair=trade.pair, trade=trade, order=order, current_time=current_time + ) def advise_all_indicators(self, data: Dict[str, DataFrame]) -> Dict[str, DataFrame]: """ @@ -1388,8 +1572,10 @@ class IStrategy(ABC, HyperStrategyMixin): Has positive effects on memory usage for whatever reason - also when using only one strategy. """ - return {pair: self.advise_indicators(pair_data.copy(), {'pair': pair}).copy() - for pair, pair_data in data.items()} + return { + pair: self.advise_indicators(pair_data.copy(), {"pair": pair}).copy() + for pair, pair_data in data.items() + } def ft_advise_signals(self, dataframe: DataFrame, metadata: dict) -> DataFrame: """ @@ -1418,7 +1604,8 @@ class IStrategy(ABC, HyperStrategyMixin): # call populate_indicators_Nm() which were tagged with @informative decorator. for inf_data, populate_fn in self._ft_informative: dataframe = _create_and_merge_informative_pair( - self, dataframe, metadata, inf_data, populate_fn) + self, dataframe, metadata, inf_data, populate_fn + ) return self.populate_indicators(dataframe, metadata) @@ -1434,10 +1621,10 @@ class IStrategy(ABC, HyperStrategyMixin): logger.debug(f"Populating enter signals for pair {metadata.get('pair')}.") # Initialize column to work around Pandas bug #56503. - dataframe.loc[:, 'enter_tag'] = '' + dataframe.loc[:, "enter_tag"] = "" df = self.populate_entry_trend(dataframe, metadata) - if 'enter_long' not in df.columns: - df = df.rename({'buy': 'enter_long', 'buy_tag': 'enter_tag'}, axis='columns') + if "enter_long" not in df.columns: + df = df.rename({"buy": "enter_long", "buy_tag": "enter_tag"}, axis="columns") return df @@ -1451,9 +1638,9 @@ class IStrategy(ABC, HyperStrategyMixin): :return: DataFrame with exit column """ # Initialize column to work around Pandas bug #56503. 
- dataframe.loc[:, 'exit_tag'] = '' + dataframe.loc[:, "exit_tag"] = "" logger.debug(f"Populating exit signals for pair {metadata.get('pair')}.") df = self.populate_exit_trend(dataframe, metadata) - if 'exit_long' not in df.columns: - df = df.rename({'sell': 'exit_long'}, axis='columns') + if "exit_long" not in df.columns: + df = df.rename({"sell": "exit_long"}, axis="columns") return df diff --git a/freqtrade/strategy/parameters.py b/freqtrade/strategy/parameters.py index 796fb9514..79091e2d6 100644 --- a/freqtrade/strategy/parameters.py +++ b/freqtrade/strategy/parameters.py @@ -2,6 +2,7 @@ IHyperStrategy interface, hyperoptable Parameter class. This module defines a base class for auto-hyperoptable strategies. """ + import logging from abc import ABC, abstractmethod from contextlib import suppress @@ -12,7 +13,8 @@ from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer with suppress(ImportError): - from skopt.space import Integer, Real, Categorical + from skopt.space import Categorical, Integer, Real + from freqtrade.optimize.space import SKDecimal from freqtrade.exceptions import OperationalException @@ -25,14 +27,22 @@ class BaseParameter(ABC): """ Defines a parameter that can be optimized by hyperopt. """ + category: Optional[str] default: Any value: Any in_space: bool = False name: str - def __init__(self, *, default: Any, space: Optional[str] = None, - optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + *, + default: Any, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable parameter. :param space: A parameter category. Can be 'buy' or 'sell'. This parameter is optional if @@ -42,9 +52,10 @@ class BaseParameter(ABC): :param load: Load parameter value from {space}_params. :param kwargs: Extra parameters to skopt.space.(Integer|Real|Categorical). """ - if 'name' in kwargs: + if "name" in kwargs: raise OperationalException( - 'Name is determined by parameter field name and can not be specified manually.') + "Name is determined by parameter field name and can not be specified manually." + ) self.category = space self._space_params = kwargs self.value = default @@ -52,10 +63,10 @@ class BaseParameter(ABC): self.load = load def __repr__(self): - return f'{self.__class__.__name__}({self.value})' + return f"{self.__class__.__name__}({self.value})" @abstractmethod - def get_space(self, name: str) -> Union['Integer', 'Real', 'SKDecimal', 'Categorical']: + def get_space(self, name: str) -> Union["Integer", "Real", "SKDecimal", "Categorical"]: """ Get-space - will be used by Hyperopt to get the hyperopt Space """ @@ -69,14 +80,23 @@ class BaseParameter(ABC): class NumericParameter(BaseParameter): - """ Internal parameter used for Numeric purposes """ + """Internal parameter used for Numeric purposes""" + float_or_int = Union[int, float] default: float_or_int value: float_or_int - def __init__(self, low: Union[float_or_int, Sequence[float_or_int]], - high: Optional[float_or_int] = None, *, default: float_or_int, - space: Optional[str] = None, optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + low: Union[float_or_int, Sequence[float_or_int]], + high: Optional[float_or_int] = None, + *, + default: float_or_int, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable numeric parameter. 
Cannot be instantiated, but provides the validation for other numeric parameters @@ -91,17 +111,16 @@ class NumericParameter(BaseParameter): :param kwargs: Extra parameters to skopt.space.*. """ if high is not None and isinstance(low, Sequence): - raise OperationalException(f'{self.__class__.__name__} space invalid.') + raise OperationalException(f"{self.__class__.__name__} space invalid.") if high is None or isinstance(low, Sequence): if not isinstance(low, Sequence) or len(low) != 2: - raise OperationalException(f'{self.__class__.__name__} space must be [low, high]') + raise OperationalException(f"{self.__class__.__name__} space must be [low, high]") self.low, self.high = low else: self.low = low self.high = high - super().__init__(default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__(default=default, space=space, optimize=optimize, load=load, **kwargs) class IntParameter(NumericParameter): @@ -110,8 +129,17 @@ class IntParameter(NumericParameter): low: int high: int - def __init__(self, low: Union[int, Sequence[int]], high: Optional[int] = None, *, default: int, - space: Optional[str] = None, optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + low: Union[int, Sequence[int]], + high: Optional[int] = None, + *, + default: int, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable integer parameter. :param low: Lower end (inclusive) of optimization space or [low, high]. @@ -125,10 +153,11 @@ class IntParameter(NumericParameter): :param kwargs: Extra parameters to skopt.space.Integer. """ - super().__init__(low=low, high=high, default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__( + low=low, high=high, default=default, space=space, optimize=optimize, load=load, **kwargs + ) - def get_space(self, name: str) -> 'Integer': + def get_space(self, name: str) -> "Integer": """ Create skopt optimization space. :param name: A name of parameter field. @@ -154,9 +183,17 @@ class RealParameter(NumericParameter): default: float value: float - def __init__(self, low: Union[float, Sequence[float]], high: Optional[float] = None, *, - default: float, space: Optional[str] = None, optimize: bool = True, - load: bool = True, **kwargs): + def __init__( + self, + low: Union[float, Sequence[float]], + high: Optional[float] = None, + *, + default: float, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable floating point parameter with unlimited precision. :param low: Lower end (inclusive) of optimization space or [low, high]. @@ -169,10 +206,11 @@ class RealParameter(NumericParameter): :param load: Load parameter value from {space}_params. :param kwargs: Extra parameters to skopt.space.Real. """ - super().__init__(low=low, high=high, default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__( + low=low, high=high, default=default, space=space, optimize=optimize, load=load, **kwargs + ) - def get_space(self, name: str) -> 'Real': + def get_space(self, name: str) -> "Real": """ Create skopt optimization space. :param name: A name of parameter field. 
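# Sketch of IntParameter, one of the numeric parameters re-wrapped above. ".value" is
# what the strategy reads at runtime, while ".range" spans low..high only while the
# parameter is actually being hyperopted. The SMA period bounds are illustrative.
from pandas import DataFrame

from freqtrade.strategy import IntParameter


class IntParameterFragment:
    buy_sma_period = IntParameter(10, 30, default=20, space="buy")

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Pre-compute an SMA for every candidate period so hyperopt can evaluate each one.
        for period in self.buy_sma_period.range:
            dataframe[f"sma_{period}"] = dataframe["close"].rolling(period).mean()
        return dataframe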
@@ -184,9 +222,18 @@ class DecimalParameter(NumericParameter): default: float value: float - def __init__(self, low: Union[float, Sequence[float]], high: Optional[float] = None, *, - default: float, decimals: int = 3, space: Optional[str] = None, - optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + low: Union[float, Sequence[float]], + high: Optional[float] = None, + *, + default: float, + decimals: int = 3, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable decimal parameter with a limited precision. :param low: Lower end (inclusive) of optimization space or [low, high]. @@ -203,16 +250,18 @@ class DecimalParameter(NumericParameter): self._decimals = decimals default = round(default, self._decimals) - super().__init__(low=low, high=high, default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__( + low=low, high=high, default=default, space=space, optimize=optimize, load=load, **kwargs + ) - def get_space(self, name: str) -> 'SKDecimal': + def get_space(self, name: str) -> "SKDecimal": """ Create skopt optimization space. :param name: A name of parameter field. """ - return SKDecimal(low=self.low, high=self.high, decimals=self._decimals, name=name, - **self._space_params) + return SKDecimal( + low=self.low, high=self.high, decimals=self._decimals, name=name, **self._space_params + ) @property def range(self): @@ -235,8 +284,16 @@ class CategoricalParameter(BaseParameter): value: Any opt_range: Sequence[Any] - def __init__(self, categories: Sequence[Any], *, default: Optional[Any] = None, - space: Optional[str] = None, optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + categories: Sequence[Any], + *, + default: Optional[Any] = None, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable parameter. :param categories: Optimization space, [a, b, ...]. @@ -251,12 +308,12 @@ class CategoricalParameter(BaseParameter): """ if len(categories) < 2: raise OperationalException( - 'CategoricalParameter space must be [a, b, ...] (at least two parameters)') + "CategoricalParameter space must be [a, b, ...] (at least two parameters)" + ) self.opt_range = categories - super().__init__(default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__(default=default, space=space, optimize=optimize, load=load, **kwargs) - def get_space(self, name: str) -> 'Categorical': + def get_space(self, name: str) -> "Categorical": """ Create skopt optimization space. :param name: A name of parameter field. @@ -278,9 +335,15 @@ class CategoricalParameter(BaseParameter): class BooleanParameter(CategoricalParameter): - - def __init__(self, *, default: Optional[Any] = None, - space: Optional[str] = None, optimize: bool = True, load: bool = True, **kwargs): + def __init__( + self, + *, + default: Optional[Any] = None, + space: Optional[str] = None, + optimize: bool = True, + load: bool = True, + **kwargs, + ): """ Initialize hyperopt-optimizable Boolean Parameter. It's a shortcut to `CategoricalParameter([True, False])`. 
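# Sketch of DecimalParameter, CategoricalParameter and BooleanParameter, whose
# constructors are adjusted above; BooleanParameter is simply a shortcut for
# CategoricalParameter([True, False]). The names and ranges are illustrative.
from freqtrade.strategy import BooleanParameter, CategoricalParameter, DecimalParameter


class MixedParameterFragment:
    buy_dip_threshold = DecimalParameter(0.01, 0.05, default=0.02, decimals=3, space="buy")
    buy_trigger = CategoricalParameter(["sma_cross", "bb_lower"], default="sma_cross", space="buy")
    sell_allow_trailing = BooleanParameter(default=True, space="sell")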
@@ -295,5 +358,11 @@ class BooleanParameter(CategoricalParameter): """ categories = [True, False] - super().__init__(categories=categories, default=default, space=space, optimize=optimize, - load=load, **kwargs) + super().__init__( + categories=categories, + default=default, + space=space, + optimize=optimize, + load=load, + **kwargs, + ) diff --git a/freqtrade/strategy/strategy_helper.py b/freqtrade/strategy/strategy_helper.py index 5085063a3..1a91629d9 100644 --- a/freqtrade/strategy/strategy_helper.py +++ b/freqtrade/strategy/strategy_helper.py @@ -5,11 +5,16 @@ import pandas as pd from freqtrade.exchange import timeframe_to_minutes -def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, - timeframe: str, timeframe_inf: str, ffill: bool = True, - append_timeframe: bool = True, - date_column: str = 'date', - suffix: Optional[str] = None) -> pd.DataFrame: +def merge_informative_pair( + dataframe: pd.DataFrame, + informative: pd.DataFrame, + timeframe: str, + timeframe_inf: str, + ffill: bool = True, + append_timeframe: bool = True, + date_column: str = "date", + suffix: Optional[str] = None, +) -> pd.DataFrame: """ Correctly merge informative samples to the original dataframe, avoiding lookahead bias. @@ -41,37 +46,39 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, minutes = timeframe_to_minutes(timeframe) if minutes == minutes_inf: # No need to forwardshift if the timeframes are identical - informative['date_merge'] = informative[date_column] + informative["date_merge"] = informative[date_column] elif minutes < minutes_inf: # Subtract "small" timeframe so merging is not delayed by 1 small candle # Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073 if not informative.empty: - if timeframe_inf == '1M': - informative['date_merge'] = ( - (informative[date_column] + pd.offsets.MonthBegin(1)) - - pd.to_timedelta(minutes, 'm') - ) + if timeframe_inf == "1M": + informative["date_merge"] = ( + informative[date_column] + pd.offsets.MonthBegin(1) + ) - pd.to_timedelta(minutes, "m") else: - informative['date_merge'] = ( - informative[date_column] + pd.to_timedelta(minutes_inf, 'm') - - pd.to_timedelta(minutes, 'm') + informative["date_merge"] = ( + informative[date_column] + + pd.to_timedelta(minutes_inf, "m") + - pd.to_timedelta(minutes, "m") ) else: - informative['date_merge'] = informative[date_column] + informative["date_merge"] = informative[date_column] else: - raise ValueError("Tried to merge a faster timeframe to a slower timeframe." - "This would create new rows, and can throw off your regular indicators.") + raise ValueError( + "Tried to merge a faster timeframe to a slower timeframe." + "This would create new rows, and can throw off your regular indicators." 
+ ) # Rename columns to be unique - date_merge = 'date_merge' + date_merge = "date_merge" if suffix and append_timeframe: raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.") elif append_timeframe: - date_merge = f'date_merge_{timeframe_inf}' + date_merge = f"date_merge_{timeframe_inf}" informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns] elif suffix: - date_merge = f'date_merge_{suffix}' + date_merge = f"date_merge_{suffix}" informative.columns = [f"{col}_{suffix}" for col in informative.columns] # Combine the 2 dataframes @@ -79,21 +86,25 @@ def merge_informative_pair(dataframe: pd.DataFrame, informative: pd.DataFrame, if ffill: # https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging # merge_ordered - ffill method is 2.5x faster than separate ffill() - dataframe = pd.merge_ordered(dataframe, informative, fill_method="ffill", left_on='date', - right_on=date_merge, how='left') + dataframe = pd.merge_ordered( + dataframe, + informative, + fill_method="ffill", + left_on="date", + right_on=date_merge, + how="left", + ) else: - dataframe = pd.merge(dataframe, informative, left_on='date', - right_on=date_merge, how='left') + dataframe = pd.merge( + dataframe, informative, left_on="date", right_on=date_merge, how="left" + ) dataframe = dataframe.drop(date_merge, axis=1) return dataframe def stoploss_from_open( - open_relative_stop: float, - current_profit: float, - is_short: bool = False, - leverage: float = 1.0 + open_relative_stop: float, current_profit: float, is_short: bool = False, leverage: float = 1.0 ) -> float: """ Given the current profit, and a desired stop loss value relative to the trade entry price, @@ -129,8 +140,9 @@ def stoploss_from_open( return max(stoploss * leverage, 0.0) -def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False, - leverage: float = 1.0) -> float: +def stoploss_from_absolute( + stop_rate: float, current_rate: float, is_short: bool = False, leverage: float = 1.0 +) -> float: """ Given current price and desired stop price, return a stop loss value that is relative to current price. diff --git a/freqtrade/strategy/strategy_wrapper.py b/freqtrade/strategy/strategy_wrapper.py index 8cb0bde15..a6f74f1c0 100644 --- a/freqtrade/strategy/strategy_wrapper.py +++ b/freqtrade/strategy/strategy_wrapper.py @@ -9,7 +9,7 @@ from freqtrade.exceptions import StrategyError logger = logging.getLogger(__name__) -F = TypeVar('F', bound=Callable[..., Any]) +F = TypeVar("F", bound=Callable[..., Any]) def strategy_safe_wrapper(f: F, message: str = "", default_retval=None, supress_error=False) -> F: @@ -18,27 +18,21 @@ def strategy_safe_wrapper(f: F, message: str = "", default_retval=None, supress_ Caches all exceptions and returns either the default_retval (if it's not None) or raises a StrategyError exception, which then needs to be handled by the calling method. 
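# Usage sketch for the strategy_helper.py functions reformatted above (tiny synthetic
# frames; the resulting column names follow the suffixing logic in the hunk, everything
# else is assumed purely for illustration).
import pandas as pd

from freqtrade.strategy import merge_informative_pair, stoploss_from_open

df_5m = pd.DataFrame(
    {"date": pd.date_range("2024-01-01", periods=24, freq="5min", tz="UTC"), "close": range(24)}
)
df_1h = pd.DataFrame(
    {"date": pd.date_range("2024-01-01", periods=2, freq="1h", tz="UTC"), "close": [100.0, 101.0]}
)
merged = merge_informative_pair(df_5m, df_1h, timeframe="5m", timeframe_inf="1h", ffill=True)
print(list(merged.columns))  # informative columns arrive as "date_1h" / "close_1h"

# stoploss_from_open() returns a stop distance relative to the current rate; sign and
# leverage handling follow the docstring above.
print(stoploss_from_open(0.0, current_profit=0.10, is_short=False, leverage=1.0))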
""" + @wraps(f) def wrapper(*args, **kwargs): try: - if 'trade' in kwargs: + if "trade" in kwargs: # Protect accidental modifications from within the strategy - kwargs['trade'] = deepcopy(kwargs['trade']) + kwargs["trade"] = deepcopy(kwargs["trade"]) return f(*args, **kwargs) except ValueError as error: - logger.warning( - f"{message}" - f"Strategy caused the following exception: {error}" - f"{f}" - ) + logger.warning(f"{message}Strategy caused the following exception: {error}{f}") if default_retval is None and not supress_error: raise StrategyError(str(error)) from error return default_retval except Exception as error: - logger.exception( - f"{message}" - f"Unexpected error {error} calling {f}" - ) + logger.exception(f"{message}Unexpected error {error} calling {f}") if default_retval is None and not supress_error: raise StrategyError(str(error)) from error return default_retval diff --git a/freqtrade/strategy/strategyupdater.py b/freqtrade/strategy/strategyupdater.py index 2669dcc4a..05494537d 100644 --- a/freqtrade/strategy/strategyupdater.py +++ b/freqtrade/strategy/strategyupdater.py @@ -8,41 +8,39 @@ from freqtrade.constants import Config class StrategyUpdater: name_mapping = { - 'ticker_interval': 'timeframe', - 'buy': 'enter_long', - 'sell': 'exit_long', - 'buy_tag': 'enter_tag', - 'sell_reason': 'exit_reason', - - 'sell_signal': 'exit_signal', - 'custom_sell': 'custom_exit', - 'force_sell': 'force_exit', - 'emergency_sell': 'emergency_exit', - + "ticker_interval": "timeframe", + "buy": "enter_long", + "sell": "exit_long", + "buy_tag": "enter_tag", + "sell_reason": "exit_reason", + "sell_signal": "exit_signal", + "custom_sell": "custom_exit", + "force_sell": "force_exit", + "emergency_sell": "emergency_exit", # Strategy/config settings: - 'use_sell_signal': 'use_exit_signal', - 'sell_profit_only': 'exit_profit_only', - 'sell_profit_offset': 'exit_profit_offset', - 'ignore_roi_if_buy_signal': 'ignore_roi_if_entry_signal', - 'forcebuy_enable': 'force_entry_enable', + "use_sell_signal": "use_exit_signal", + "sell_profit_only": "exit_profit_only", + "sell_profit_offset": "exit_profit_offset", + "ignore_roi_if_buy_signal": "ignore_roi_if_entry_signal", + "forcebuy_enable": "force_entry_enable", } function_mapping = { - 'populate_buy_trend': 'populate_entry_trend', - 'populate_sell_trend': 'populate_exit_trend', - 'custom_sell': 'custom_exit', - 'check_buy_timeout': 'check_entry_timeout', - 'check_sell_timeout': 'check_exit_timeout', + "populate_buy_trend": "populate_entry_trend", + "populate_sell_trend": "populate_exit_trend", + "custom_sell": "custom_exit", + "check_buy_timeout": "check_entry_timeout", + "check_sell_timeout": "check_exit_timeout", # '': '', } # order_time_in_force, order_types, unfilledtimeout otif_ot_unfilledtimeout = { - 'buy': 'entry', - 'sell': 'exit', + "buy": "entry", + "sell": "exit", } # create a dictionary that maps the old column names to the new ones - rename_dict = {'buy': 'enter_long', 'sell': 'exit_long', 'buy_tag': 'enter_tag'} + rename_dict = {"buy": "enter_long", "sell": "exit_long", "buy_tag": "enter_tag"} def start(self, config: Config, strategy_obj: dict) -> None: """ @@ -51,12 +49,12 @@ class StrategyUpdater: :return: None """ - source_file = strategy_obj['location'] - strategies_backup_folder = Path.joinpath(config['user_data_dir'], "strategies_orig_updater") - target_file = Path.joinpath(strategies_backup_folder, strategy_obj['location_rel']) + source_file = strategy_obj["location"] + strategies_backup_folder = 
Path.joinpath(config["user_data_dir"], "strategies_orig_updater") + target_file = Path.joinpath(strategies_backup_folder, strategy_obj["location_rel"]) # read the file - with Path(source_file).open('r') as f: + with Path(source_file).open("r") as f: old_code = f.read() if not strategies_backup_folder.is_dir(): Path(strategies_backup_folder).mkdir(parents=True, exist_ok=True) @@ -70,7 +68,7 @@ class StrategyUpdater: # update the code new_code = self.update_code(old_code) # write the modified code to the destination folder - with Path(source_file).open('w') as f: + with Path(source_file).open("w") as f: f.write(new_code) # define the function to update the code @@ -106,7 +104,6 @@ class StrategyUpdater: # Here we go through each respective node, slice, elt, key ... to replace outdated entries. class NameUpdater(ast_comments.NodeTransformer): def generic_visit(self, node): - # space is not yet transferred from buy/sell to entry/exit and thereby has to be skipped. if isinstance(node, ast_comments.keyword): if node.arg == "space": @@ -180,37 +177,38 @@ class NameUpdater(ast_comments.NodeTransformer): def visit_Attribute(self, node): if ( - isinstance(node.value, ast_comments.Name) - and node.value.id == 'trade' - and node.attr == 'nr_of_successful_buys' + isinstance(node.value, ast_comments.Name) + and node.value.id == "trade" + and node.attr == "nr_of_successful_buys" ): - node.attr = 'nr_of_successful_entries' + node.attr = "nr_of_successful_entries" return node def visit_ClassDef(self, node): # check if the class is derived from IStrategy - if any(isinstance(base, ast_comments.Name) and - base.id == 'IStrategy' for base in node.bases): + if any( + isinstance(base, ast_comments.Name) and base.id == "IStrategy" for base in node.bases + ): # check if the INTERFACE_VERSION variable exists has_interface_version = any( - isinstance(child, ast_comments.Assign) and - isinstance(child.targets[0], ast_comments.Name) and - child.targets[0].id == 'INTERFACE_VERSION' + isinstance(child, ast_comments.Assign) + and isinstance(child.targets[0], ast_comments.Name) + and child.targets[0].id == "INTERFACE_VERSION" for child in node.body ) # if the INTERFACE_VERSION variable does not exist, add it as the first child if not has_interface_version: - node.body.insert(0, ast_comments.parse('INTERFACE_VERSION = 3').body[0]) + node.body.insert(0, ast_comments.parse("INTERFACE_VERSION = 3").body[0]) # otherwise, update its value to 3 else: for child in node.body: if ( - isinstance(child, ast_comments.Assign) - and isinstance(child.targets[0], ast_comments.Name) - and child.targets[0].id == 'INTERFACE_VERSION' + isinstance(child, ast_comments.Assign) + and isinstance(child.targets[0], ast_comments.Name) + and child.targets[0].id == "INTERFACE_VERSION" ): - child.value = ast_comments.parse('3').body[0].value + child.value = ast_comments.parse("3").body[0].value self.generic_visit(node) return node diff --git a/freqtrade/templates/FreqaiExampleHybridStrategy.py b/freqtrade/templates/FreqaiExampleHybridStrategy.py index 5df03bd5d..e41fbac56 100644 --- a/freqtrade/templates/FreqaiExampleHybridStrategy.py +++ b/freqtrade/templates/FreqaiExampleHybridStrategy.py @@ -61,27 +61,28 @@ class FreqaiExampleHybridStrategy(IStrategy): """ minimal_roi = { + # "120": 0.0, # exit after 120 minutes at break even "60": 0.01, "30": 0.02, - "0": 0.04 + "0": 0.04, } plot_config = { - 'main_plot': { - 'tema': {}, + "main_plot": { + "tema": {}, }, - 'subplots': { + "subplots": { "MACD": { - 'macd': {'color': 'blue'}, - 'macdsignal': {'color': 
'orange'}, + "macd": {"color": "blue"}, + "macdsignal": {"color": "orange"}, }, "RSI": { - 'rsi': {'color': 'red'}, + "rsi": {"color": "red"}, }, "Up_or_down": { - '&s-up_or_down': {'color': 'green'}, - } - } + "&s-up_or_down": {"color": "green"}, + }, + }, } process_only_new_candles = True @@ -91,13 +92,14 @@ class FreqaiExampleHybridStrategy(IStrategy): can_short = True # Hyperoptable parameters - buy_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True) - sell_rsi = IntParameter(low=50, high=100, default=70, space='sell', optimize=True, load=True) - short_rsi = IntParameter(low=51, high=100, default=70, space='sell', optimize=True, load=True) - exit_short_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True) + buy_rsi = IntParameter(low=1, high=50, default=30, space="buy", optimize=True, load=True) + sell_rsi = IntParameter(low=50, high=100, default=70, space="sell", optimize=True, load=True) + short_rsi = IntParameter(low=51, high=100, default=70, space="sell", optimize=True, load=True) + exit_short_rsi = IntParameter(low=1, high=50, default=30, space="buy", optimize=True, load=True) - def feature_engineering_expand_all(self, dataframe: DataFrame, period: int, - metadata: Dict, **kwargs) -> DataFrame: + def feature_engineering_expand_all( + self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -136,12 +138,9 @@ class FreqaiExampleHybridStrategy(IStrategy): dataframe["bb_upperband-period"] = bollinger["upper"] dataframe["%-bb_width-period"] = ( - dataframe["bb_upperband-period"] - - dataframe["bb_lowerband-period"] + dataframe["bb_upperband-period"] - dataframe["bb_lowerband-period"] ) / dataframe["bb_middleband-period"] - dataframe["%-close-bb_lower-period"] = ( - dataframe["close"] / dataframe["bb_lowerband-period"] - ) + dataframe["%-close-bb_lower-period"] = dataframe["close"] / dataframe["bb_lowerband-period"] dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period) @@ -152,7 +151,8 @@ class FreqaiExampleHybridStrategy(IStrategy): return dataframe def feature_engineering_expand_basic( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -185,7 +185,8 @@ class FreqaiExampleHybridStrategy(IStrategy): return dataframe def feature_engineering_standard( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This optional function will be called once with the dataframe of the base timeframe. @@ -226,13 +227,13 @@ class FreqaiExampleHybridStrategy(IStrategy): usage example: dataframe["&-target"] = dataframe["close"].shift(-1) / dataframe["close"] """ self.freqai.class_names = ["down", "up"] - dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-50) > - dataframe["close"], 'up', 'down') + dataframe["&s-up_or_down"] = np.where( + dataframe["close"].shift(-50) > dataframe["close"], "up", "down" + ) return dataframe def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: # noqa: C901 - # User creates their own custom strat here. 
Present example is a supertrend # based strategy. @@ -240,78 +241,81 @@ class FreqaiExampleHybridStrategy(IStrategy): # TA indicators to combine with the Freqai targets # RSI - dataframe['rsi'] = ta.RSI(dataframe) + dataframe["rsi"] = ta.RSI(dataframe) # Bollinger Bands bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2) - dataframe['bb_lowerband'] = bollinger['lower'] - dataframe['bb_middleband'] = bollinger['mid'] - dataframe['bb_upperband'] = bollinger['upper'] - dataframe["bb_percent"] = ( - (dataframe["close"] - dataframe["bb_lowerband"]) / - (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) - ) - dataframe["bb_width"] = ( - (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) / dataframe["bb_middleband"] + dataframe["bb_lowerband"] = bollinger["lower"] + dataframe["bb_middleband"] = bollinger["mid"] + dataframe["bb_upperband"] = bollinger["upper"] + dataframe["bb_percent"] = (dataframe["close"] - dataframe["bb_lowerband"]) / ( + dataframe["bb_upperband"] - dataframe["bb_lowerband"] ) + dataframe["bb_width"] = (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) / dataframe[ + "bb_middleband" + ] # TEMA - Triple Exponential Moving Average - dataframe['tema'] = ta.TEMA(dataframe, timeperiod=9) + dataframe["tema"] = ta.TEMA(dataframe, timeperiod=9) return dataframe def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - df.loc[ ( # Signal: RSI crosses above 30 - (qtpylib.crossed_above(df['rsi'], self.buy_rsi.value)) & - (df['tema'] <= df['bb_middleband']) & # Guard: tema below BB middle - (df['tema'] > df['tema'].shift(1)) & # Guard: tema is raising - (df['volume'] > 0) & # Make sure Volume is not 0 - (df['do_predict'] == 1) & # Make sure Freqai is confident in the prediction + (qtpylib.crossed_above(df["rsi"], self.buy_rsi.value)) + & (df["tema"] <= df["bb_middleband"]) # Guard: tema below BB middle + & (df["tema"] > df["tema"].shift(1)) # Guard: tema is raising + & (df["volume"] > 0) # Make sure Volume is not 0 + & (df["do_predict"] == 1) # Make sure Freqai is confident in the prediction + & # Only enter trade if Freqai thinks the trend is in this direction - (df['&s-up_or_down'] == 'up') + (df["&s-up_or_down"] == "up") ), - 'enter_long'] = 1 + "enter_long", + ] = 1 df.loc[ ( # Signal: RSI crosses above 70 - (qtpylib.crossed_above(df['rsi'], self.short_rsi.value)) & - (df['tema'] > df['bb_middleband']) & # Guard: tema above BB middle - (df['tema'] < df['tema'].shift(1)) & # Guard: tema is falling - (df['volume'] > 0) & # Make sure Volume is not 0 - (df['do_predict'] == 1) & # Make sure Freqai is confident in the prediction + (qtpylib.crossed_above(df["rsi"], self.short_rsi.value)) + & (df["tema"] > df["bb_middleband"]) # Guard: tema above BB middle + & (df["tema"] < df["tema"].shift(1)) # Guard: tema is falling + & (df["volume"] > 0) # Make sure Volume is not 0 + & (df["do_predict"] == 1) # Make sure Freqai is confident in the prediction + & # Only enter trade if Freqai thinks the trend is in this direction - (df['&s-up_or_down'] == 'down') + (df["&s-up_or_down"] == "down") ), - 'enter_short'] = 1 + "enter_short", + ] = 1 return df def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - df.loc[ ( # Signal: RSI crosses above 70 - (qtpylib.crossed_above(df['rsi'], self.sell_rsi.value)) & - (df['tema'] > df['bb_middleband']) & # Guard: tema above BB middle - (df['tema'] < df['tema'].shift(1)) & # Guard: tema is falling - (df['volume'] > 0) # Make sure Volume is not 0 + 
(qtpylib.crossed_above(df["rsi"], self.sell_rsi.value)) + & (df["tema"] > df["bb_middleband"]) # Guard: tema above BB middle + & (df["tema"] < df["tema"].shift(1)) # Guard: tema is falling + & (df["volume"] > 0) # Make sure Volume is not 0 ), - - 'exit_long'] = 1 + "exit_long", + ] = 1 df.loc[ ( # Signal: RSI crosses above 30 - (qtpylib.crossed_above(df['rsi'], self.exit_short_rsi.value)) & + (qtpylib.crossed_above(df["rsi"], self.exit_short_rsi.value)) + & # Guard: tema below BB middle - (df['tema'] <= df['bb_middleband']) & - (df['tema'] > df['tema'].shift(1)) & # Guard: tema is raising - (df['volume'] > 0) # Make sure Volume is not 0 + (df["tema"] <= df["bb_middleband"]) + & (df["tema"] > df["tema"].shift(1)) # Guard: tema is raising + & (df["volume"] > 0) # Make sure Volume is not 0 ), - 'exit_short'] = 1 + "exit_short", + ] = 1 return df diff --git a/freqtrade/templates/FreqaiExampleStrategy.py b/freqtrade/templates/FreqaiExampleStrategy.py index 93b916e38..a16775163 100644 --- a/freqtrade/templates/FreqaiExampleStrategy.py +++ b/freqtrade/templates/FreqaiExampleStrategy.py @@ -45,8 +45,9 @@ class FreqaiExampleStrategy(IStrategy): startup_candle_count: int = 40 can_short = True - def feature_engineering_expand_all(self, dataframe: DataFrame, period: int, - metadata: Dict, **kwargs) -> DataFrame: + def feature_engineering_expand_all( + self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -89,12 +90,9 @@ class FreqaiExampleStrategy(IStrategy): dataframe["bb_upperband-period"] = bollinger["upper"] dataframe["%-bb_width-period"] = ( - dataframe["bb_upperband-period"] - - dataframe["bb_lowerband-period"] + dataframe["bb_upperband-period"] - dataframe["bb_lowerband-period"] ) / dataframe["bb_middleband-period"] - dataframe["%-close-bb_lower-period"] = ( - dataframe["close"] / dataframe["bb_lowerband-period"] - ) + dataframe["%-close-bb_lower-period"] = dataframe["close"] / dataframe["bb_lowerband-period"] dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period) @@ -105,7 +103,8 @@ class FreqaiExampleStrategy(IStrategy): return dataframe def feature_engineering_expand_basic( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This function will automatically expand the defined features on the config defined @@ -142,7 +141,8 @@ class FreqaiExampleStrategy(IStrategy): return dataframe def feature_engineering_standard( - self, dataframe: DataFrame, metadata: Dict, **kwargs) -> DataFrame: + self, dataframe: DataFrame, metadata: Dict, **kwargs + ) -> DataFrame: """ *Only functional with FreqAI enabled strategies* This optional function will be called once with the dataframe of the base timeframe. 
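# Standalone pandas aside (synthetic data): the FreqAI example strategies combine their
# boolean guards either with chained "&" expressions (hybrid strategy above) or with
# functools.reduce (FreqaiExampleStrategy below); both forms build the same row mask.
from functools import reduce

import pandas as pd

df = pd.DataFrame({"do_predict": [1, 1, 0], "&-s_close": [0.02, -0.01, 0.05]})
enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > 0.01]
df.loc[reduce(lambda x, y: x & y, enter_long_conditions), "enter_long"] = 1
print(df)  # only the first row receives enter_long == 1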
@@ -197,7 +197,7 @@ class FreqaiExampleStrategy(IStrategy): .mean() / dataframe["close"] - 1 - ) + ) # Classifiers are typically set up with strings as targets: # df['&s-up_or_down'] = np.where( df["close"].shift(-100) > @@ -224,7 +224,6 @@ class FreqaiExampleStrategy(IStrategy): return dataframe def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame: - # All indicators must be populated by feature_engineering_*() functions # the model will return all labels created by user in `set_freqai_targets()` @@ -237,11 +236,10 @@ class FreqaiExampleStrategy(IStrategy): return dataframe def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - enter_long_conditions = [ df["do_predict"] == 1, df["&-s_close"] > 0.01, - ] + ] if enter_long_conditions: df.loc[ @@ -251,7 +249,7 @@ class FreqaiExampleStrategy(IStrategy): enter_short_conditions = [ df["do_predict"] == 1, df["&-s_close"] < -0.01, - ] + ] if enter_short_conditions: df.loc[ @@ -261,17 +259,11 @@ class FreqaiExampleStrategy(IStrategy): return df def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame: - exit_long_conditions = [ - df["do_predict"] == 1, - df["&-s_close"] < 0 - ] + exit_long_conditions = [df["do_predict"] == 1, df["&-s_close"] < 0] if exit_long_conditions: df.loc[reduce(lambda x, y: x & y, exit_long_conditions), "exit_long"] = 1 - exit_short_conditions = [ - df["do_predict"] == 1, - df["&-s_close"] > 0 - ] + exit_short_conditions = [df["do_predict"] == 1, df["&-s_close"] > 0] if exit_short_conditions: df.loc[reduce(lambda x, y: x & y, exit_short_conditions), "exit_short"] = 1 @@ -289,7 +281,6 @@ class FreqaiExampleStrategy(IStrategy): side: str, **kwargs, ) -> bool: - df, _ = self.dp.get_analyzed_dataframe(pair, self.timeframe) last_candle = df.iloc[-1].squeeze() diff --git a/freqtrade/templates/base_config.json.j2 b/freqtrade/templates/base_config.json.j2 index caa27a69e..4956cf056 100644 --- a/freqtrade/templates/base_config.json.j2 +++ b/freqtrade/templates/base_config.json.j2 @@ -10,7 +10,8 @@ "stake_currency": "{{ stake_currency }}", "stake_amount": {{ stake_amount }}, "tradable_balance_ratio": 0.99, - "fiat_display_currency": "{{ fiat_display_currency }}",{{ ('\n "timeframe": "' + timeframe + '",') if timeframe else '' }} +{{- ('\n "fiat_display_currency": "' + fiat_display_currency + '",') if fiat_display_currency else ''}} +{{- ('\n "timeframe": "' + timeframe + '",') if timeframe else '' }} "dry_run": {{ dry_run | lower }}, "dry_run_wallet": 1000, "cancel_open_orders_on_exit": false, diff --git a/freqtrade/templates/sample_hyperopt_loss.py b/freqtrade/templates/sample_hyperopt_loss.py index 5eab92a0c..4e4afed24 100644 --- a/freqtrade/templates/sample_hyperopt_loss.py +++ b/freqtrade/templates/sample_hyperopt_loss.py @@ -35,17 +35,23 @@ class SampleHyperOptLoss(IHyperOptLoss): """ @staticmethod - def hyperopt_loss_function(results: DataFrame, trade_count: int, - min_date: datetime, max_date: datetime, - config: Config, processed: Dict[str, DataFrame], - *args, **kwargs) -> float: + def hyperopt_loss_function( + results: DataFrame, + trade_count: int, + min_date: datetime, + max_date: datetime, + config: Config, + processed: Dict[str, DataFrame], + *args, + **kwargs, + ) -> float: """ Objective function, returns smaller number for better results """ - total_profit = results['profit_ratio'].sum() - trade_duration = results['trade_duration'].mean() + total_profit = results["profit_ratio"].sum() + trade_duration = results["trade_duration"].mean() - trade_loss = 
1 - 0.25 * exp(-(trade_count - TARGET_TRADES) ** 2 / 10 ** 5.8) + trade_loss = 1 - 0.25 * exp(-((trade_count - TARGET_TRADES) ** 2) / 10**5.8) profit_loss = max(0, 1 - total_profit / EXPECTED_MAX_PROFIT) duration_loss = 0.4 * min(trade_duration / MAX_ACCEPTED_TRADE_DURATION, 1) result = trade_loss + profit_loss + duration_loss diff --git a/freqtrade/templates/sample_strategy.py b/freqtrade/templates/sample_strategy.py index dec547715..033c0d24e 100644 --- a/freqtrade/templates/sample_strategy.py +++ b/freqtrade/templates/sample_strategy.py @@ -7,8 +7,13 @@ import pandas as pd # noqa from pandas import DataFrame from typing import Optional, Union -from freqtrade.strategy import (BooleanParameter, CategoricalParameter, DecimalParameter, - IStrategy, IntParameter) +from freqtrade.strategy import ( + BooleanParameter, + CategoricalParameter, + DecimalParameter, + IStrategy, + IntParameter, +) # -------------------------------- # Add your lib to import here @@ -34,6 +39,7 @@ class SampleStrategy(IStrategy): You should keep: - timeframe, minimal_roi, stoploss, trailing_* """ + # Strategy interface version - allow new iterations of the strategy interface. # Check the documentation or the Sample strategy to get the latest version. INTERFACE_VERSION = 3 @@ -44,9 +50,10 @@ class SampleStrategy(IStrategy): # Minimal ROI designed for the strategy. # This attribute will be overridden if the config file contains "minimal_roi". minimal_roi = { + # "120": 0.0, # exit after 120 minutes at break even "60": 0.01, "30": 0.02, - "0": 0.04 + "0": 0.04, } # Optimal stoploss designed for the strategy. @@ -60,7 +67,7 @@ class SampleStrategy(IStrategy): # trailing_stop_positive_offset = 0.0 # Disabled / not configured # Optimal timeframe for the strategy. - timeframe = '5m' + timeframe = "5m" # Run "populate_indicators()" only for new candle. process_only_new_candles = True @@ -71,42 +78,39 @@ class SampleStrategy(IStrategy): ignore_roi_if_entry_signal = False # Hyperoptable parameters - buy_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True) - sell_rsi = IntParameter(low=50, high=100, default=70, space='sell', optimize=True, load=True) - short_rsi = IntParameter(low=51, high=100, default=70, space='sell', optimize=True, load=True) - exit_short_rsi = IntParameter(low=1, high=50, default=30, space='buy', optimize=True, load=True) + buy_rsi = IntParameter(low=1, high=50, default=30, space="buy", optimize=True, load=True) + sell_rsi = IntParameter(low=50, high=100, default=70, space="sell", optimize=True, load=True) + short_rsi = IntParameter(low=51, high=100, default=70, space="sell", optimize=True, load=True) + exit_short_rsi = IntParameter(low=1, high=50, default=30, space="buy", optimize=True, load=True) # Number of candles the strategy requires before producing valid signals startup_candle_count: int = 200 # Optional order type mapping. order_types = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'market', - 'stoploss_on_exchange': False + "entry": "limit", + "exit": "limit", + "stoploss": "market", + "stoploss_on_exchange": False, } # Optional order time in force. 
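# Quick equivalence check for the reshaped trade_loss expression above: the added
# parentheses are purely cosmetic, since "**" binds tighter than unary minus in Python.
# TARGET_TRADES is assumed here only to make the snippet self-contained.
from math import exp

trade_count, TARGET_TRADES = 650, 600
old = 1 - 0.25 * exp(-(trade_count - TARGET_TRADES) ** 2 / 10 ** 5.8)
new = 1 - 0.25 * exp(-((trade_count - TARGET_TRADES) ** 2) / 10**5.8)
assert old == new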
- order_time_in_force = { - 'entry': 'GTC', - 'exit': 'GTC' - } + order_time_in_force = {"entry": "GTC", "exit": "GTC"} plot_config = { - 'main_plot': { - 'tema': {}, - 'sar': {'color': 'white'}, + "main_plot": { + "tema": {}, + "sar": {"color": "white"}, }, - 'subplots': { + "subplots": { "MACD": { - 'macd': {'color': 'blue'}, - 'macdsignal': {'color': 'orange'}, + "macd": {"color": "blue"}, + "macdsignal": {"color": "orange"}, }, "RSI": { - 'rsi': {'color': 'red'}, - } - } + "rsi": {"color": "red"}, + }, + }, } def informative_pairs(self): @@ -138,7 +142,7 @@ class SampleStrategy(IStrategy): # ------------------------------------ # ADX - dataframe['adx'] = ta.ADX(dataframe) + dataframe["adx"] = ta.ADX(dataframe) # # Plus Directional Indicator / Movement # dataframe['plus_dm'] = ta.PLUS_DM(dataframe) @@ -177,7 +181,7 @@ class SampleStrategy(IStrategy): # dataframe['cci'] = ta.CCI(dataframe) # RSI - dataframe['rsi'] = ta.RSI(dataframe) + dataframe["rsi"] = ta.RSI(dataframe) # # Inverse Fisher transform on RSI: values [-1.0, 1.0] (https://goo.gl/2JGGoy) # rsi = 0.1 * (dataframe['rsi'] - 50) @@ -193,8 +197,8 @@ class SampleStrategy(IStrategy): # Stochastic Fast stoch_fast = ta.STOCHF(dataframe) - dataframe['fastd'] = stoch_fast['fastd'] - dataframe['fastk'] = stoch_fast['fastk'] + dataframe["fastd"] = stoch_fast["fastd"] + dataframe["fastk"] = stoch_fast["fastk"] # # Stochastic RSI # Please read https://github.com/freqtrade/freqtrade/issues/2961 before using this. @@ -205,12 +209,12 @@ class SampleStrategy(IStrategy): # MACD macd = ta.MACD(dataframe) - dataframe['macd'] = macd['macd'] - dataframe['macdsignal'] = macd['macdsignal'] - dataframe['macdhist'] = macd['macdhist'] + dataframe["macd"] = macd["macd"] + dataframe["macdsignal"] = macd["macdsignal"] + dataframe["macdhist"] = macd["macdhist"] # MFI - dataframe['mfi'] = ta.MFI(dataframe) + dataframe["mfi"] = ta.MFI(dataframe) # # ROC # dataframe['roc'] = ta.ROC(dataframe) @@ -220,16 +224,15 @@ class SampleStrategy(IStrategy): # Bollinger Bands bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2) - dataframe['bb_lowerband'] = bollinger['lower'] - dataframe['bb_middleband'] = bollinger['mid'] - dataframe['bb_upperband'] = bollinger['upper'] - dataframe["bb_percent"] = ( - (dataframe["close"] - dataframe["bb_lowerband"]) / - (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) - ) - dataframe["bb_width"] = ( - (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) / dataframe["bb_middleband"] + dataframe["bb_lowerband"] = bollinger["lower"] + dataframe["bb_middleband"] = bollinger["mid"] + dataframe["bb_upperband"] = bollinger["upper"] + dataframe["bb_percent"] = (dataframe["close"] - dataframe["bb_lowerband"]) / ( + dataframe["bb_upperband"] - dataframe["bb_lowerband"] ) + dataframe["bb_width"] = (dataframe["bb_upperband"] - dataframe["bb_lowerband"]) / dataframe[ + "bb_middleband" + ] # Bollinger Bands - Weighted (EMA based instead of SMA) # weighted_bollinger = qtpylib.weighted_bollinger_bands( @@ -264,17 +267,17 @@ class SampleStrategy(IStrategy): # dataframe['sma100'] = ta.SMA(dataframe, timeperiod=100) # Parabolic SAR - dataframe['sar'] = ta.SAR(dataframe) + dataframe["sar"] = ta.SAR(dataframe) # TEMA - Triple Exponential Moving Average - dataframe['tema'] = ta.TEMA(dataframe, timeperiod=9) + dataframe["tema"] = ta.TEMA(dataframe, timeperiod=9) # Cycle Indicator # ------------------------------------ # Hilbert Transform Indicator - SineWave hilbert = ta.HT_SINE(dataframe) - dataframe['htsine'] = 
hilbert['sine'] - dataframe['htleadsine'] = hilbert['leadsine'] + dataframe["htsine"] = hilbert["sine"] + dataframe["htleadsine"] = hilbert["leadsine"] # Pattern Recognition - Bullish candlestick patterns # ------------------------------------ @@ -353,22 +356,24 @@ class SampleStrategy(IStrategy): dataframe.loc[ ( # Signal: RSI crosses above 30 - (qtpylib.crossed_above(dataframe['rsi'], self.buy_rsi.value)) & - (dataframe['tema'] <= dataframe['bb_middleband']) & # Guard: tema below BB middle - (dataframe['tema'] > dataframe['tema'].shift(1)) & # Guard: tema is raising - (dataframe['volume'] > 0) # Make sure Volume is not 0 + (qtpylib.crossed_above(dataframe["rsi"], self.buy_rsi.value)) + & (dataframe["tema"] <= dataframe["bb_middleband"]) # Guard: tema below BB middle + & (dataframe["tema"] > dataframe["tema"].shift(1)) # Guard: tema is raising + & (dataframe["volume"] > 0) # Make sure Volume is not 0 ), - 'enter_long'] = 1 + "enter_long", + ] = 1 dataframe.loc[ ( # Signal: RSI crosses above 70 - (qtpylib.crossed_above(dataframe['rsi'], self.short_rsi.value)) & - (dataframe['tema'] > dataframe['bb_middleband']) & # Guard: tema above BB middle - (dataframe['tema'] < dataframe['tema'].shift(1)) & # Guard: tema is falling - (dataframe['volume'] > 0) # Make sure Volume is not 0 + (qtpylib.crossed_above(dataframe["rsi"], self.short_rsi.value)) + & (dataframe["tema"] > dataframe["bb_middleband"]) # Guard: tema above BB middle + & (dataframe["tema"] < dataframe["tema"].shift(1)) # Guard: tema is falling + & (dataframe["volume"] > 0) # Make sure Volume is not 0 ), - 'enter_short'] = 1 + "enter_short", + ] = 1 return dataframe @@ -382,23 +387,25 @@ class SampleStrategy(IStrategy): dataframe.loc[ ( # Signal: RSI crosses above 70 - (qtpylib.crossed_above(dataframe['rsi'], self.sell_rsi.value)) & - (dataframe['tema'] > dataframe['bb_middleband']) & # Guard: tema above BB middle - (dataframe['tema'] < dataframe['tema'].shift(1)) & # Guard: tema is falling - (dataframe['volume'] > 0) # Make sure Volume is not 0 + (qtpylib.crossed_above(dataframe["rsi"], self.sell_rsi.value)) + & (dataframe["tema"] > dataframe["bb_middleband"]) # Guard: tema above BB middle + & (dataframe["tema"] < dataframe["tema"].shift(1)) # Guard: tema is falling + & (dataframe["volume"] > 0) # Make sure Volume is not 0 ), - - 'exit_long'] = 1 + "exit_long", + ] = 1 dataframe.loc[ ( # Signal: RSI crosses above 30 - (qtpylib.crossed_above(dataframe['rsi'], self.exit_short_rsi.value)) & + (qtpylib.crossed_above(dataframe["rsi"], self.exit_short_rsi.value)) + & # Guard: tema below BB middle - (dataframe['tema'] <= dataframe['bb_middleband']) & - (dataframe['tema'] > dataframe['tema'].shift(1)) & # Guard: tema is raising - (dataframe['volume'] > 0) # Make sure Volume is not 0 + (dataframe["tema"] <= dataframe["bb_middleband"]) + & (dataframe["tema"] > dataframe["tema"].shift(1)) # Guard: tema is raising + & (dataframe["volume"] > 0) # Make sure Volume is not 0 ), - 'exit_short'] = 1 + "exit_short", + ] = 1 return dataframe diff --git a/freqtrade/types/__init__.py b/freqtrade/types/__init__.py index 02343f52f..6420baba0 100644 --- a/freqtrade/types/__init__.py +++ b/freqtrade/types/__init__.py @@ -1,5 +1,8 @@ # flake8: noqa: F401 -from freqtrade.types.backtest_result_type import (BacktestHistoryEntryType, BacktestMetadataType, - BacktestResultType, - get_BacktestResultType_default) +from freqtrade.types.backtest_result_type import ( + BacktestHistoryEntryType, + BacktestMetadataType, + BacktestResultType, + get_BacktestResultType_default, 
+) from freqtrade.types.valid_exchanges_type import ValidExchangesType diff --git a/freqtrade/types/backtest_result_type.py b/freqtrade/types/backtest_result_type.py index 7a6fc79fa..cad956597 100644 --- a/freqtrade/types/backtest_result_type.py +++ b/freqtrade/types/backtest_result_type.py @@ -16,9 +16,9 @@ class BacktestResultType(TypedDict): def get_BacktestResultType_default() -> BacktestResultType: return { - 'metadata': {}, - 'strategy': {}, - 'strategy_comparison': [], + "metadata": {}, + "strategy": {}, + "strategy_comparison": [], } diff --git a/freqtrade/util/__init__.py b/freqtrade/util/__init__.py index 6f523cd8e..503f5861a 100644 --- a/freqtrade/util/__init__.py +++ b/freqtrade/util/__init__.py @@ -1,6 +1,16 @@ -from freqtrade.util.datetime_helpers import (dt_floor_day, dt_from_ts, dt_humanize_delta, dt_now, - dt_ts, dt_ts_def, dt_ts_none, dt_utc, format_date, - format_ms_time, shorten_date) +from freqtrade.util.datetime_helpers import ( + dt_floor_day, + dt_from_ts, + dt_humanize_delta, + dt_now, + dt_ts, + dt_ts_def, + dt_ts_none, + dt_utc, + format_date, + format_ms_time, + shorten_date, +) from freqtrade.util.formatters import decimals_per_coin, fmt_coin, round_value from freqtrade.util.ft_precise import FtPrecise from freqtrade.util.measure_time import MeasureTime @@ -9,21 +19,21 @@ from freqtrade.util.template_renderer import render_template, render_template_wi __all__ = [ - 'dt_floor_day', - 'dt_from_ts', - 'dt_humanize_delta', - 'dt_now', - 'dt_ts', - 'dt_ts_def', - 'dt_ts_none', - 'dt_utc', - 'format_date', - 'format_ms_time', - 'FtPrecise', - 'PeriodicCache', - 'shorten_date', - 'decimals_per_coin', - 'round_value', - 'fmt_coin', - 'MeasureTime', + "dt_floor_day", + "dt_from_ts", + "dt_humanize_delta", + "dt_now", + "dt_ts", + "dt_ts_def", + "dt_ts_none", + "dt_utc", + "format_date", + "format_ms_time", + "FtPrecise", + "PeriodicCache", + "shorten_date", + "decimals_per_coin", + "round_value", + "fmt_coin", + "MeasureTime", ] diff --git a/freqtrade/util/coin_gecko.py b/freqtrade/util/coin_gecko.py new file mode 100644 index 000000000..47b80875b --- /dev/null +++ b/freqtrade/util/coin_gecko.py @@ -0,0 +1,26 @@ +from pycoingecko import CoinGeckoAPI + + +class FtCoinGeckoApi(CoinGeckoAPI): + """ + Simple wrapper around pycoingecko's api to support Demo API keys. 
+ + """ + + __API_URL_BASE = "https://api.coingecko.com/api/v3/" + __PRO_API_URL_BASE = "https://pro-api.coingecko.com/api/v3/" + _api_key: str = "" + + def __init__(self, api_key: str = "", *, is_demo=True, retries=5): + super().__init__(retries=retries) + # Doint' pass api_key to parent, instead set the header on the session directly + self._api_key = api_key + + if api_key and not is_demo: + self.api_base_url = self.__PRO_API_URL_BASE + self.session.params.update({"x_cg_pro_api_key": api_key}) + else: + # Use demo api key + self.api_base_url = self.__API_URL_BASE + if api_key: + self.session.params.update({"x_cg_demo_api_key": api_key}) diff --git a/freqtrade/util/datetime_helpers.py b/freqtrade/util/datetime_helpers.py index 64733721b..3ab20efb7 100644 --- a/freqtrade/util/datetime_helpers.py +++ b/freqtrade/util/datetime_helpers.py @@ -1,5 +1,6 @@ import re from datetime import datetime, timezone +from time import time from typing import Optional, Union import humanize @@ -12,8 +13,15 @@ def dt_now() -> datetime: return datetime.now(timezone.utc) -def dt_utc(year: int, month: int, day: int, hour: int = 0, minute: int = 0, second: int = 0, - microsecond: int = 0) -> datetime: +def dt_utc( + year: int, + month: int, + day: int, + hour: int = 0, + minute: int = 0, + second: int = 0, + microsecond: int = 0, +) -> datetime: """Return a datetime in UTC.""" return datetime(year, month, day, hour, minute, second, microsecond, tzinfo=timezone.utc) @@ -25,7 +33,7 @@ def dt_ts(dt: Optional[datetime] = None) -> int: """ if dt: return int(dt.timestamp() * 1000) - return int(dt_now().timestamp() * 1000) + return int(time() * 1000) def dt_ts_def(dt: Optional[datetime], default: int = 0) -> int: @@ -68,11 +76,11 @@ def shorten_date(_date: str) -> str: """ Trim the date so it fits on small screens """ - new_date = re.sub('seconds?', 'sec', _date) - new_date = re.sub('minutes?', 'min', new_date) - new_date = re.sub('hours?', 'h', new_date) - new_date = re.sub('days?', 'd', new_date) - new_date = re.sub('^an?', '1', new_date) + new_date = re.sub("seconds?", "sec", _date) + new_date = re.sub("minutes?", "min", new_date) + new_date = re.sub("hours?", "h", new_date) + new_date = re.sub("days?", "d", new_date) + new_date = re.sub("^an?", "1", new_date) return new_date @@ -91,7 +99,7 @@ def format_date(date: Optional[datetime]) -> str: """ if date: return date.strftime(DATETIME_PRINT_FORMAT) - return '' + return "" def format_ms_time(date: Union[int, float]) -> str: @@ -99,4 +107,4 @@ def format_ms_time(date: Union[int, float]) -> str: convert MS date to readable format. 
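# Sanity sketch for the datetime helpers touched above: dt_ts() now uses time.time()
# directly when no datetime is passed, but both code paths still return UTC milliseconds.
from freqtrade.util import dt_now, dt_ts, dt_utc, format_date

assert dt_ts(dt_utc(2024, 1, 1)) == 1_704_067_200_000  # ms since epoch
print(format_date(dt_now()))  # rendered with DATETIME_PRINT_FORMAT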
: epoch-string in ms """ - return dt_from_ts(date).strftime('%Y-%m-%dT%H:%M:%S') + return dt_from_ts(date).strftime("%Y-%m-%dT%H:%M:%S") diff --git a/freqtrade/util/formatters.py b/freqtrade/util/formatters.py index f9d3db6a1..a649b671e 100644 --- a/freqtrade/util/formatters.py +++ b/freqtrade/util/formatters.py @@ -16,7 +16,7 @@ def strip_trailing_zeros(value: str) -> str: :param value: Value to be stripped :return: Stripped value """ - return value.rstrip('0').rstrip('.') + return value.rstrip("0").rstrip(".") def round_value(value: float, decimals: int, keep_trailing_zeros=False) -> str: @@ -33,8 +33,7 @@ def round_value(value: float, decimals: int, keep_trailing_zeros=False) -> str: return val -def fmt_coin( - value: float, coin: str, show_coin_name=True, keep_trailing_zeros=False) -> str: +def fmt_coin(value: float, coin: str, show_coin_name=True, keep_trailing_zeros=False) -> str: """ Format price value for this coin :param value: Value to be printed diff --git a/freqtrade/util/ft_precise.py b/freqtrade/util/ft_precise.py index aba0517a9..9487e3d6e 100644 --- a/freqtrade/util/ft_precise.py +++ b/freqtrade/util/ft_precise.py @@ -2,6 +2,7 @@ Slim wrapper around ccxt's Precise (string math) To have imports from freqtrade - and support float initializers """ + from ccxt import Precise diff --git a/freqtrade/util/measure_time.py b/freqtrade/util/measure_time.py index 8266adfc0..9ce8dba16 100644 --- a/freqtrade/util/measure_time.py +++ b/freqtrade/util/measure_time.py @@ -12,8 +12,10 @@ class MeasureTime: """ Measure the time of a block of code and call a callback if the time limit is exceeded. """ + def __init__( - self, callback: Callable[[float, float], None], time_limit: float, ttl: int = 3600 * 4): + self, callback: Callable[[float, float], None], time_limit: float, ttl: int = 3600 * 4 + ): """ :param callback: The callback to call if the time limit is exceeded. 
This callback will be called once every "ttl" seconds, @@ -32,7 +34,7 @@ class MeasureTime: def __exit__(self, *args): end = time.time() - if self.__cache.get('value'): + if self.__cache.get("value"): return duration = end - self._start @@ -40,4 +42,4 @@ class MeasureTime: return self._callback(duration, self._time_limit) - self.__cache['value'] = True + self.__cache["value"] = True diff --git a/freqtrade/util/migrations/__init__.py b/freqtrade/util/migrations/__init__.py index 9bd6f6288..d8c7dfad3 100644 --- a/freqtrade/util/migrations/__init__.py +++ b/freqtrade/util/migrations/__init__.py @@ -1,7 +1,6 @@ from typing import Optional from freqtrade.exchange import Exchange -from freqtrade.util.migrations.binance_mig import migrate_binance_futures_names # noqa F401 from freqtrade.util.migrations.binance_mig import migrate_binance_futures_data from freqtrade.util.migrations.funding_rate_mig import migrate_funding_fee_timeframe diff --git a/freqtrade/util/migrations/binance_mig.py b/freqtrade/util/migrations/binance_mig.py index b15e20100..e5255fa6d 100644 --- a/freqtrade/util/migrations/binance_mig.py +++ b/freqtrade/util/migrations/binance_mig.py @@ -14,27 +14,28 @@ logger = logging.getLogger(__name__) def migrate_binance_futures_names(config: Config): - - if ( - not (config.get('trading_mode', TradingMode.SPOT) == TradingMode.FUTURES - and config['exchange']['name'] == 'binance') + if not ( + config.get("trading_mode", TradingMode.SPOT) == TradingMode.FUTURES + and config["exchange"]["name"] == "binance" ): # only act on new futures return import ccxt + if version.parse("2.6.26") > version.parse(ccxt.__version__): raise OperationalException( "Please follow the update instructions in the docs " - f"({DOCS_LINK}/updating/) to install a compatible ccxt version.") + f"({DOCS_LINK}/updating/) to install a compatible ccxt version." + ) _migrate_binance_futures_db(config) migrate_binance_futures_data(config) def _migrate_binance_futures_db(config: Config): - logger.warning('Migrating binance futures pairs in database.') - trades = Trade.get_trades([Trade.exchange == 'binance', Trade.trading_mode == 'FUTURES']).all() + logger.warning("Migrating binance futures pairs in database.") + trades = Trade.get_trades([Trade.exchange == "binance", Trade.trading_mode == "FUTURES"]).all() for trade in trades: - if ':' in trade.pair: + if ":" in trade.pair: # already migrated continue new_pair = f"{trade.pair}:{trade.stake_currency}" @@ -45,34 +46,33 @@ def _migrate_binance_futures_db(config: Config): # Should symbol be migrated too? 
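# Usage sketch for the MeasureTime helper reformatted above (callback name and timings
# are illustrative): the callback fires when the measured block exceeds time_limit and is
# then muted for roughly `ttl` seconds by the internal cache.
import time

from freqtrade.util import MeasureTime


def warn_slow(duration: float, limit: float) -> None:
    print(f"block took {duration:.2f}s, limit was {limit}s")


measure = MeasureTime(warn_slow, time_limit=0.1, ttl=3600)
with measure:
    time.sleep(0.2)  # exceeds the limit -> warn_slow() is called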
# order.symbol = new_pair Trade.commit() - pls = PairLock.session.scalars(select(PairLock).filter(PairLock.pair.notlike('%:%'))).all() + pls = PairLock.session.scalars(select(PairLock).filter(PairLock.pair.notlike("%:%"))).all() for pl in pls: pl.pair = f"{pl.pair}:{config['stake_currency']}" # print(pls) # pls.update({'pair': concat(PairLock.pair,':USDT')}) Trade.commit() - logger.warning('Done migrating binance futures pairs in database.') + logger.warning("Done migrating binance futures pairs in database.") def migrate_binance_futures_data(config: Config): - - if ( - not (config.get('trading_mode', TradingMode.SPOT) == TradingMode.FUTURES - and config['exchange']['name'] == 'binance') + if not ( + config.get("trading_mode", TradingMode.SPOT) == TradingMode.FUTURES + and config["exchange"]["name"] == "binance" ): # only act on new futures return from freqtrade.data.history import get_datahandler - dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv']) + + dhc = get_datahandler(config["datadir"], config["dataformat_ohlcv"]) paircombs = dhc.ohlcv_get_available_data( - config['datadir'], - config.get('trading_mode', TradingMode.SPOT) - ) + config["datadir"], config.get("trading_mode", TradingMode.SPOT) + ) for pair, timeframe, candle_type in paircombs: - if ':' in pair: + if ":" in pair: # already migrated continue new_pair = f"{pair}:{config['stake_currency']}" diff --git a/freqtrade/util/migrations/funding_rate_mig.py b/freqtrade/util/migrations/funding_rate_mig.py index 85b66ce3f..16ca60732 100644 --- a/freqtrade/util/migrations/funding_rate_mig.py +++ b/freqtrade/util/migrations/funding_rate_mig.py @@ -11,17 +11,16 @@ logger = logging.getLogger(__name__) def migrate_funding_fee_timeframe(config: Config, exchange: Optional[Exchange]): - if ( - config.get('trading_mode', TradingMode.SPOT) != TradingMode.FUTURES - ): + if config.get("trading_mode", TradingMode.SPOT) != TradingMode.FUTURES: # only act on futures return if not exchange: from freqtrade.resolvers import ExchangeResolver + exchange = ExchangeResolver.load_exchange(config, validate=False) - ff_timeframe = exchange.get_option('funding_fee_timeframe') + ff_timeframe = exchange.get_option("funding_fee_timeframe") - dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv']) + dhc = get_datahandler(config["datadir"], config["dataformat_ohlcv"]) dhc.fix_funding_fee_timeframe(ff_timeframe) diff --git a/freqtrade/util/periodic_cache.py b/freqtrade/util/periodic_cache.py index 1a535440d..4f7405d2b 100644 --- a/freqtrade/util/periodic_cache.py +++ b/freqtrade/util/periodic_cache.py @@ -12,7 +12,7 @@ class PeriodicCache(TTLCache): def __init__(self, maxsize, ttl, getsizeof=None): def local_timer(): ts = datetime.now(timezone.utc).timestamp() - offset = (ts % ttl) + offset = ts % ttl return ts - offset # Init with smlight offset diff --git a/freqtrade/util/template_renderer.py b/freqtrade/util/template_renderer.py index a875818bf..2ea3525aa 100644 --- a/freqtrade/util/template_renderer.py +++ b/freqtrade/util/template_renderer.py @@ -2,28 +2,28 @@ Jinja2 rendering utils, used to generate new strategy and configurations. 
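# Sketch of the PeriodicCache behaviour implied by the timer logic above (values are
# illustrative): unlike a plain TTLCache, entries expire at fixed period boundaries, so
# with ttl=3600 everything inserted during the current hour expires on the hour.
from freqtrade.util import PeriodicCache

cache = PeriodicCache(maxsize=100, ttl=3600)
cache["markets"] = {"BTC/USDT": {}}
print("markets" in cache)  # True until the current hour rolls over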
""" - from typing import Dict, Optional def render_template(templatefile: str, arguments: Dict) -> str: - from jinja2 import Environment, PackageLoader, select_autoescape env = Environment( - loader=PackageLoader('freqtrade', 'templates'), - autoescape=select_autoescape(['html', 'xml']) + loader=PackageLoader("freqtrade", "templates"), + autoescape=select_autoescape(["html", "xml"]), ) template = env.get_template(templatefile) return template.render(**arguments) -def render_template_with_fallback(templatefile: str, templatefallbackfile: str, - arguments: Optional[Dict] = None) -> str: +def render_template_with_fallback( + templatefile: str, templatefallbackfile: str, arguments: Optional[Dict] = None +) -> str: """ Use templatefile if possible, otherwise fall back to templatefallbackfile """ from jinja2.exceptions import TemplateNotFound + if arguments is None: arguments = {} try: diff --git a/freqtrade/vendor/qtpylib/indicators.py b/freqtrade/vendor/qtpylib/indicators.py index a4d92eed3..9c92b2f8e 100644 --- a/freqtrade/vendor/qtpylib/indicators.py +++ b/freqtrade/vendor/qtpylib/indicators.py @@ -42,7 +42,7 @@ def numpy_rolling_series(func): new_series = np.empty(len(series)) * np.nan calculated = func(series, window) - new_series[-len(calculated):] = calculated + new_series[-len(calculated) :] = calculated if as_source and isinstance(data, pd.Series): return pd.Series(index=data.index, data=new_series) @@ -65,97 +65,103 @@ def numpy_rolling_std(data, window, as_source=False): # --------------------------------------------- -def session(df, start='17:00', end='16:00'): - """ remove previous globex day from df """ +def session(df, start="17:00", end="16:00"): + """remove previous globex day from df""" if df.empty: return df # get start/end/now as decimals - int_start = list(map(int, start.split(':'))) + int_start = list(map(int, start.split(":"))) int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001 - int_end = list(map(int, end.split(':'))) + int_end = list(map(int, end.split(":"))) int_end = int_end[0] + int_end[1] / 100 - int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100) + int_now = df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100 # same-dat session? 
is_same_day = int_end > int_start # set pointers - curr = prev = df[-1:].index[0].strftime('%Y-%m-%d') + curr = prev = df[-1:].index[0].strftime("%Y-%m-%d") # globex/forex session if not is_same_day: - prev = (datetime.strptime(curr, '%Y-%m-%d') - - timedelta(1)).strftime('%Y-%m-%d') + prev = (datetime.strptime(curr, "%Y-%m-%d") - timedelta(1)).strftime("%Y-%m-%d") # slice if int_now >= int_start: - df = df[df.index >= curr + ' ' + start] + df = df[df.index >= curr + " " + start] else: - df = df[df.index >= prev + ' ' + start] + df = df[df.index >= prev + " " + start] return df.copy() + # --------------------------------------------- def heikinashi(bars): bars = bars.copy() - bars['ha_close'] = (bars['open'] + bars['high'] + - bars['low'] + bars['close']) / 4 + bars["ha_close"] = (bars["open"] + bars["high"] + bars["low"] + bars["close"]) / 4 # ha open - bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 'close']) / 2 + bars.at[0, "ha_open"] = (bars.at[0, "open"] + bars.at[0, "close"]) / 2 for i in range(1, len(bars)): - bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2 + bars.at[i, "ha_open"] = (bars.at[i - 1, "ha_open"] + bars.at[i - 1, "ha_close"]) / 2 - bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1) - bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1) + bars["ha_high"] = bars.loc[:, ["high", "ha_open", "ha_close"]].max(axis=1) + bars["ha_low"] = bars.loc[:, ["low", "ha_open", "ha_close"]].min(axis=1) + + return pd.DataFrame( + index=bars.index, + data={ + "open": bars["ha_open"], + "high": bars["ha_high"], + "low": bars["ha_low"], + "close": bars["ha_close"], + }, + ) - return pd.DataFrame(index=bars.index, - data={'open': bars['ha_open'], - 'high': bars['ha_high'], - 'low': bars['ha_low'], - 'close': bars['ha_close']}) # --------------------------------------------- -def tdi(series, rsi_lookback=13, rsi_smooth_len=2, - rsi_signal_len=7, bb_lookback=34, bb_std=1.6185): - +def tdi(series, rsi_lookback=13, rsi_smooth_len=2, rsi_signal_len=7, bb_lookback=34, bb_std=1.6185): rsi_data = rsi(series, rsi_lookback) rsi_smooth = sma(rsi_data, rsi_smooth_len) rsi_signal = sma(rsi_data, rsi_signal_len) bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std) - return pd.DataFrame(index=series.index, data={ - "rsi": rsi_data, - "rsi_signal": rsi_signal, - "rsi_smooth": rsi_smooth, - "rsi_bb_upper": bb_series['upper'], - "rsi_bb_lower": bb_series['lower'], - "rsi_bb_mid": bb_series['mid'] - }) + return pd.DataFrame( + index=series.index, + data={ + "rsi": rsi_data, + "rsi_signal": rsi_signal, + "rsi_smooth": rsi_smooth, + "rsi_bb_upper": bb_series["upper"], + "rsi_bb_lower": bb_series["lower"], + "rsi_bb_mid": bb_series["mid"], + }, + ) + # --------------------------------------------- def awesome_oscillator(df, weighted=False, fast=5, slow=34): - midprice = (df['high'] + df['low']) / 2 + midprice = (df["high"] + df["low"]) / 2 if weighted: ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values else: - ao = numpy_rolling_mean(midprice, fast) - \ - numpy_rolling_mean(midprice, slow) + ao = numpy_rolling_mean(midprice, fast) - numpy_rolling_mean(midprice, slow) return pd.Series(index=df.index, data=ao) # --------------------------------------------- + def nans(length=1): mtx = np.empty(length) mtx[:] = np.nan @@ -164,39 +170,45 @@ def nans(length=1): # --------------------------------------------- + def typical_price(bars): - res = (bars['high'] + bars['low'] + bars['close']) / 3. 
+ res = (bars["high"] + bars["low"] + bars["close"]) / 3.0 return pd.Series(index=bars.index, data=res) # --------------------------------------------- + def mid_price(bars): - res = (bars['high'] + bars['low']) / 2. + res = (bars["high"] + bars["low"]) / 2.0 return pd.Series(index=bars.index, data=res) # --------------------------------------------- + def ibs(bars): - """ Internal bar strength """ - res = np.round((bars['close'] - bars['low']) / - (bars['high'] - bars['low']), 2) + """Internal bar strength""" + res = np.round((bars["close"] - bars["low"]) / (bars["high"] - bars["low"]), 2) return pd.Series(index=bars.index, data=res) # --------------------------------------------- + def true_range(bars): - return pd.DataFrame({ - "hl": bars['high'] - bars['low'], - "hc": abs(bars['high'] - bars['close'].shift(1)), - "lc": abs(bars['low'] - bars['close'].shift(1)) - }).max(axis=1) + return pd.DataFrame( + { + "hl": bars["high"] - bars["low"], + "hc": abs(bars["high"] - bars["close"].shift(1)), + "lc": abs(bars["low"] - bars["close"].shift(1)), + } + ).max(axis=1) # --------------------------------------------- + def atr(bars, window=14, exp=False): tr = true_range(bars) @@ -210,6 +222,7 @@ def atr(bars, window=14, exp=False): # --------------------------------------------- + def crossed(series1, series2, direction=None): if isinstance(series1, np.ndarray): series1 = pd.Series(series1) @@ -218,12 +231,10 @@ def crossed(series1, series2, direction=None): series2 = pd.Series(index=series1.index, data=series2) if direction is None or direction == "above": - above = pd.Series((series1 > series2) & ( - series1.shift(1) <= series2.shift(1))) + above = pd.Series((series1 > series2) & (series1.shift(1) <= series2.shift(1))) if direction is None or direction == "below": - below = pd.Series((series1 < series2) & ( - series1.shift(1) >= series2.shift(1))) + below = pd.Series((series1 < series2) & (series1.shift(1) >= series2.shift(1))) if direction is None: return above | below @@ -238,6 +249,7 @@ def crossed_above(series1, series2): def crossed_below(series1, series2): return crossed(series1, series2, "below") + # --------------------------------------------- @@ -251,6 +263,7 @@ def rolling_std(series, window=200, min_periods=None): except Exception as e: # noqa: F841 return pd.Series(series).rolling(window=window, min_periods=min_periods).std() + # --------------------------------------------- @@ -264,6 +277,7 @@ def rolling_mean(series, window=200, min_periods=None): except Exception as e: # noqa: F841 return pd.Series(series).rolling(window=window, min_periods=min_periods).mean() + # --------------------------------------------- @@ -277,6 +291,7 @@ def rolling_min(series, window=14, min_periods=None): # --------------------------------------------- + def rolling_max(series, window=14, min_periods=None): min_periods = window if min_periods is None else min_periods try: @@ -287,6 +302,7 @@ def rolling_max(series, window=14, min_periods=None): # --------------------------------------------- + def rolling_weighted_mean(series, window=200, min_periods=None): min_periods = window if min_periods is None else min_periods try: @@ -297,41 +313,49 @@ def rolling_weighted_mean(series, window=200, min_periods=None): # --------------------------------------------- + def hull_moving_average(series, window=200, min_periods=None): min_periods = window if min_periods is None else min_periods - ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - \ - rolling_weighted_mean(series, window, min_periods) 
+ ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - rolling_weighted_mean( + series, window, min_periods + ) return rolling_weighted_mean(ma, np.sqrt(window), min_periods) # --------------------------------------------- + def sma(series, window=200, min_periods=None): return rolling_mean(series, window=window, min_periods=min_periods) # --------------------------------------------- + def wma(series, window=200, min_periods=None): return rolling_weighted_mean(series, window=window, min_periods=min_periods) # --------------------------------------------- + def hma(series, window=200, min_periods=None): return hull_moving_average(series, window=window, min_periods=min_periods) # --------------------------------------------- + def vwap(bars): """ calculate vwap of entire time series (input can be pandas series or numpy array) bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ] """ - raise ValueError("using `qtpylib.vwap` facilitates lookahead bias. Please use " - "`qtpylib.rolling_vwap` instead, which calculates vwap in a rolling manner.") + raise ValueError( + "using `qtpylib.vwap` facilitates lookahead bias. Please use " + "`qtpylib.rolling_vwap` instead, which calculates vwap in a rolling manner." + ) # typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values # volume = bars['volume'].values @@ -341,6 +365,7 @@ def vwap(bars): # --------------------------------------------- + def rolling_vwap(bars, window=200, min_periods=None): """ calculate vwap using moving window @@ -349,19 +374,22 @@ def rolling_vwap(bars, window=200, min_periods=None): """ min_periods = window if min_periods is None else min_periods - typical = ((bars['high'] + bars['low'] + bars['close']) / 3) - volume = bars['volume'] + typical = (bars["high"] + bars["low"] + bars["close"]) / 3 + volume = bars["volume"] - left = (volume * typical).rolling(window=window, - min_periods=min_periods).sum() + left = (volume * typical).rolling(window=window, min_periods=min_periods).sum() right = volume.rolling(window=window, min_periods=min_periods).sum() - return pd.Series(index=bars.index, data=(left / right) - ).replace([np.inf, -np.inf], float('NaN')).ffill() + return ( + pd.Series(index=bars.index, data=(left / right)) + .replace([np.inf, -np.inf], float("NaN")) + .ffill() + ) # --------------------------------------------- + def rsi(series, window=14): """ compute the n period relative strength indicator @@ -369,13 +397,13 @@ def rsi(series, window=14): # 100-(100/relative_strength) deltas = np.diff(series) - seed = deltas[:window + 1] + seed = deltas[: window + 1] # default values ups = seed[seed > 0].sum() / window downs = -seed[seed < 0].sum() / window rsival = np.zeros_like(series) - rsival[:window] = 100. - 100. / (1. + ups / downs) + rsival[:window] = 100.0 - 100.0 / (1.0 + ups / downs) # period values for i in range(window, len(series)): @@ -388,8 +416,8 @@ def rsi(series, window=14): downval = -delta ups = (ups * (window - 1) + upval) / window - downs = (downs * (window - 1.) + downval) / window - rsival[i] = 100. - 100. / (1. 
+ ups / downs) + downs = (downs * (window - 1.0) + downval) / window + rsival[i] = 100.0 - 100.0 / (1.0 + ups / downs) # return rsival return pd.Series(index=series.index, data=rsival) @@ -397,60 +425,57 @@ def rsi(series, window=14): # --------------------------------------------- + def macd(series, fast=3, slow=10, smooth=16): """ compute the MACD (Moving Average Convergence/Divergence) using a fast and slow exponential moving avg' return value is emaslow, emafast, macd which are len(x) arrays """ - macd_line = rolling_weighted_mean(series, window=fast) - \ - rolling_weighted_mean(series, window=slow) + macd_line = rolling_weighted_mean(series, window=fast) - rolling_weighted_mean( + series, window=slow + ) signal = rolling_weighted_mean(macd_line, window=smooth) histogram = macd_line - signal # return macd_line, signal, histogram - return pd.DataFrame(index=series.index, data={ - 'macd': macd_line.values, - 'signal': signal.values, - 'histogram': histogram.values - }) + return pd.DataFrame( + index=series.index, + data={"macd": macd_line.values, "signal": signal.values, "histogram": histogram.values}, + ) # --------------------------------------------- + def bollinger_bands(series, window=20, stds=2): ma = rolling_mean(series, window=window, min_periods=1) std = rolling_std(series, window=window, min_periods=1) upper = ma + std * stds lower = ma - std * stds - return pd.DataFrame(index=series.index, data={ - 'upper': upper, - 'mid': ma, - 'lower': lower - }) + return pd.DataFrame(index=series.index, data={"upper": upper, "mid": ma, "lower": lower}) # --------------------------------------------- + def weighted_bollinger_bands(series, window=20, stds=2): ema = rolling_weighted_mean(series, window=window) std = rolling_std(series, window=window) upper = ema + std * stds lower = ema - std * stds - return pd.DataFrame(index=series.index, data={ - 'upper': upper.values, - 'mid': ema.values, - 'lower': lower.values - }) + return pd.DataFrame( + index=series.index, data={"upper": upper.values, "mid": ema.values, "lower": lower.values} + ) # --------------------------------------------- + def returns(series): try: - res = (series / series.shift(1) - - 1).replace([np.inf, -np.inf], float('NaN')) + res = (series / series.shift(1) - 1).replace([np.inf, -np.inf], float("NaN")) except Exception as e: # noqa: F841 res = nans(len(series)) @@ -459,10 +484,10 @@ def returns(series): # --------------------------------------------- + def log_returns(series): try: - res = np.log(series / series.shift(1) - ).replace([np.inf, -np.inf], float('NaN')) + res = np.log(series / series.shift(1)).replace([np.inf, -np.inf], float("NaN")) except Exception as e: # noqa: F841 res = nans(len(series)) @@ -471,10 +496,10 @@ def log_returns(series): # --------------------------------------------- + def implied_volatility(series, window=252): try: - logret = np.log(series / series.shift(1) - ).replace([np.inf, -np.inf], float('NaN')) + logret = np.log(series / series.shift(1)).replace([np.inf, -np.inf], float("NaN")) res = numpy_rolling_std(logret, window) * np.sqrt(window) except Exception as e: # noqa: F841 res = nans(len(series)) @@ -484,6 +509,7 @@ def implied_volatility(series, window=252): # --------------------------------------------- + def keltner_channel(bars, window=14, atrs=2): typical_mean = rolling_mean(typical_price(bars), window) atrval = atr(bars, window) * atrs @@ -491,15 +517,15 @@ def keltner_channel(bars, window=14, atrs=2): upper = typical_mean + atrval lower = typical_mean - atrval - return 
pd.DataFrame(index=bars.index, data={ - 'upper': upper.values, - 'mid': typical_mean.values, - 'lower': lower.values - }) + return pd.DataFrame( + index=bars.index, + data={"upper": upper.values, "mid": typical_mean.values, "lower": lower.values}, + ) # --------------------------------------------- + def roc(series, window=14): """ compute rate of change @@ -510,18 +536,20 @@ def roc(series, window=14): # --------------------------------------------- + def cci(series, window=14): """ compute commodity channel index """ price = typical_price(series) typical_mean = rolling_mean(price, window) - res = (price - typical_mean) / (.015 * np.std(typical_mean)) + res = (price - typical_mean) / (0.015 * np.std(typical_mean)) return pd.Series(index=series.index, data=res) # --------------------------------------------- + def stoch(df, window=14, d=3, k=3, fast=False): """ compute the n period relative strength indicator @@ -530,22 +558,22 @@ def stoch(df, window=14, d=3, k=3, fast=False): my_df = pd.DataFrame(index=df.index) - my_df['rolling_max'] = df['high'].rolling(window).max() - my_df['rolling_min'] = df['low'].rolling(window).min() + my_df["rolling_max"] = df["high"].rolling(window).max() + my_df["rolling_min"] = df["low"].rolling(window).min() - my_df['fast_k'] = ( - 100 * (df['close'] - my_df['rolling_min']) / - (my_df['rolling_max'] - my_df['rolling_min']) + my_df["fast_k"] = ( + 100 * (df["close"] - my_df["rolling_min"]) / (my_df["rolling_max"] - my_df["rolling_min"]) ) - my_df['fast_d'] = my_df['fast_k'].rolling(d).mean() + my_df["fast_d"] = my_df["fast_k"].rolling(d).mean() if fast: - return my_df.loc[:, ['fast_k', 'fast_d']] + return my_df.loc[:, ["fast_k", "fast_d"]] - my_df['slow_k'] = my_df['fast_k'].rolling(k).mean() - my_df['slow_d'] = my_df['slow_k'].rolling(d).mean() + my_df["slow_k"] = my_df["fast_k"].rolling(k).mean() + my_df["slow_d"] = my_df["slow_k"].rolling(d).mean() + + return my_df.loc[:, ["slow_k", "slow_d"]] - return my_df.loc[:, ['slow_k', 'slow_d']] # --------------------------------------------- @@ -559,7 +587,7 @@ def zlma(series, window=20, min_periods=None, kind="ema"): lag = (window - 1) // 2 series = 2 * series - series.shift(lag) - if kind in ['ewm', 'ema']: + if kind in ["ewm", "ema"]: return wma(series, lag, min_periods) elif kind == "hma": return hma(series, lag, min_periods) @@ -577,29 +605,30 @@ def zlsma(series, window, min_periods=None): def zlhma(series, window, min_periods=None): return zlma(series, window, min_periods, kind="hma") + # --------------------------------------------- -def zscore(bars, window=20, stds=1, col='close'): - """ get zscore of price """ +def zscore(bars, window=20, stds=1, col="close"): + """get zscore of price""" std = numpy_rolling_std(bars[col], window) mean = numpy_rolling_mean(bars[col], window) return (bars[col] - mean) / (std * stds) + # --------------------------------------------- def pvt(bars): - """ Price Volume Trend """ - trend = ((bars['close'] - bars['close'].shift(1)) / - bars['close'].shift(1)) * bars['volume'] + """Price Volume Trend""" + trend = ((bars["close"] - bars["close"].shift(1)) / bars["close"].shift(1)) * bars["volume"] return trend.cumsum() def chopiness(bars, window=14): atrsum = true_range(bars).rolling(window).sum() - highs = bars['high'].rolling(window).max() - lows = bars['low'].rolling(window).min() + highs = bars["high"].rolling(window).max() + lows = bars["low"].rolling(window).min() return 100 * np.log10(atrsum / (highs - lows)) / np.log10(window) diff --git a/freqtrade/wallets.py 
b/freqtrade/wallets.py index 96274e7fc..7f839cb24 100644 --- a/freqtrade/wallets.py +++ b/freqtrade/wallets.py @@ -1,5 +1,5 @@ # pragma pylint: disable=W0603 -""" Wallet """ +"""Wallet""" import logging from copy import deepcopy @@ -31,18 +31,17 @@ class PositionWallet(NamedTuple): position: float = 0 leverage: float = 0 collateral: float = 0 - side: str = 'long' + side: str = "long" class Wallets: - def __init__(self, config: Config, exchange: Exchange, is_backtest: bool = False) -> None: self._config = config self._is_backtest = is_backtest self._exchange = exchange self._wallets: Dict[str, Wallet] = {} self._positions: Dict[str, PositionWallet] = {} - self.start_cap = config['dry_run_wallet'] + self.start_cap = config["dry_run_wallet"] self._last_wallet_refresh: Optional[datetime] = None self.update() @@ -70,7 +69,7 @@ class Wallets: def _update_dry(self) -> None: """ Update from database in dry-run mode - - Apply apply profits of closed trades on top of stake amount + - Apply profits of closed trades on top of stake amount - Subtract currently tied up stake_amount in open trades - update balances for currencies currently in trades """ @@ -88,17 +87,12 @@ class Wallets: tot_in_trades = sum(trade.stake_amount for trade in open_trades) used_stake = 0.0 - if self._config.get('trading_mode', 'spot') != TradingMode.FUTURES: + if self._config.get("trading_mode", "spot") != TradingMode.FUTURES: current_stake = self.start_cap + tot_profit - tot_in_trades total_stake = current_stake for trade in open_trades: curr = self._exchange.get_pair_base_currency(trade.pair) - _wallets[curr] = Wallet( - curr, - trade.amount, - 0, - trade.amount - ) + _wallets[curr] = Wallet(curr, trade.amount, 0, trade.amount) else: tot_in_trades = 0 for position in open_trades: @@ -108,20 +102,21 @@ class Wallets: leverage = position.leverage tot_in_trades += collateral _positions[position.pair] = PositionWallet( - position.pair, position=size, + position.pair, + position=size, leverage=leverage, collateral=collateral, - side=position.trade_direction + side=position.trade_direction, ) current_stake = self.start_cap + tot_profit - tot_in_trades used_stake = tot_in_trades total_stake = current_stake + tot_in_trades - _wallets[self._config['stake_currency']] = Wallet( - currency=self._config['stake_currency'], + _wallets[self._config["stake_currency"]] = Wallet( + currency=self._config["stake_currency"], free=current_stake, used=used_stake, - total=total_stake + total=total_stake, ) self._wallets = _wallets self._positions = _positions @@ -133,9 +128,9 @@ class Wallets: if isinstance(balances[currency], dict): self._wallets[currency] = Wallet( currency, - balances[currency].get('free'), - balances[currency].get('used'), - balances[currency].get('total') + balances[currency].get("free"), + balances[currency].get("used"), + balances[currency].get("total"), ) # Remove currencies no longer in get_balances output for currency in deepcopy(self._wallets): @@ -145,18 +140,19 @@ class Wallets: positions = self._exchange.fetch_positions() self._positions = {} for position in positions: - symbol = position['symbol'] - if position['side'] is None or position['collateral'] == 0.0: + symbol = position["symbol"] + if position["side"] is None or position["collateral"] == 0.0: # Position is not open ... 
continue - size = self._exchange._contracts_to_amount(symbol, position['contracts']) - collateral = safe_value_fallback(position, 'collateral', 'initialMargin', 0.0) - leverage = position['leverage'] + size = self._exchange._contracts_to_amount(symbol, position["contracts"]) + collateral = safe_value_fallback(position, "collateral", "initialMargin", 0.0) + leverage = position["leverage"] self._positions[symbol] = PositionWallet( - symbol, position=size, + symbol, + position=size, leverage=leverage, collateral=collateral, - side=position['side'] + side=position["side"], ) def update(self, require_update: bool = True) -> None: @@ -173,12 +169,12 @@ class Wallets: or self._last_wallet_refresh is None or (self._last_wallet_refresh + timedelta(seconds=3600) < now) ): - if (not self._config['dry_run'] or self._config.get('runmode') == RunMode.LIVE): + if not self._config["dry_run"] or self._config.get("runmode") == RunMode.LIVE: self._update_live() else: self._update_dry() if not self._is_backtest: - logger.info('Wallets synced.') + logger.info("Wallets synced.") self._last_wallet_refresh = dt_now() def get_all_balances(self) -> Dict[str, Wallet]: @@ -222,11 +218,11 @@ class Wallets: or by using current balance subtracting """ if "available_capital" in self._config: - return self._config['available_capital'] + return self._config["available_capital"] else: tot_profit = Trade.get_total_closed_profit() open_stakes = Trade.total_open_trades_stakes() - available_balance = self.get_free(self._config['stake_currency']) + available_balance = self.get_free(self._config["stake_currency"]) return available_balance - tot_profit + open_stakes def get_total_stake_amount(self): @@ -238,7 +234,7 @@ class Wallets: """ val_tied_up = Trade.total_open_trades_stakes() if "available_capital" in self._config: - starting_balance = self._config['available_capital'] + starting_balance = self._config["available_capital"] tot_profit = Trade.get_total_closed_profit() available_amount = starting_balance + tot_profit @@ -246,8 +242,9 @@ class Wallets: # Ensure % is used from the overall balance # Otherwise we'd risk lowering stakes with each open trade. # (tied up + current free) * ratio) - tied up - available_amount = ((val_tied_up + self.get_free(self._config['stake_currency'])) * - self._config['tradable_balance_ratio']) + available_amount = ( + val_tied_up + self.get_free(self._config["stake_currency"]) + ) * self._config["tradable_balance_ratio"] return available_amount def get_available_stake_amount(self) -> float: @@ -258,11 +255,12 @@ class Wallets: ( + free amount) * tradable_balance_ratio - """ - free = self.get_free(self._config['stake_currency']) + free = self.get_free(self._config["stake_currency"]) return min(self.get_total_stake_amount() - Trade.total_open_trades_stakes(), free) - def _calculate_unlimited_stake_amount(self, available_amount: float, - val_tied_up: float, max_open_trades: IntOrInf) -> float: + def _calculate_unlimited_stake_amount( + self, available_amount: float, val_tied_up: float, max_open_trades: IntOrInf + ) -> float: """ Calculate stake amount for "unlimited" stake amount :return: 0 if max number of trades reached, else stake_amount to use. @@ -282,10 +280,10 @@ class Wallets: :raise: DependencyException if balance is lower than stake-amount """ - if self._config['amend_last_stake_amount']: + if self._config["amend_last_stake_amount"]: # Remaining amount needs to be at least stake_amount * last_stake_amount_min_ratio # Otherwise the remaining amount is too low to trade. 
- if available_amount > (stake_amount * self._config['last_stake_amount_min_ratio']): + if available_amount > (stake_amount * self._config["last_stake_amount_min_ratio"]): stake_amount = min(stake_amount, available_amount) else: stake_amount = 0 @@ -299,7 +297,8 @@ class Wallets: return stake_amount def get_trade_stake_amount( - self, pair: str, max_open_trades: IntOrInf, edge=None, update: bool = True) -> float: + self, pair: str, max_open_trades: IntOrInf, edge=None, update: bool = True + ) -> float: """ Calculate stake amount for the trade :return: float: Stake amount @@ -315,21 +314,27 @@ class Wallets: if edge: stake_amount = edge.stake_amount( pair, - self.get_free(self._config['stake_currency']), - self.get_total(self._config['stake_currency']), - val_tied_up + self.get_free(self._config["stake_currency"]), + self.get_total(self._config["stake_currency"]), + val_tied_up, ) else: - stake_amount = self._config['stake_amount'] + stake_amount = self._config["stake_amount"] if stake_amount == UNLIMITED_STAKE_AMOUNT: stake_amount = self._calculate_unlimited_stake_amount( - available_amount, val_tied_up, max_open_trades) + available_amount, val_tied_up, max_open_trades + ) return self._check_available_stake_amount(stake_amount, available_amount) - def validate_stake_amount(self, pair: str, stake_amount: Optional[float], - min_stake_amount: Optional[float], max_stake_amount: float, - trade_amount: Optional[float]): + def validate_stake_amount( + self, + pair: str, + stake_amount: Optional[float], + min_stake_amount: Optional[float], + max_stake_amount: float, + trade_amount: Optional[float], + ): if not stake_amount: logger.debug(f"Stake amount is {stake_amount}, ignoring possible trade for {pair}.") return 0 @@ -342,8 +347,10 @@ class Wallets: if min_stake_amount is not None and min_stake_amount > max_allowed_stake: if not self._is_backtest: - logger.warning("Minimum stake amount > available balance. " - f"{min_stake_amount} > {max_allowed_stake}") + logger.warning( + "Minimum stake amount > available balance. " + f"{min_stake_amount} > {max_allowed_stake}" + ) return 0 if min_stake_amount is not None and stake_amount < min_stake_amount: if not self._is_backtest: diff --git a/freqtrade/worker.py b/freqtrade/worker.py index e9dbfa74b..4c8fee356 100644 --- a/freqtrade/worker.py +++ b/freqtrade/worker.py @@ -1,6 +1,7 @@ """ Main Freqtrade worker class. 
""" + import logging import time import traceback @@ -52,13 +53,15 @@ class Worker: # Init the instance of the bot self.freqtrade = FreqtradeBot(self._config) - internals_config = self._config.get('internals', {}) - self._throttle_secs = internals_config.get('process_throttle_secs', - PROCESS_THROTTLE_SECS) - self._heartbeat_interval = internals_config.get('heartbeat_interval', 60) + internals_config = self._config.get("internals", {}) + self._throttle_secs = internals_config.get("process_throttle_secs", PROCESS_THROTTLE_SECS) + self._heartbeat_interval = internals_config.get("heartbeat_interval", 60) - self._sd_notify = sdnotify.SystemdNotifier() if \ - self._config.get('internals', {}).get('sd_notify', False) else None + self._sd_notify = ( + sdnotify.SystemdNotifier() + if self._config.get("internals", {}).get("sd_notify", False) + else None + ) def _notify(self, message: str) -> None: """ @@ -86,12 +89,12 @@ class Worker: # Log state transition if state != old_state: - if old_state != State.RELOAD_CONFIG: - self.freqtrade.notify_status(f'{state.name.lower()}') + self.freqtrade.notify_status(f"{state.name.lower()}") logger.info( - f"Changing state{f' from {old_state.name}' if old_state else ''} to: {state.name}") + f"Changing state{f' from {old_state.name}' if old_state else ''} to: {state.name}" + ) if state == State.RUNNING: self.freqtrade.startup() @@ -113,26 +116,36 @@ class Worker: self._notify("WATCHDOG=1\nSTATUS=State: RUNNING.") # Use an offset of 1s to ensure a new candle has been issued - self._throttle(func=self._process_running, throttle_secs=self._throttle_secs, - timeframe=self._config['timeframe'] if self._config else None, - timeframe_offset=1) + self._throttle( + func=self._process_running, + throttle_secs=self._throttle_secs, + timeframe=self._config["timeframe"] if self._config else None, + timeframe_offset=1, + ) if self._heartbeat_interval: now = time.time() if (now - self._heartbeat_msg) > self._heartbeat_interval: version = __version__ strategy_version = self.freqtrade.strategy.version() - if (strategy_version is not None): - version += ', strategy_version: ' + strategy_version - logger.info(f"Bot heartbeat. PID={getpid()}, " - f"version='{version}', state='{state.name}'") + if strategy_version is not None: + version += ", strategy_version: " + strategy_version + logger.info( + f"Bot heartbeat. PID={getpid()}, version='{version}', state='{state.name}'" + ) self._heartbeat_msg = now return state - def _throttle(self, func: Callable[..., Any], throttle_secs: float, - timeframe: Optional[str] = None, timeframe_offset: float = 1.0, - *args, **kwargs) -> Any: + def _throttle( + self, + func: Callable[..., Any], + throttle_secs: float, + timeframe: Optional[str] = None, + timeframe_offset: float = 1.0, + *args, + **kwargs, + ) -> Any: """ Throttles the given callable that it takes at least `min_secs` to finish execution. @@ -160,10 +173,11 @@ class Worker: sleep_duration = max(sleep_duration, 0.0) # next_iter = datetime.now(timezone.utc) + timedelta(seconds=sleep_duration) - logger.debug(f"Throttling with '{func.__name__}()': sleep for {sleep_duration:.2f} s, " - f"last iteration took {time_passed:.2f} s." - # f"next: {next_iter}" - ) + logger.debug( + f"Throttling with '{func.__name__}()': sleep for {sleep_duration:.2f} s, " + f"last iteration took {time_passed:.2f} s." 
+ # f"next: {next_iter}" + ) self._sleep(sleep_duration) return result @@ -183,14 +197,13 @@ class Worker: time.sleep(RETRY_TIMEOUT) except OperationalException: tb = traceback.format_exc() - hint = 'Issue `/start` if you think it is safe to restart.' + hint = "Issue `/start` if you think it is safe to restart." self.freqtrade.notify_status( - f'*OperationalException:*\n```\n{tb}```\n {hint}', - msg_type=RPCMessageType.EXCEPTION + f"*OperationalException:*\n```\n{tb}```\n {hint}", msg_type=RPCMessageType.EXCEPTION ) - logger.exception('OperationalException. Stopping trader ...') + logger.exception("OperationalException. Stopping trader ...") self.freqtrade.state = State.STOPPED def _reconfigure(self) -> None: @@ -207,7 +220,7 @@ class Worker: # Load and validate config and create new instance of the bot self._init(True) - self.freqtrade.notify_status('config reloaded') + self.freqtrade.notify_status("config reloaded") # Tell systemd that we completed reconfiguration self._notify("READY=1") @@ -217,5 +230,5 @@ class Worker: self._notify("STOPPING=1") if self.freqtrade: - self.freqtrade.notify_status('process died') + self.freqtrade.notify_status("process died") self.freqtrade.cleanup() diff --git a/ft_client/freqtrade_client/__init__.py b/ft_client/freqtrade_client/__init__.py index f5d6459b2..9ede4dd12 100644 --- a/ft_client/freqtrade_client/__init__.py +++ b/ft_client/freqtrade_client/__init__.py @@ -1,26 +1,37 @@ from freqtrade_client.ft_rest_client import FtRestClient -__version__ = '2024.4-dev' +__version__ = "2024.6-dev" -if 'dev' in __version__: +if "dev" in __version__: from pathlib import Path + try: import subprocess + freqtrade_basedir = Path(__file__).parent - __version__ = __version__ + '-' + subprocess.check_output( - ['git', 'log', '--format="%h"', '-n 1'], - stderr=subprocess.DEVNULL, cwd=freqtrade_basedir).decode("utf-8").rstrip().strip('"') + __version__ = ( + __version__ + + "-" + + subprocess.check_output( + ["git", "log", '--format="%h"', "-n 1"], + stderr=subprocess.DEVNULL, + cwd=freqtrade_basedir, + ) + .decode("utf-8") + .rstrip() + .strip('"') + ) except Exception: # pragma: no cover # git not available, ignore try: # Try Fallback to freqtrade_commit file (created by CI while building docker image) - versionfile = Path('./freqtrade_commit') + versionfile = Path("./freqtrade_commit") if versionfile.is_file(): __version__ = f"docker-{__version__}-{versionfile.read_text()[:8]}" except Exception: pass -__all__ = ['FtRestClient'] +__all__ = ["FtRestClient"] diff --git a/ft_client/freqtrade_client/ft_client.py b/ft_client/freqtrade_client/ft_client.py index 14bb47bc5..d51858fde 100644 --- a/ft_client/freqtrade_client/ft_client.py +++ b/ft_client/freqtrade_client/ft_client.py @@ -8,44 +8,51 @@ from pathlib import Path from typing import Any, Dict import rapidjson + from freqtrade_client import __version__ from freqtrade_client.ft_rest_client import FtRestClient logging.basicConfig( level=logging.INFO, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) logger = logging.getLogger("ft_rest_client") def add_arguments(args: Any = None): - parser = argparse.ArgumentParser() - parser.add_argument("command", - help="Positional argument defining the command to execute.", - nargs="?" 
- ) - parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}') - parser.add_argument('--show', - help='Show possible methods with this client', - dest='show', - action='store_true', - default=False - ) + parser = argparse.ArgumentParser( + prog="freqtrade-client", + description="Client for the freqtrade REST API", + ) + parser.add_argument( + "command", help="Positional argument defining the command to execute.", nargs="?" + ) + parser.add_argument("-V", "--version", action="version", version=f"%(prog)s {__version__}") + parser.add_argument( + "--show", + help="Show possible methods with this client", + dest="show", + action="store_true", + default=False, + ) - parser.add_argument('-c', '--config', - help='Specify configuration file (default: %(default)s). ', - dest='config', - type=str, - metavar='PATH', - default='config.json' - ) + parser.add_argument( + "-c", + "--config", + help="Specify configuration file (default: %(default)s). ", + dest="config", + type=str, + metavar="PATH", + default="config.json", + ) - parser.add_argument("command_arguments", - help="Positional arguments for the parameters for [command]", - nargs="*", - default=[] - ) + parser.add_argument( + "command_arguments", + help="Positional arguments for the parameters for [command]", + nargs="*", + default=[], + ) pargs = parser.parse_args(args) return vars(pargs) @@ -55,8 +62,9 @@ def load_config(configfile): file = Path(configfile) if file.is_file(): with file.open("r") as f: - config = rapidjson.load(f, parse_mode=rapidjson.PM_COMMENTS | - rapidjson.PM_TRAILING_COMMAS) + config = rapidjson.load( + f, parse_mode=rapidjson.PM_COMMENTS | rapidjson.PM_TRAILING_COMMAS + ) return config else: logger.warning(f"Could not load config file {file}.") @@ -68,27 +76,26 @@ def print_commands(): client = FtRestClient(None) print("Possible commands:\n") for x, _ in inspect.getmembers(client): - if not x.startswith('_'): - doc = re.sub(':return:.*', '', getattr(client, x).__doc__, flags=re.MULTILINE).rstrip() + if not x.startswith("_"): + doc = re.sub(":return:.*", "", getattr(client, x).__doc__, flags=re.MULTILINE).rstrip() print(f"{x}\n\t{doc}\n") def main_exec(args: Dict[str, Any]): - if args.get("show"): print_commands() sys.exit() - config = load_config(args['config']) - url = config.get('api_server', {}).get('listen_ip_address', '127.0.0.1') - port = config.get('api_server', {}).get('listen_port', '8080') - username = config.get('api_server', {}).get('username') - password = config.get('api_server', {}).get('password') + config = load_config(args["config"]) + url = config.get("api_server", {}).get("listen_ip_address", "127.0.0.1") + port = config.get("api_server", {}).get("listen_port", "8080") + username = config.get("api_server", {}).get("username") + password = config.get("api_server", {}).get("password") server_url = f"http://{url}:{port}" client = FtRestClient(server_url, username, password) - m = [x for x, y in inspect.getmembers(client) if not x.startswith('_')] + m = [x for x, y in inspect.getmembers(client) if not x.startswith("_")] command = args["command"] if command not in m: logger.error(f"Command {command} not defined") diff --git a/ft_client/freqtrade_client/ft_rest_client.py b/ft_client/freqtrade_client/ft_rest_client.py index 20e6d6f6e..6e5f7e6c5 100755 --- a/ft_client/freqtrade_client/ft_rest_client.py +++ b/ft_client/freqtrade_client/ft_rest_client.py @@ -21,31 +21,26 @@ PostDataT = Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] class FtRestClient: - - def 
__init__(self, serverurl, username=None, password=None, *, - pool_connections=10, pool_maxsize=10): - + def __init__( + self, serverurl, username=None, password=None, *, pool_connections=10, pool_maxsize=10 + ): self._serverurl = serverurl self._session = requests.Session() # allow configuration of pool adapter = requests.adapters.HTTPAdapter( - pool_connections=pool_connections, - pool_maxsize=pool_maxsize + pool_connections=pool_connections, pool_maxsize=pool_maxsize ) - self._session.mount('http://', adapter) + self._session.mount("http://", adapter) self._session.auth = (username, password) def _call(self, method, apipath, params: Optional[dict] = None, data=None, files=None): - - if str(method).upper() not in ('GET', 'POST', 'PUT', 'DELETE'): - raise ValueError(f'invalid method <{method}>') + if str(method).upper() not in ("GET", "POST", "PUT", "DELETE"): + raise ValueError(f"invalid method <{method}>") basepath = f"{self._serverurl}/api/v1/{apipath}" - hd = {"Accept": "application/json", - "Content-Type": "application/json" - } + hd = {"Accept": "application/json", "Content-Type": "application/json"} # Split url schema, netloc, path, par, query, fragment = urlparse(basepath) @@ -151,7 +146,7 @@ class FtRestClient: """ return self._delete(f"locks/{lock_id}") - def lock_add(self, pair: str, until: str, side: str = '*', reason: str = ''): + def lock_add(self, pair: str, until: str, side: str = "*", reason: str = ""): """Lock pair :param pair: Pair to lock @@ -160,14 +155,7 @@ class FtRestClient: :param reason: Reason for the lock :return: json object """ - data = [ - { - "pair": pair, - "until": until, - "side": side, - "reason": reason - } - ] + data = [{"pair": pair, "until": until, "side": side, "reason": reason}] return self._post("locks", data=data) def daily(self, days=None): @@ -234,7 +222,7 @@ class FtRestClient: return self._get("version") def show_config(self): - """ Returns part of the configuration, relevant for trading operations. + """Returns part of the configuration, relevant for trading operations. 
:return: json object containing the version """ return self._get("show_config") @@ -244,7 +232,7 @@ class FtRestClient: configstatus = self.show_config() if not configstatus: return {"status": "not_running"} - elif configstatus['state'] == "running": + elif configstatus["state"] == "running": return {"status": "pong"} else: return {"status": "not_running"} @@ -266,9 +254,9 @@ class FtRestClient: """ params = {} if limit: - params['limit'] = limit + params["limit"] = limit if offset: - params['offset'] = offset + params["offset"] = offset return self._get("trades", params) def trade(self, trade_id): @@ -321,9 +309,7 @@ class FtRestClient: :param price: Optional - price to buy :return: json object of the trade """ - data = {"pair": pair, - "price": price - } + data = {"pair": pair, "price": price} return self._post("forcebuy", data=data) def forceenter(self, pair, side, price=None): @@ -334,11 +320,12 @@ class FtRestClient: :param price: Optional - price to buy :return: json object of the trade """ - data = {"pair": pair, - "side": side, - } + data = { + "pair": pair, + "side": side, + } if price: - data['price'] = price + data["price"] = price return self._post("forceenter", data=data) def forceexit(self, tradeid, ordertype=None, amount=None): @@ -350,11 +337,14 @@ class FtRestClient: :return: json object """ - return self._post("forceexit", data={ - "tradeid": tradeid, - "ordertype": ordertype, - "amount": amount, - }) + return self._post( + "forceexit", + data={ + "tradeid": tradeid, + "ordertype": ordertype, + "amount": amount, + }, + ) def strategies(self): """Lists available strategies @@ -392,17 +382,21 @@ class FtRestClient: :param stake_currency: Only pairs that include this timeframe :return: json object """ - return self._get("available_pairs", params={ - "stake_currency": stake_currency if timeframe else '', - "timeframe": timeframe if timeframe else '', - }) + return self._get( + "available_pairs", + params={ + "stake_currency": stake_currency if timeframe else "", + "timeframe": timeframe if timeframe else "", + }, + ) - def pair_candles(self, pair, timeframe, limit=None): + def pair_candles(self, pair, timeframe, limit=None, columns=None): """Return live dataframe for . :param pair: Pair to get data for :param timeframe: Only pairs with this timeframe available. :param limit: Limit result to the last n candles. + :param columns: List of dataframe columns to return. Empty list will return OHLCV. 
:return: json object """ params = { @@ -410,7 +404,12 @@ class FtRestClient: "timeframe": timeframe, } if limit: - params['limit'] = limit + params["limit"] = limit + + if columns is not None: + params["columns"] = columns + return self._post("pair_candles", data=params) + return self._get("pair_candles", params=params) def pair_history(self, pair, timeframe, strategy, timerange=None, freqaimodel=None): @@ -423,13 +422,16 @@ class FtRestClient: :param timerange: Timerange to get data for (same format than --timerange endpoints) :return: json object """ - return self._get("pair_history", params={ - "pair": pair, - "timeframe": timeframe, - "strategy": strategy, - "freqaimodel": freqaimodel, - "timerange": timerange if timerange else '', - }) + return self._get( + "pair_history", + params={ + "pair": pair, + "timeframe": timeframe, + "strategy": strategy, + "freqaimodel": freqaimodel, + "timerange": timerange if timerange else "", + }, + ) def sysinfo(self): """Provides system information (CPU, RAM usage) diff --git a/ft_client/requirements.txt b/ft_client/requirements.txt index 56def4059..36cd79635 100644 --- a/ft_client/requirements.txt +++ b/ft_client/requirements.txt @@ -1,3 +1,3 @@ # Requirements for freqtrade client library -requests==2.31.0 -python-rapidjson==1.16 +requests==2.32.2 +python-rapidjson==1.17 diff --git a/ft_client/test_client/test_rest_client.py b/ft_client/test_client/test_rest_client.py index 13e32f1c5..08ccee765 100644 --- a/ft_client/test_client/test_rest_client.py +++ b/ft_client/test_client/test_rest_client.py @@ -2,9 +2,10 @@ import re from unittest.mock import MagicMock import pytest +from requests.exceptions import ConnectionError + from freqtrade_client import FtRestClient from freqtrade_client.ft_client import add_arguments, main_exec -from requests.exceptions import ConnectionError def log_has_re(line, logs): @@ -13,7 +14,7 @@ def log_has_re(line, logs): def get_rest_client(): - client = FtRestClient('http://localhost:8080', 'freqtrader', 'password') + client = FtRestClient("http://localhost:8080", "freqtrader", "password") client._session = MagicMock() request_mock = MagicMock() client._session.request = request_mock @@ -21,93 +22,96 @@ def get_rest_client(): def test_FtRestClient_init(): - client = FtRestClient('http://localhost:8080', 'freqtrader', 'password') + client = FtRestClient("http://localhost:8080", "freqtrader", "password") assert client is not None - assert client._serverurl == 'http://localhost:8080' + assert client._serverurl == "http://localhost:8080" assert client._session is not None assert client._session.auth is not None - assert client._session.auth == ('freqtrader', 'password') + assert client._session.auth == ("freqtrader", "password") -@pytest.mark.parametrize('method', ['GET', 'POST', 'DELETE']) +@pytest.mark.parametrize("method", ["GET", "POST", "DELETE"]) def test_FtRestClient_call(method): client, mock = get_rest_client() - client._call(method, '/dummytest') + client._call(method, "/dummytest") assert mock.call_count == 1 - getattr(client, f"_{method.lower()}")('/dummytest') + getattr(client, f"_{method.lower()}")("/dummytest") assert mock.call_count == 2 def test_FtRestClient_call_invalid(caplog): client, _ = get_rest_client() with pytest.raises(ValueError): - client._call('PUTTY', '/dummytest') + client._call("PUTTY", "/dummytest") client._session.request = MagicMock(side_effect=ConnectionError()) - client._call('GET', '/dummytest') + client._call("GET", "/dummytest") - assert log_has_re('Connection error', caplog) + assert 
log_has_re("Connection error", caplog) -@pytest.mark.parametrize('method,args', [ - ('start', []), - ('stop', []), - ('stopbuy', []), - ('reload_config', []), - ('balance', []), - ('count', []), - ('entries', []), - ('exits', []), - ('mix_tags', []), - ('locks', []), - ('lock_add', ["XRP/USDT", '2024-01-01 20:00:00Z', '*', 'rand']), - ('delete_lock', [2]), - ('daily', []), - ('daily', [15]), - ('weekly', []), - ('weekly', [15]), - ('monthly', []), - ('monthly', [12]), - ('edge', []), - ('profit', []), - ('stats', []), - ('performance', []), - ('status', []), - ('version', []), - ('show_config', []), - ('ping', []), - ('logs', []), - ('logs', [55]), - ('trades', []), - ('trades', [5]), - ('trades', [5, 5]), # With offset - ('trade', [1]), - ('delete_trade', [1]), - ('cancel_open_order', [1]), - ('whitelist', []), - ('blacklist', []), - ('blacklist', ['XRP/USDT']), - ('blacklist', ['XRP/USDT', 'BTC/USDT']), - ('forcebuy', ['XRP/USDT']), - ('forcebuy', ['XRP/USDT', 1.5]), - ('forceenter', ['XRP/USDT', 'short']), - ('forceenter', ['XRP/USDT', 'short', 1.5]), - ('forceexit', [1]), - ('forceexit', [1, 'limit']), - ('forceexit', [1, 'limit', 100]), - ('strategies', []), - ('strategy', ['sampleStrategy']), - ('pairlists_available', []), - ('plot_config', []), - ('available_pairs', []), - ('available_pairs', ['5m']), - ('pair_candles', ['XRP/USDT', '5m']), - ('pair_candles', ['XRP/USDT', '5m', 500]), - ('pair_history', ['XRP/USDT', '5m', 'SampleStrategy']), - ('sysinfo', []), - ('health', []), -]) +@pytest.mark.parametrize( + "method,args", + [ + ("start", []), + ("stop", []), + ("stopbuy", []), + ("reload_config", []), + ("balance", []), + ("count", []), + ("entries", []), + ("exits", []), + ("mix_tags", []), + ("locks", []), + ("lock_add", ["XRP/USDT", "2024-01-01 20:00:00Z", "*", "rand"]), + ("delete_lock", [2]), + ("daily", []), + ("daily", [15]), + ("weekly", []), + ("weekly", [15]), + ("monthly", []), + ("monthly", [12]), + ("edge", []), + ("profit", []), + ("stats", []), + ("performance", []), + ("status", []), + ("version", []), + ("show_config", []), + ("ping", []), + ("logs", []), + ("logs", [55]), + ("trades", []), + ("trades", [5]), + ("trades", [5, 5]), # With offset + ("trade", [1]), + ("delete_trade", [1]), + ("cancel_open_order", [1]), + ("whitelist", []), + ("blacklist", []), + ("blacklist", ["XRP/USDT"]), + ("blacklist", ["XRP/USDT", "BTC/USDT"]), + ("forcebuy", ["XRP/USDT"]), + ("forcebuy", ["XRP/USDT", 1.5]), + ("forceenter", ["XRP/USDT", "short"]), + ("forceenter", ["XRP/USDT", "short", 1.5]), + ("forceexit", [1]), + ("forceexit", [1, "limit"]), + ("forceexit", [1, "limit", 100]), + ("strategies", []), + ("strategy", ["sampleStrategy"]), + ("pairlists_available", []), + ("plot_config", []), + ("available_pairs", []), + ("available_pairs", ["5m"]), + ("pair_candles", ["XRP/USDT", "5m"]), + ("pair_candles", ["XRP/USDT", "5m", 500]), + ("pair_history", ["XRP/USDT", "5m", "SampleStrategy"]), + ("sysinfo", []), + ("health", []), + ], +) def test_FtRestClient_call_explicit_methods(method, args): client, mock = get_rest_client() exec = getattr(client, method) @@ -117,37 +121,30 @@ def test_FtRestClient_call_explicit_methods(method, args): def test_ft_client(mocker, capsys, caplog): with pytest.raises(SystemExit): - args = add_arguments(['-V']) + args = add_arguments(["-V"]) - args = add_arguments(['--show']) + args = add_arguments(["--show"]) assert isinstance(args, dict) - assert args['show'] is True + assert args["show"] is True with pytest.raises(SystemExit): main_exec(args) 
captured = capsys.readouterr() - assert 'Possible commands' in captured.out + assert "Possible commands" in captured.out - mock = mocker.patch('freqtrade_client.ft_client.FtRestClient._call') - args = add_arguments([ - '--config', - 'tests/testdata/testconfigs/main_test_config.json', - 'ping' - ]) + mock = mocker.patch("freqtrade_client.ft_client.FtRestClient._call") + args = add_arguments(["--config", "tests/testdata/testconfigs/main_test_config.json", "ping"]) main_exec(args) captured = capsys.readouterr() assert mock.call_count == 1 with pytest.raises(SystemExit): - args = add_arguments(['--config', 'tests/testdata/testconfigs/nonexisting.json']) + args = add_arguments(["--config", "tests/testdata/testconfigs/nonexisting.json"]) main_exec(args) - assert log_has_re(r'Could not load config file .*nonexisting\.json\.', - caplog) + assert log_has_re(r"Could not load config file .*nonexisting\.json\.", caplog) - args = add_arguments([ - '--config', - 'tests/testdata/testconfigs/main_test_config.json', - 'whatever' - ]) + args = add_arguments( + ["--config", "tests/testdata/testconfigs/main_test_config.json", "whatever"] + ) main_exec(args) - assert log_has_re('Command whatever not defined', caplog) + assert log_has_re("Command whatever not defined", caplog) diff --git a/mkdocs.yml b/mkdocs.yml index 0a1b65f03..ef8b7181b 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,5 @@ site_name: Freqtrade -site_url: https://www.freqtrade.io/en/latest/ +site_url: !ENV [READTHEDOCS_CANONICAL_URL, 'https://www.freqtrade.io/en/latest/'] repo_url: https://github.com/freqtrade/freqtrade edit_uri: edit/develop/docs/ use_directory_urls: True @@ -18,7 +18,8 @@ nav: - Start the bot: bot-usage.md - Control the bot: - Telegram: telegram-usage.md - - REST API & FreqUI: rest-api.md + - freqUI: freq-ui.md + - REST API: rest-api.md - Web Hook: webhook-config.md - Data Downloading: data-download.md - Backtesting: backtesting.md @@ -47,9 +48,9 @@ nav: - Advanced Strategy: strategy-advanced.md - Advanced Hyperopt: advanced-hyperopt.md - Producer/Consumer mode: producer-consumer.md + - SQL Cheat-sheet: sql_cheatsheet.md - Edge Positioning: edge.md - FAQ: faq.md - - SQL Cheat-sheet: sql_cheatsheet.md - Strategy migration: strategy_migration.md - Updating Freqtrade: updating.md - Deprecated Features: deprecated.md diff --git a/pyproject.toml b/pyproject.toml index 5e02079a2..e96768c5b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,9 +75,11 @@ exclude = ''' [tool.isort] line_length = 100 -multi_line_output=0 +profile = "black" +# multi_line_output=3 lines_after_imports=2 skip_glob = ["**/.env*", "**/env/*", "**/.venv/*", "**/docs/*", "**/user_data/*"] +known_first_party = ["freqtrade_client"] [tool.pytest.ini_options] asyncio_mode = "auto" @@ -128,6 +130,7 @@ extend-select = [ "E", # pycodestyle "W", # pycodestyle "UP", # pyupgrade + "I", # isort "TID", # flake8-tidy-imports # "EXE", # flake8-executable # "C4", # flake8-comprehensions @@ -137,6 +140,7 @@ extend-select = [ # "RSE", # flake8-raise # "TCH", # flake8-type-checking "PTH", # flake8-use-pathlib + # "RUF", # ruff ] extend-ignore = [ @@ -156,10 +160,14 @@ max-complexity = 12 # Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`. 
extend-immutable-calls = ["fastapi.Depends", "fastapi.Query"] +[tool.ruff.lint.isort] +lines-after-imports = 2 +known-first-party = ["freqtrade_client"] + [tool.flake8] # Default from https://flake8.pycqa.org/en/latest/user/options.html#cmdoption-flake8-ignore # minus E226 -ignore = ["E121","E123","E126","E24","E704","W503","W504"] +ignore = ["E121","E123","E126","E24", "E203","E704","W503","W504"] max-line-length = 100 max-complexity = 12 exclude = [ @@ -173,4 +181,4 @@ exclude = [ [tool.codespell] ignore-words-list = "coo,fo,strat,zar,selectin" -skip="*.svg,./user_data,./freqtrade/rpc/api_server/ui/installed" +skip="*.svg,./user_data,freqtrade/rpc/api_server/ui/installed,freqtrade/exchange/*.json" diff --git a/requirements-dev.txt b/requirements-dev.txt index ee6715225..c9d561b40 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,26 +6,26 @@ -r requirements-freqai-rl.txt -r docs/requirements-docs.txt -coveralls==3.3.1 -ruff==0.4.1 -mypy==1.9.0 -pre-commit==3.7.0 -pytest==8.1.1 -pytest-asyncio==0.23.6 +coveralls==4.0.1 +ruff==0.4.5 +mypy==1.10.0 +pre-commit==3.7.1 +pytest==8.2.1 +pytest-asyncio==0.23.7 pytest-cov==5.0.0 pytest-mock==3.14.0 pytest-random-order==1.1.1 -pytest-xdist==3.5.0 +pytest-xdist==3.6.1 isort==5.13.2 # For datetime mocking time-machine==2.14.1 # Convert jupyter notebooks to markdown documents -nbconvert==7.16.3 +nbconvert==7.16.4 # mypy types types-cachetools==5.3.0.7 types-filelock==3.2.7 -types-requests==2.31.0.20240406 +types-requests==2.32.0.20240523 types-tabulate==0.9.0.20240106 types-python-dateutil==2.9.0.20240316 diff --git a/requirements-freqai-rl.txt b/requirements-freqai-rl.txt index b949bc56b..546c0b62a 100644 --- a/requirements-freqai-rl.txt +++ b/requirements-freqai-rl.txt @@ -4,7 +4,7 @@ # Required for freqai-rl torch==2.2.2 gymnasium==0.29.1 -stable_baselines3==2.3.0 +stable_baselines3==2.3.2 sb3_contrib>=2.2.1 # Progress bar for stable-baselines3 and sb3-contrib -tqdm==4.66.2 +tqdm==4.66.4 diff --git a/requirements-freqai.txt b/requirements-freqai.txt index 6bab60ddc..cdc32e11b 100644 --- a/requirements-freqai.txt +++ b/requirements-freqai.txt @@ -3,8 +3,8 @@ -r requirements-plot.txt # Required for freqai -scikit-learn==1.4.2 -joblib==1.4.0 +scikit-learn==1.5.0 +joblib==1.4.2 catboost==1.2.5; 'arm' not in platform_machine lightgbm==4.3.0 xgboost==2.0.3 diff --git a/requirements-hyperopt.txt b/requirements-hyperopt.txt index 7ab39a42d..99d4ee5c6 100644 --- a/requirements-hyperopt.txt +++ b/requirements-hyperopt.txt @@ -2,7 +2,7 @@ -r requirements.txt # Required for hyperopt -scipy==1.13.0 -scikit-learn==1.4.2 +scipy==1.13.1 +scikit-learn==1.5.0 ft-scikit-optimize==0.9.2 -filelock==3.13.4 +filelock==3.14.0 diff --git a/requirements-plot.txt b/requirements-plot.txt index 2ef738372..b4dc2e46c 100644 --- a/requirements-plot.txt +++ b/requirements-plot.txt @@ -1,4 +1,4 @@ # Include all requirements to run the bot. 
-r requirements.txt -plotly==5.21.0 +plotly==5.22.0 diff --git a/requirements.txt b/requirements.txt index 303e2405e..8c31cc80e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,42 +2,42 @@ numpy==1.26.4 pandas==2.2.2 pandas-ta==0.3.14b -ccxt==4.3.4 -cryptography==42.0.5 +ccxt==4.3.35 +cryptography==42.0.7 aiohttp==3.9.5 -SQLAlchemy==2.0.29 -python-telegram-bot==21.1.1 +SQLAlchemy==2.0.30 +python-telegram-bot==21.2 # can't be hard-pinned due to telegram-bot pinning httpx with ~ httpx>=0.24.1 humanize==4.9.0 cachetools==5.3.3 -requests==2.31.0 +requests==2.32.2 urllib3==2.2.1 -jsonschema==4.21.1 -TA-Lib==0.4.28 +jsonschema==4.22.0 +TA-Lib==0.4.29 technical==1.4.3 tabulate==0.9.0 pycoingecko==3.1.0 -jinja2==3.1.3 +jinja2==3.1.4 tables==3.9.1 -joblib==1.4.0 +joblib==1.4.2 rich==13.7.1 -pyarrow==16.0.0; platform_machine != 'armv7l' +pyarrow==16.1.0; platform_machine != 'armv7l' # find first, C search in arrays py_find_1st==1.1.6 # Load ticker files 30% faster -python-rapidjson==1.16 +python-rapidjson==1.17 # Properly format api responses -orjson==3.10.1 +orjson==3.10.3 # Notify systemd sdnotify==0.3.2 # API Server -fastapi==0.110.2 -pydantic==2.7.0 +fastapi==0.111.0 +pydantic==2.7.1 uvicorn==0.29.0 pyjwt==2.8.0 aiofiles==23.2.1 @@ -53,7 +53,7 @@ python-dateutil==2.9.0.post0 pytz==2024.1 #Futures -schedule==1.2.1 +schedule==1.2.2 #WS Messages websockets==12.0 diff --git a/scripts/rest_client.py b/scripts/rest_client.py index c22dd18ae..88862b044 100755 --- a/scripts/rest_client.py +++ b/scripts/rest_client.py @@ -10,5 +10,5 @@ so it can be used as a standalone script. from freqtrade_client.ft_client import main -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/scripts/ws_client.py b/scripts/ws_client.py index 818426da2..ec6df5742 100755 --- a/scripts/ws_client.py +++ b/scripts/ws_client.py @@ -6,6 +6,7 @@ a Freqtrade bot's message websocket Should not import anything from freqtrade, so it can be used as a standalone script. """ + import argparse import asyncio import logging @@ -25,35 +26,33 @@ logger = logging.getLogger("WebSocketClient") # --------------------------------------------------------------------------- + def setup_logging(filename: str): logging.basicConfig( level=logging.DEBUG, - format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', - handlers=[ - logging.FileHandler(filename), - logging.StreamHandler() - ] + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[logging.FileHandler(filename), logging.StreamHandler()], ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( - '-c', - '--config', - help='Specify configuration file (default: %(default)s). ', - dest='config', + "-c", + "--config", + help="Specify configuration file (default: %(default)s). 
", + dest="config", type=str, - metavar='PATH', - default='config.json' + metavar="PATH", + default="config.json", ) parser.add_argument( - '-l', - '--logfile', - help='The filename to log to.', - dest='logfile', + "-l", + "--logfile", + help="The filename to log to.", + dest="logfile", type=str, - default='ws_client.log' + default="ws_client.log", ) args = parser.parse_args() @@ -64,8 +63,9 @@ def load_config(configfile): file = Path(configfile) if file.is_file(): with file.open("r") as f: - config = rapidjson.load(f, parse_mode=rapidjson.PM_COMMENTS | - rapidjson.PM_TRAILING_COMMAS) + config = rapidjson.load( + f, parse_mode=rapidjson.PM_COMMENTS | rapidjson.PM_TRAILING_COMMAS + ) return config else: logger.warning(f"Could not load config file {file}.") @@ -84,6 +84,7 @@ def readable_timedelta(delta): return f"{int(minutes)}:{int(seconds)}.{int(milliseconds)}" + # ---------------------------------------------------------------------------- @@ -100,16 +101,17 @@ def json_deserialize(message): Deserialize JSON to a dict :param message: The message to deserialize """ + def json_to_dataframe(data: str) -> pandas.DataFrame: - dataframe = pandas.read_json(data, orient='split') - if 'date' in dataframe.columns: - dataframe['date'] = pandas.to_datetime(dataframe['date'], unit='ms', utc=True) + dataframe = pandas.read_json(data, orient="split") + if "date" in dataframe.columns: + dataframe["date"] = pandas.to_datetime(dataframe["date"], unit="ms", utc=True) return dataframe def _json_object_hook(z): - if z.get('__type__') == 'dataframe': - return json_to_dataframe(z.get('__value__')) + if z.get("__type__") == "dataframe": + return json_to_dataframe(z.get("__value__")) return z return rapidjson.loads(message, object_hook=_json_object_hook) @@ -128,16 +130,13 @@ class ClientProtocol: initial_requests = [ { "type": "subscribe", # The subscribe request should always be first - "data": ["analyzed_df", "whitelist"] # The message types we want + "data": ["analyzed_df", "whitelist"], # The message types we want }, { "type": "whitelist", "data": None, }, - { - "type": "analyzed_df", - "data": {"limit": 1500} - } + {"type": "analyzed_df", "data": {"limit": 1500}}, ] for request in initial_requests: @@ -147,8 +146,8 @@ class ClientProtocol: deserialized = json_deserialize(message) message_size = sys.getsizeof(message) - message_type = deserialized.get('type') - message_data = deserialized.get('data') + message_type = deserialized.get("type") + message_data = deserialized.get("data") self.logger.info( f"Received message of type {message_type} [{message_size} bytes] @ [{name}]" @@ -177,7 +176,7 @@ class ClientProtocol: self.logger.info(data) async def _handle_analyzed_df(self, name, type, data): - key, la, df = data['key'], data['la'], data['df'] + key, la, df = data["key"], data["la"], data["df"] if not df.empty: columns = ", ".join([str(column) for column in df.columns]) @@ -196,16 +195,16 @@ class ClientProtocol: async def create_client( - host, - port, - token, - scheme='ws', - name='default', - protocol=None, - sleep_time=10, - ping_timeout=10, - wait_timeout=30, - **kwargs + host, + port, + token, + scheme="ws", + name="default", + protocol=None, + sleep_time=10, + ping_timeout=10, + wait_timeout=30, + **kwargs, ): """ Create a websocket client and listen for messages @@ -231,21 +230,15 @@ async def create_client( # Now listen for messages while 1: try: - message = await asyncio.wait_for( - ws.recv(), - timeout=wait_timeout - ) + message = await asyncio.wait_for(ws.recv(), timeout=wait_timeout) await 
protocol.on_message(ws, name, message) - except ( - asyncio.TimeoutError, - websockets.exceptions.WebSocketException - ): + except (asyncio.TimeoutError, websockets.exceptions.WebSocketException): # Try pinging try: pong = await ws.ping() - latency = (await asyncio.wait_for(pong, timeout=ping_timeout) * 1000) + latency = await asyncio.wait_for(pong, timeout=ping_timeout) * 1000 logger.info(f"Connection still alive, latency: {latency}ms") @@ -261,7 +254,7 @@ async def create_client( socket.gaierror, ConnectionRefusedError, websockets.exceptions.InvalidStatusCode, - websockets.exceptions.InvalidMessage + websockets.exceptions.InvalidMessage, ) as e: logger.error(f"Connection Refused - {e} retrying in {sleep_time}s") await asyncio.sleep(sleep_time) @@ -270,7 +263,7 @@ async def create_client( except ( websockets.exceptions.ConnectionClosedError, - websockets.exceptions.ConnectionClosedOK + websockets.exceptions.ConnectionClosedOK, ): logger.info("Connection was closed") # Just keep trying to connect again indefinitely @@ -291,30 +284,30 @@ async def create_client( async def _main(args): - setup_logging(args['logfile']) - config = load_config(args['config']) + setup_logging(args["logfile"]) + config = load_config(args["config"]) - emc_config = config.get('external_message_consumer', {}) + emc_config = config.get("external_message_consumer", {}) - producers = emc_config.get('producers', []) + producers = emc_config.get("producers", []) producer = producers[0] - wait_timeout = emc_config.get('wait_timeout', 30) - ping_timeout = emc_config.get('ping_timeout', 10) - sleep_time = emc_config.get('sleep_time', 10) - message_size_limit = (emc_config.get('message_size_limit', 8) << 20) + wait_timeout = emc_config.get("wait_timeout", 30) + ping_timeout = emc_config.get("ping_timeout", 10) + sleep_time = emc_config.get("sleep_time", 10) + message_size_limit = emc_config.get("message_size_limit", 8) << 20 await create_client( - producer['host'], - producer['port'], - producer['ws_token'], - 'wss' if producer.get('secure', False) else 'ws', - producer['name'], + producer["host"], + producer["port"], + producer["ws_token"], + "wss" if producer.get("secure", False) else "ws", + producer["name"], sleep_time=sleep_time, ping_timeout=ping_timeout, wait_timeout=wait_timeout, max_size=message_size_limit, - ping_interval=None + ping_interval=None, ) diff --git a/setup.py b/setup.py index 504d3b2b7..8865f46be 100644 --- a/setup.py +++ b/setup.py @@ -2,127 +2,126 @@ from setuptools import setup # Requirements used for submodules -plot = ['plotly>=4.0'] +plot = ["plotly>=4.0"] hyperopt = [ - 'scipy', - 'scikit-learn', - 'ft-scikit-optimize>=0.9.2', - 'filelock', + "scipy", + "scikit-learn", + "ft-scikit-optimize>=0.9.2", + "filelock", ] freqai = [ - 'scikit-learn', - 'joblib', + "scikit-learn", + "joblib", 'catboost; platform_machine != "aarch64"', - 'lightgbm', - 'xgboost', - 'tensorboard', - 'datasieve>=0.1.5' + "lightgbm", + "xgboost", + "tensorboard", + "datasieve>=0.1.5", ] freqai_rl = [ - 'torch', - 'gymnasium', - 'stable-baselines3', - 'sb3-contrib', - 'tqdm' + "torch", + "gymnasium", + "stable-baselines3", + "sb3-contrib", + "tqdm", ] hdf5 = [ - 'tables', - 'blosc', + "tables", + "blosc", ] develop = [ - 'coveralls', - 'isort', - 'mypy', - 'pre-commit', - 'pytest-asyncio', - 'pytest-cov', - 'pytest-mock', - 'pytest-random-order', - 'pytest', - 'ruff', - 'time-machine', - 'types-cachetools', - 'types-filelock', - 'types-python-dateutil' - 'types-requests', - 'types-tabulate', + "coveralls", + "isort", + 
"mypy", + "pre-commit", + "pytest-asyncio", + "pytest-cov", + "pytest-mock", + "pytest-random-order", + "pytest", + "ruff", + "time-machine", + "types-cachetools", + "types-filelock", + "types-python-dateutil" "types-requests", + "types-tabulate", ] jupyter = [ - 'jupyter', - 'nbstripout', - 'ipykernel', - 'nbconvert', + "jupyter", + "nbstripout", + "ipykernel", + "nbconvert", ] all_extra = plot + develop + jupyter + hyperopt + hdf5 + freqai + freqai_rl setup( tests_require=[ - 'pytest', - 'pytest-asyncio', - 'pytest-cov', - 'pytest-mock', + "pytest", + "pytest-asyncio", + "pytest-cov", + "pytest-mock", ], install_requires=[ # from requirements.txt - 'ccxt>=4.2.47', - 'SQLAlchemy>=2.0.6', - 'python-telegram-bot>=20.1', - 'humanize>=4.0.0', - 'cachetools', - 'requests', - 'httpx>=0.24.1', - 'urllib3', - 'jsonschema', - 'numpy', - 'pandas>=2.2.0,<3.0', - 'TA-Lib', - 'pandas-ta', - 'technical', - 'tabulate', - 'pycoingecko', - 'py_find_1st', - 'python-rapidjson', - 'orjson', - 'colorama', - 'jinja2', - 'questionary', - 'prompt-toolkit', - 'joblib>=1.2.0', - 'rich', + "ccxt>=4.3.24", + "SQLAlchemy>=2.0.6", + "python-telegram-bot>=20.1", + "humanize>=4.0.0", + "cachetools", + "requests", + "httpx>=0.24.1", + "urllib3", + "jsonschema", + "numpy", + "pandas>=2.2.0,<3.0", + "TA-Lib", + "pandas-ta", + "technical", + "tabulate", + "pycoingecko", + "py_find_1st", + "python-rapidjson", + "orjson", + "colorama", + "jinja2", + "questionary", + "prompt-toolkit", + "joblib>=1.2.0", + "rich", 'pyarrow; platform_machine != "armv7l"', - 'fastapi', - 'pydantic>=2.2.0', - 'pyjwt', - 'websockets', - 'uvicorn', - 'psutil', - 'schedule', - 'janus', - 'ast-comments', - 'aiofiles', - 'aiohttp', - 'cryptography', - 'sdnotify', - 'python-dateutil', - 'pytz', - 'packaging', - 'freqtrade-client', + "fastapi", + "pydantic>=2.2.0", + "pyjwt", + "websockets", + "uvicorn", + "psutil", + "schedule", + "janus", + "ast-comments", + "aiofiles", + "aiohttp", + "cryptography", + "sdnotify", + "python-dateutil", + "pytz", + "packaging", + "freqtrade-client", ], extras_require={ - 'dev': all_extra, - 'plot': plot, - 'jupyter': jupyter, - 'hyperopt': hyperopt, - 'hdf5': hdf5, - 'freqai': freqai, - 'freqai_rl': freqai_rl, - 'all': all_extra, + "dev": all_extra, + "plot": plot, + "jupyter": jupyter, + "hyperopt": hyperopt, + "hdf5": hdf5, + "freqai": freqai, + "freqai_rl": freqai_rl, + "all": all_extra, }, url="https://github.com/freqtrade/freqtrade", ) diff --git a/setup.sh b/setup.sh index 68374a689..18f7682d8 100755 --- a/setup.sh +++ b/setup.sh @@ -25,7 +25,7 @@ function check_installed_python() { exit 2 fi - for v in 11 10 9 + for v in 12 11 10 9 do PYTHON="python3.${v}" which $PYTHON @@ -277,7 +277,7 @@ function install() { install_redhat else echo "This script does not support your OS." - echo "If you have Python version 3.9 - 3.11, pip, virtualenv, ta-lib you can continue." + echo "If you have Python version 3.9 - 3.12, pip, virtualenv, ta-lib you can continue." echo "Wait 10 seconds to continue the next install steps or use ctrl+c to interrupt this shell." 
sleep 10 fi diff --git a/tests/commands/test_build_config.py b/tests/commands/test_build_config.py index f799be3ba..5d287a35f 100644 --- a/tests/commands/test_build_config.py +++ b/tests/commands/test_build_config.py @@ -4,85 +4,84 @@ from unittest.mock import MagicMock import pytest import rapidjson -from freqtrade.commands.build_config_commands import (ask_user_config, ask_user_overwrite, - start_new_config, validate_is_float, - validate_is_int) +from freqtrade.commands.build_config_commands import ( + ask_user_config, + ask_user_overwrite, + start_new_config, + validate_is_float, + validate_is_int, +) from freqtrade.exceptions import OperationalException from tests.conftest import get_args, log_has_re def test_validate_is_float(): - assert validate_is_float('2.0') - assert validate_is_float('2.1') - assert validate_is_float('0.1') - assert validate_is_float('-0.5') - assert not validate_is_float('-0.5e') + assert validate_is_float("2.0") + assert validate_is_float("2.1") + assert validate_is_float("0.1") + assert validate_is_float("-0.5") + assert not validate_is_float("-0.5e") def test_validate_is_int(): - assert validate_is_int('2') - assert validate_is_int('6') - assert validate_is_int('-1') - assert validate_is_int('500') - assert not validate_is_int('2.0') - assert not validate_is_int('2.1') - assert not validate_is_int('-2.1') - assert not validate_is_int('-ee') + assert validate_is_int("2") + assert validate_is_int("6") + assert validate_is_int("-1") + assert validate_is_int("500") + assert not validate_is_int("2.0") + assert not validate_is_int("2.1") + assert not validate_is_int("-2.1") + assert not validate_is_int("-ee") -@pytest.mark.parametrize('exchange', ['bybit', 'binance', 'kraken']) +@pytest.mark.parametrize("exchange", ["bybit", "binance", "kraken"]) def test_start_new_config(mocker, caplog, exchange): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=True)) unlink_mock = mocker.patch.object(Path, "unlink", MagicMock()) - mocker.patch('freqtrade.commands.build_config_commands.ask_user_overwrite', return_value=True) + mocker.patch("freqtrade.commands.build_config_commands.ask_user_overwrite", return_value=True) sample_selections = { - 'max_open_trades': 3, - 'stake_currency': 'USDT', - 'stake_amount': 100, - 'fiat_display_currency': 'EUR', - 'timeframe': '15m', - 'dry_run': True, - 'trading_mode': 'spot', - 'margin_mode': '', - 'exchange_name': exchange, - 'exchange_key': 'sampleKey', - 'exchange_secret': 'Samplesecret', - 'telegram': False, - 'telegram_token': 'asdf1244', - 'telegram_chat_id': '1144444', - 'api_server': False, - 'api_server_listen_addr': '127.0.0.1', - 'api_server_username': 'freqtrader', - 'api_server_password': 'MoneyMachine', + "max_open_trades": 3, + "stake_currency": "USDT", + "stake_amount": 100, + "fiat_display_currency": "EUR", + "timeframe": "15m", + "dry_run": True, + "trading_mode": "spot", + "margin_mode": "", + "exchange_name": exchange, + "exchange_key": "sampleKey", + "exchange_secret": "Samplesecret", + "telegram": False, + "telegram_token": "asdf1244", + "telegram_chat_id": "1144444", + "api_server": False, + "api_server_listen_addr": "127.0.0.1", + "api_server_username": "freqtrader", + "api_server_password": "MoneyMachine", } - mocker.patch('freqtrade.commands.build_config_commands.ask_user_config', - return_value=sample_selections) - args = [ - "new-config", - "--config", - "coolconfig.json" - ] + mocker.patch( + 
"freqtrade.commands.build_config_commands.ask_user_config", return_value=sample_selections + ) + args = ["new-config", "--config", "coolconfig.json"] start_new_config(get_args(args)) assert log_has_re("Writing config to .*", caplog) assert wt_mock.call_count == 1 assert unlink_mock.call_count == 1 - result = rapidjson.loads(wt_mock.call_args_list[0][0][0], - parse_mode=rapidjson.PM_COMMENTS | rapidjson.PM_TRAILING_COMMAS) - assert result['exchange']['name'] == exchange - assert result['timeframe'] == '15m' + result = rapidjson.loads( + wt_mock.call_args_list[0][0][0], + parse_mode=rapidjson.PM_COMMENTS | rapidjson.PM_TRAILING_COMMAS, + ) + assert result["exchange"]["name"] == exchange + assert result["timeframe"] == "15m" def test_start_new_config_exists(mocker, caplog): mocker.patch.object(Path, "exists", MagicMock(return_value=True)) - mocker.patch('freqtrade.commands.build_config_commands.ask_user_overwrite', return_value=False) - args = [ - "new-config", - "--config", - "coolconfig.json" - ] + mocker.patch("freqtrade.commands.build_config_commands.ask_user_overwrite", return_value=False) + args = ["new-config", "--config", "coolconfig.json"] with pytest.raises(OperationalException, match=r"Configuration .* already exists\."): start_new_config(get_args(args)) @@ -91,15 +90,17 @@ def test_ask_user_overwrite(mocker): """ Once https://github.com/tmbo/questionary/issues/35 is implemented, improve this test. """ - prompt_mock = mocker.patch('freqtrade.commands.build_config_commands.prompt', - return_value={'overwrite': False}) - assert not ask_user_overwrite(Path('test.json')) + prompt_mock = mocker.patch( + "freqtrade.commands.build_config_commands.prompt", return_value={"overwrite": False} + ) + assert not ask_user_overwrite(Path("test.json")) assert prompt_mock.call_count == 1 prompt_mock.reset_mock() - prompt_mock = mocker.patch('freqtrade.commands.build_config_commands.prompt', - return_value={'overwrite': True}) - assert ask_user_overwrite(Path('test.json')) + prompt_mock = mocker.patch( + "freqtrade.commands.build_config_commands.prompt", return_value={"overwrite": True} + ) + assert ask_user_overwrite(Path("test.json")) assert prompt_mock.call_count == 1 @@ -107,14 +108,14 @@ def test_ask_user_config(mocker): """ Once https://github.com/tmbo/questionary/issues/35 is implemented, improve this test. 
""" - prompt_mock = mocker.patch('freqtrade.commands.build_config_commands.prompt', - return_value={'overwrite': False}) + prompt_mock = mocker.patch( + "freqtrade.commands.build_config_commands.prompt", return_value={"overwrite": False} + ) answers = ask_user_config() assert isinstance(answers, dict) assert prompt_mock.call_count == 1 - prompt_mock = mocker.patch('freqtrade.commands.build_config_commands.prompt', - return_value={}) + prompt_mock = mocker.patch("freqtrade.commands.build_config_commands.prompt", return_value={}) with pytest.raises(OperationalException, match=r"User interrupted interactive questions\."): ask_user_config() diff --git a/tests/commands/test_commands.py b/tests/commands/test_commands.py index 630950c81..c98c6302e 100644 --- a/tests/commands/test_commands.py +++ b/tests/commands/test_commands.py @@ -8,16 +8,35 @@ from zipfile import ZipFile import pytest -from freqtrade.commands import (start_backtesting_show, start_convert_data, start_convert_trades, - start_create_userdir, start_download_data, start_hyperopt_list, - start_hyperopt_show, start_install_ui, start_list_data, - start_list_exchanges, start_list_markets, start_list_strategies, - start_list_timeframes, start_new_strategy, start_show_config, - start_show_trades, start_strategy_update, start_test_pairlist, - start_trading, start_webserver) +from freqtrade.commands import ( + start_backtesting_show, + start_convert_data, + start_convert_trades, + start_create_userdir, + start_download_data, + start_hyperopt_list, + start_hyperopt_show, + start_install_ui, + start_list_data, + start_list_exchanges, + start_list_markets, + start_list_strategies, + start_list_timeframes, + start_new_strategy, + start_show_config, + start_show_trades, + start_strategy_update, + start_test_pairlist, + start_trading, + start_webserver, +) from freqtrade.commands.db_commands import start_convert_db -from freqtrade.commands.deploy_commands import (clean_ui_subdir, download_and_install_ui, - get_ui_download_url, read_ui_version) +from freqtrade.commands.deploy_commands import ( + clean_ui_subdir, + download_and_install_ui, + get_ui_download_url, + read_ui_version, +) from freqtrade.commands.list_commands import start_list_freqAI_models from freqtrade.configuration import setup_utils_configuration from freqtrade.enums import RunMode @@ -25,40 +44,49 @@ from freqtrade.exceptions import OperationalException from freqtrade.persistence.models import init_db from freqtrade.persistence.pairlock_middleware import PairLocks from freqtrade.util import dt_floor_day, dt_now, dt_utc -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, create_mock_trades, get_args, log_has, - log_has_re, patch_exchange, patched_configuration_load_config_file) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + create_mock_trades, + get_args, + log_has, + log_has_re, + patch_exchange, + patched_configuration_load_config_file, +) +from tests.conftest_hyperopt import hyperopt_test_result from tests.conftest_trades import MOCK_TRADE_COUNT def test_setup_utils_configuration(): args = [ - 'list-exchanges', '--config', 'tests/testdata/testconfigs/main_test_config.json', + "list-exchanges", + "--config", + "tests/testdata/testconfigs/main_test_config.json", ] config = setup_utils_configuration(get_args(args), RunMode.OTHER) assert "exchange" in config - assert config['dry_run'] is True + assert config["dry_run"] is True args = [ - 'list-exchanges', '--config', 'tests/testdata/testconfigs/testconfig.json', + "list-exchanges", + "--config", + 
"tests/testdata/testconfigs/testconfig.json", ] config = setup_utils_configuration(get_args(args), RunMode.OTHER, set_dry=False) assert "exchange" in config - assert config['dry_run'] is False + assert config["dry_run"] is False def test_start_trading_fail(mocker, caplog): - mocker.patch("freqtrade.worker.Worker.run", MagicMock(side_effect=OperationalException)) mocker.patch("freqtrade.worker.Worker.__init__", MagicMock(return_value=None)) exitmock = mocker.patch("freqtrade.worker.Worker.exit", MagicMock()) - args = [ - 'trade', - '-c', 'tests/testdata/testconfigs/main_test_config.json' - ] + args = ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"] with pytest.raises(OperationalException): start_trading(get_args(args)) assert exitmock.call_count == 1 @@ -72,19 +100,16 @@ def test_start_trading_fail(mocker, caplog): def test_start_webserver(mocker, caplog): + api_server_mock = mocker.patch( + "freqtrade.rpc.api_server.ApiServer", + ) - api_server_mock = mocker.patch("freqtrade.rpc.api_server.ApiServer", ) - - args = [ - 'webserver', - '-c', 'tests/testdata/testconfigs/main_test_config.json' - ] + args = ["webserver", "-c", "tests/testdata/testconfigs/main_test_config.json"] start_webserver(get_args(args)) assert api_server_mock.call_count == 1 def test_list_exchanges(capsys): - args = [ "list-exchanges", ] @@ -134,72 +159,79 @@ def test_list_exchanges(capsys): def test_list_timeframes(mocker, capsys): - api_mock = MagicMock() - api_mock.timeframes = {'1m': 'oneMin', - '5m': 'fiveMin', - '30m': 'thirtyMin', - '1h': 'hour', - '1d': 'day', - } - patch_exchange(mocker, api_mock=api_mock, id='bybit') + api_mock.timeframes = { + "1m": "oneMin", + "5m": "fiveMin", + "30m": "thirtyMin", + "1h": "hour", + "1d": "day", + } + patch_exchange(mocker, api_mock=api_mock, id="bybit") args = [ "list-timeframes", ] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"This command requires a configured exchange.*"): + pargs["config"] = None + with pytest.raises( + OperationalException, match=r"This command requires a configured exchange.*" + ): start_list_timeframes(pargs) # Test with --config tests/testdata/testconfigs/main_test_config.json args = [ "list-timeframes", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "--config", + "tests/testdata/testconfigs/main_test_config.json", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() - assert re.match("Timeframes available for the exchange `Bybit`: " - "1m, 5m, 30m, 1h, 1d", - captured.out) + assert re.match( + "Timeframes available for the exchange `Bybit`: 1m, 5m, 30m, 1h, 1d", captured.out + ) # Test with --exchange bybit args = [ "list-timeframes", - "--exchange", "bybit", + "--exchange", + "bybit", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() - assert re.match("Timeframes available for the exchange `Bybit`: " - "1m, 5m, 30m, 1h, 1d", - captured.out) + assert re.match( + "Timeframes available for the exchange `Bybit`: 1m, 5m, 30m, 1h, 1d", captured.out + ) - api_mock.timeframes = {'1m': '1m', - '5m': '5m', - '15m': '15m', - '30m': '30m', - '1h': '1h', - '6h': '6h', - '12h': '12h', - '1d': '1d', - '3d': '3d', - } - patch_exchange(mocker, api_mock=api_mock, id='binance') + api_mock.timeframes = { + "1m": "1m", + "5m": "5m", + "15m": "15m", + "30m": "30m", + "1h": "1h", + "6h": "6h", + "12h": "12h", + "1d": "1d", + "3d": "3d", + } + patch_exchange(mocker, api_mock=api_mock, id="binance") # Test with --exchange binance args = [ 
"list-timeframes", - "--exchange", "binance", + "--exchange", + "binance", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() - assert re.match("Timeframes available for the exchange `Binance`: " - "1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d", - captured.out) + assert re.match( + "Timeframes available for the exchange `Binance`: 1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d", + captured.out, + ) # Test with --one-column args = [ "list-timeframes", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "--config", + "tests/testdata/testconfigs/main_test_config.json", "--one-column", ] start_list_timeframes(get_args(args)) @@ -212,7 +244,8 @@ def test_list_timeframes(mocker, capsys): # Test with --exchange binance --one-column args = [ "list-timeframes", - "--exchange", "binance", + "--exchange", + "binance", "--one-column", ] start_list_timeframes(get_args(args)) @@ -224,259 +257,311 @@ def test_list_timeframes(mocker, capsys): def test_list_markets(mocker, markets_static, capsys): - api_mock = MagicMock() - patch_exchange(mocker, api_mock=api_mock, id='binance', mock_markets=markets_static) + patch_exchange(mocker, api_mock=api_mock, id="binance", mock_markets=markets_static) # Test with no --config args = [ "list-markets", ] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"This command requires a configured exchange.*"): + pargs["config"] = None + with pytest.raises( + OperationalException, match=r"This command requires a configured exchange.*" + ): start_list_markets(pargs, False) # Test with --config tests/testdata/testconfigs/main_test_config.json args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "--config", + "tests/testdata/testconfigs/main_test_config.json", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 12 active markets: " - "ADA/USDT:USDT, BLK/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, " - "LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" - in captured.out) + assert ( + "Exchange Binance has 12 active markets: " + "ADA/USDT:USDT, BLK/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, " + "LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" in captured.out + ) patch_exchange(mocker, api_mock=api_mock, id="binance", mock_markets=markets_static) # Test with --exchange - args = [ - "list-markets", - "--exchange", "binance" - ] + args = ["list-markets", "--exchange", "binance"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_markets(pargs, False) captured = capsys.readouterr() - assert re.match("\nExchange Binance has 12 active markets:\n", - captured.out) + assert re.match("\nExchange Binance has 12 active markets:\n", captured.out) patch_exchange(mocker, api_mock=api_mock, id="binance", mock_markets=markets_static) # Test with --all: all markets args = [ - "list-markets", "--all", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "list-markets", + "--all", + "--config", + "tests/testdata/testconfigs/main_test_config.json", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 14 markets: " - "ADA/USDT:USDT, BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, " - "LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" - in captured.out) + assert ( + "Exchange Binance has 14 markets: " + "ADA/USDT:USDT, BLK/BTC, BTT/BTC, ETH/BTC, 
ETH/USDT, ETH/USDT:USDT, " + "LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" + in captured.out + ) # Test list-pairs subcommand: active pairs args = [ "list-pairs", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "--config", + "tests/testdata/testconfigs/main_test_config.json", "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() - assert ("Exchange Binance has 9 active pairs: " - "BLK/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XRP/BTC.\n" - in captured.out) + assert ( + "Exchange Binance has 9 active pairs: " + "BLK/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XRP/BTC.\n" + in captured.out + ) # Test list-pairs subcommand with --all: all pairs args = [ - "list-pairs", "--all", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "list-pairs", + "--all", + "--config", + "tests/testdata/testconfigs/main_test_config.json", "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() - assert ("Exchange Binance has 11 pairs: " - "BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, " - "TKN/BTC, XRP/BTC.\n" - in captured.out) + assert ( + "Exchange Binance has 11 pairs: " + "BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, " + "TKN/BTC, XRP/BTC.\n" in captured.out + ) # active markets, base=ETH, LTC args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "ETH", "LTC", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "ETH", + "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 7 active markets with ETH, LTC as base currencies: " - "ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 7 active markets with ETH, LTC as base currencies: " + "ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out + ) # active markets, base=LTC args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 4 active markets with LTC as base currency: " - "LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 4 active markets with LTC as base currency: " + "LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out + ) # active markets, quote=USDT, USD args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--quote", "USDT", "USD", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--quote", + "USDT", + "USD", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 5 active markets with USDT, USD as quote currencies: " - "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, LTC/USD, XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 5 active markets with USDT, USD as quote currencies: " + "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, LTC/USD, XLTCUSDT.\n" in captured.out + ) # active markets, quote=USDT args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - 
"--quote", "USDT", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--quote", + "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 4 active markets with USDT as quote currency: " - "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 4 active markets with USDT as quote currency: " + "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, XLTCUSDT.\n" in captured.out + ) # active markets, base=LTC, quote=USDT args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", "--quote", "USDT", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", + "--quote", + "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 1 active market with LTC as base currency and " - "with USDT as quote currency: XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 1 active market with LTC as base currency and " + "with USDT as quote currency: XLTCUSDT.\n" in captured.out + ) # active pairs, base=LTC, quote=USDT args = [ "list-pairs", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", "--quote", "USD", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", + "--quote", + "USD", "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() - assert ("Exchange Binance has 1 active pair with LTC as base currency and " - "with USD as quote currency: LTC/USD.\n" - in captured.out) + assert ( + "Exchange Binance has 1 active pair with LTC as base currency and " + "with USD as quote currency: LTC/USD.\n" in captured.out + ) # active markets, base=LTC, quote=USDT, NONEXISTENT args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", "--quote", "USDT", "NONEXISTENT", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", + "--quote", + "USDT", + "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 1 active market with LTC as base currency and " - "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" - in captured.out) + assert ( + "Exchange Binance has 1 active market with LTC as base currency and " + "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" in captured.out + ) # active markets, base=LTC, quote=NONEXISTENT args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", "--quote", "NONEXISTENT", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", + "--quote", + "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 0 active markets with LTC as base currency and " - "with NONEXISTENT as quote currency.\n" - in captured.out) + assert ( + "Exchange Binance has 0 active markets with LTC as base currency and " + "with NONEXISTENT as quote currency.\n" in captured.out + ) # Test tabular output args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', + "--config", + "tests/testdata/testconfigs/main_test_config.json", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 12 active markets:\n" 
- in captured.out) + assert "Exchange Binance has 12 active markets:\n" in captured.out # Test tabular output, no markets found args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--base", "LTC", "--quote", "NONEXISTENT", + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--base", + "LTC", + "--quote", + "NONEXISTENT", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Exchange Binance has 0 active markets with LTC as base currency and " - "with NONEXISTENT as quote currency.\n" - in captured.out) + assert ( + "Exchange Binance has 0 active markets with LTC as base currency and " + "with NONEXISTENT as quote currency.\n" in captured.out + ) # Test --print-json args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--print-json" + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--print-json", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ('["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' - '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' - in captured.out) + assert ( + '["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' + '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' in captured.out + ) # Test --print-csv args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--print-csv" + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--print-csv", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() - assert ("Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out) - assert ("blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out) - assert ("USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out) + assert "Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out + assert "blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out + assert "USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out # Test --one-column args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--one-column" + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--one-column", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE) assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE) - mocker.patch(f'{EXMS}.markets', PropertyMock(side_effect=ValueError)) + mocker.patch(f"{EXMS}.markets", PropertyMock(side_effect=ValueError)) # Test --one-column args = [ "list-markets", - '--config', 'tests/testdata/testconfigs/main_test_config.json', - "--one-column" + "--config", + "tests/testdata/testconfigs/main_test_config.json", + "--one-column", ] with pytest.raises(OperationalException, match=r"Cannot get markets.*"): start_list_markets(get_args(args), False) def test_create_datadir_failed(caplog): - args = [ "create-userdir", ] @@ -486,14 +571,9 @@ def test_create_datadir_failed(caplog): def test_create_datadir(caplog, mocker): - cud = mocker.patch("freqtrade.commands.deploy_commands.create_userdata_dir", MagicMock()) csf = mocker.patch("freqtrade.commands.deploy_commands.copy_sample_files", MagicMock()) - args = [ - "create-userdir", - "--userdir", - "/temp/freqtrade/test" - ] + args = ["create-userdir", "--userdir", "/temp/freqtrade/test"] start_create_userdir(get_args(args)) assert cud.call_count == 1 @@ -504,21 +584,18 @@ def 
test_start_new_strategy(mocker, caplog): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=False)) - args = [ - "new-strategy", - "--strategy", - "CoolNewStrategy" - ] + args = ["new-strategy", "--strategy", "CoolNewStrategy"] start_new_strategy(get_args(args)) assert wt_mock.call_count == 1 assert "CoolNewStrategy" in wt_mock.call_args_list[0][0][0] assert log_has_re("Writing strategy to .*", caplog) - mocker.patch('freqtrade.commands.deploy_commands.setup_utils_configuration') + mocker.patch("freqtrade.commands.deploy_commands.setup_utils_configuration") mocker.patch.object(Path, "exists", MagicMock(return_value=True)) - with pytest.raises(OperationalException, - match=r".* already exists. Please choose another Strategy Name\."): + with pytest.raises( + OperationalException, match=r".* already exists. Please choose another Strategy Name\." + ): start_new_strategy(get_args(args)) @@ -526,17 +603,18 @@ def test_start_new_strategy_no_arg(mocker, caplog): args = [ "new-strategy", ] - with pytest.raises(OperationalException, - match="`new-strategy` requires --strategy to be set."): + with pytest.raises(OperationalException, match="`new-strategy` requires --strategy to be set."): start_new_strategy(get_args(args)) def test_start_install_ui(mocker): - clean_mock = mocker.patch('freqtrade.commands.deploy_commands.clean_ui_subdir') - get_url_mock = mocker.patch('freqtrade.commands.deploy_commands.get_ui_download_url', - return_value=('https://example.com/whatever', '0.0.1')) - download_mock = mocker.patch('freqtrade.commands.deploy_commands.download_and_install_ui') - mocker.patch('freqtrade.commands.deploy_commands.read_ui_version', return_value=None) + clean_mock = mocker.patch("freqtrade.commands.deploy_commands.clean_ui_subdir") + get_url_mock = mocker.patch( + "freqtrade.commands.deploy_commands.get_ui_download_url", + return_value=("https://example.com/whatever", "0.0.1"), + ) + download_mock = mocker.patch("freqtrade.commands.deploy_commands.download_and_install_ui") + mocker.patch("freqtrade.commands.deploy_commands.read_ui_version", return_value=None) args = [ "install-ui", ] @@ -560,15 +638,15 @@ def test_start_install_ui(mocker): def test_clean_ui_subdir(mocker, tmp_path, caplog): - mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir", - side_effect=[True, True]) - mocker.patch("freqtrade.commands.deploy_commands.Path.is_file", - side_effect=[False, True]) + mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir", side_effect=[True, True]) + mocker.patch("freqtrade.commands.deploy_commands.Path.is_file", side_effect=[False, True]) rd_mock = mocker.patch("freqtrade.commands.deploy_commands.Path.rmdir") ul_mock = mocker.patch("freqtrade.commands.deploy_commands.Path.unlink") - mocker.patch("freqtrade.commands.deploy_commands.Path.glob", - return_value=[Path('test1'), Path('test2'), Path('.gitkeep')]) + mocker.patch( + "freqtrade.commands.deploy_commands.Path.glob", + return_value=[Path("test1"), Path("test2"), Path(".gitkeep")], + ) folder = tmp_path / "uitests" clean_ui_subdir(folder) assert log_has("Removing UI directory content.", caplog) @@ -580,16 +658,15 @@ def test_download_and_install_ui(mocker, tmp_path): # Create zipfile requests_mock = MagicMock() file_like_object = BytesIO() - with ZipFile(file_like_object, mode='w') as zipfile: - for file in ('test1.txt', 'hello/', 'test2.txt'): + with ZipFile(file_like_object, mode="w") as zipfile: + for file in ("test1.txt", "hello/", "test2.txt"): 
zipfile.writestr(file, file) file_like_object.seek(0) requests_mock.content = file_like_object.read() mocker.patch("freqtrade.commands.deploy_commands.requests.get", return_value=requests_mock) - mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir", - side_effect=[True, False]) + mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir", side_effect=[True, False]) wb_mock = mocker.patch("freqtrade.commands.deploy_commands.Path.write_bytes") folder = tmp_path / "uitests_dl" @@ -597,24 +674,28 @@ def test_download_and_install_ui(mocker, tmp_path): assert read_ui_version(folder) is None - download_and_install_ui(folder, 'http://whatever.xxx/download/file.zip', '22') + download_and_install_ui(folder, "http://whatever.xxx/download/file.zip", "22") assert wb_mock.call_count == 2 - assert read_ui_version(folder) == '22' + assert read_ui_version(folder) == "22" def test_get_ui_download_url(mocker): response = MagicMock() response.json = MagicMock( - side_effect=[[{'assets_url': 'http://whatever.json', 'name': '0.0.1'}], - [{'browser_download_url': 'http://download.zip'}]]) - get_mock = mocker.patch("freqtrade.commands.deploy_commands.requests.get", - return_value=response) + side_effect=[ + [{"assets_url": "http://whatever.json", "name": "0.0.1"}], + [{"browser_download_url": "http://download.zip"}], + ] + ) + get_mock = mocker.patch( + "freqtrade.commands.deploy_commands.requests.get", return_value=response + ) x, last_version = get_ui_download_url() assert get_mock.call_count == 2 - assert last_version == '0.0.1' - assert x == 'http://download.zip' + assert last_version == "0.0.1" + assert x == "http://download.zip" def test_get_ui_download_url_direct(mocker): @@ -622,46 +703,53 @@ def test_get_ui_download_url_direct(mocker): response.json = MagicMock( return_value=[ { - 'assets_url': 'http://whatever.json', - 'name': '0.0.2', - 'assets': [{'browser_download_url': 'http://download22.zip'}] + "assets_url": "http://whatever.json", + "name": "0.0.2", + "assets": [{"browser_download_url": "http://download22.zip"}], }, { - 'assets_url': 'http://whatever.json', - 'name': '0.0.1', - 'assets': [{'browser_download_url': 'http://download1.zip'}] + "assets_url": "http://whatever.json", + "name": "0.0.1", + "assets": [{"browser_download_url": "http://download1.zip"}], }, - ]) - get_mock = mocker.patch("freqtrade.commands.deploy_commands.requests.get", - return_value=response) + ] + ) + get_mock = mocker.patch( + "freqtrade.commands.deploy_commands.requests.get", return_value=response + ) x, last_version = get_ui_download_url() assert get_mock.call_count == 1 - assert last_version == '0.0.2' - assert x == 'http://download22.zip' + assert last_version == "0.0.2" + assert x == "http://download22.zip" get_mock.reset_mock() response.json.reset_mock() - x, last_version = get_ui_download_url('0.0.1') - assert last_version == '0.0.1' - assert x == 'http://download1.zip' + x, last_version = get_ui_download_url("0.0.1") + assert last_version == "0.0.1" + assert x == "http://download1.zip" with pytest.raises(ValueError, match="UI-Version not found."): - x, last_version = get_ui_download_url('0.0.3') + x, last_version = get_ui_download_url("0.0.3") def test_download_data_keyboardInterrupt(mocker, markets): - dl_mock = mocker.patch('freqtrade.commands.data_commands.download_data_main', - MagicMock(side_effect=KeyboardInterrupt)) + dl_mock = mocker.patch( + "freqtrade.commands.data_commands.download_data_main", + MagicMock(side_effect=KeyboardInterrupt), + ) patch_exchange(mocker) - 
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) args = [ "download-data", - "--exchange", "binance", - "--pairs", "ETH/BTC", "XRP/BTC", + "--exchange", + "binance", + "--pairs", + "ETH/BTC", + "XRP/BTC", ] with pytest.raises(SystemExit): pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_download_data(pargs) @@ -669,120 +757,140 @@ def test_download_data_keyboardInterrupt(mocker, markets): def test_download_data_timerange(mocker, markets): - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) patch_exchange(mocker) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - args = [ - "download-data", - "--exchange", "binance", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20", - "--timerange", "20200101-" - ] - with pytest.raises(OperationalException, - match=r"--days and --timerange are mutually.*"): - pargs = get_args(args) - pargs['config'] = None - start_download_data(pargs) - assert dl_mock.call_count == 0 - - args = [ - "download-data", - "--exchange", "binance", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20", - ] - pargs = get_args(args) - pargs['config'] = None - start_download_data(pargs) - assert dl_mock.call_count == 1 - # 20days ago - days_ago = dt_floor_day(dt_now() - timedelta(days=20)).timestamp() - assert dl_mock.call_args_list[0][1]['timerange'].startts == days_ago - - dl_mock.reset_mock() - args = [ - "download-data", - "--exchange", "binance", - "--pairs", "ETH/BTC", "XRP/BTC", - "--timerange", "20200101-" - ] - pargs = get_args(args) - pargs['config'] = None - start_download_data(pargs) - assert dl_mock.call_count == 1 - - assert dl_mock.call_args_list[0][1]['timerange'].startts == int(dt_utc(2020, 1, 1).timestamp()) - - -def test_download_data_no_markets(mocker, caplog): - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) - patch_exchange(mocker, id='binance') - mocker.patch(f'{EXMS}.get_markets', return_value={}) - args = [ - "download-data", - "--exchange", "binance", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20" - ] - start_download_data(get_args(args)) - assert dl_mock.call_args[1]['timerange'].starttype == "date" - assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog) - - -def test_download_data_no_exchange(mocker): - mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) - patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_markets', return_value={}) - args = [ - "download-data", - ] - pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"This command requires a configured exchange.*"): - start_download_data(pargs) - - -def test_download_data_no_pairs(mocker): - - mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) - patch_exchange(mocker) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={})) - args = [ - "download-data", - "--exchange", - "binance", - ] - pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - 
match=r"Downloading data requires a list of pairs\..*"): - start_download_data(pargs) - - -def test_download_data_all_pairs(mocker, markets): - - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) - patch_exchange(mocker) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) args = [ "download-data", "--exchange", "binance", "--pairs", - ".*/USDT" + "ETH/BTC", + "XRP/BTC", + "--days", + "20", + "--timerange", + "20200101-", + ] + with pytest.raises(OperationalException, match=r"--days and --timerange are mutually.*"): + pargs = get_args(args) + pargs["config"] = None + start_download_data(pargs) + assert dl_mock.call_count == 0 + + args = [ + "download-data", + "--exchange", + "binance", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--days", + "20", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_download_data(pargs) - expected = set(['BTC/USDT', 'ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT']) - assert set(dl_mock.call_args_list[0][1]['pairs']) == expected + assert dl_mock.call_count == 1 + # 20days ago + days_ago = dt_floor_day(dt_now() - timedelta(days=20)).timestamp() + assert dl_mock.call_args_list[0][1]["timerange"].startts == days_ago + + dl_mock.reset_mock() + args = [ + "download-data", + "--exchange", + "binance", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--timerange", + "20200101-", + ] + pargs = get_args(args) + pargs["config"] = None + start_download_data(pargs) + assert dl_mock.call_count == 1 + + assert dl_mock.call_args_list[0][1]["timerange"].startts == int(dt_utc(2020, 1, 1).timestamp()) + + +def test_download_data_no_markets(mocker, caplog): + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) + patch_exchange(mocker, id="binance") + mocker.patch(f"{EXMS}.get_markets", return_value={}) + args = [ + "download-data", + "--exchange", + "binance", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--days", + "20", + ] + start_download_data(get_args(args)) + assert dl_mock.call_args[1]["timerange"].starttype == "date" + assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog) + + +def test_download_data_no_exchange(mocker): + mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) + patch_exchange(mocker) + mocker.patch(f"{EXMS}.get_markets", return_value={}) + args = [ + "download-data", + ] + pargs = get_args(args) + pargs["config"] = None + with pytest.raises( + OperationalException, match=r"This command requires a configured exchange.*" + ): + start_download_data(pargs) + + +def test_download_data_no_pairs(mocker): + mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) + patch_exchange(mocker) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value={})) + args = [ + "download-data", + "--exchange", + "binance", + ] + pargs = get_args(args) + pargs["config"] = None + with pytest.raises( + OperationalException, match=r"Downloading data requires a list of pairs\..*" + ): + start_download_data(pargs) + + +def test_download_data_all_pairs(mocker, markets): + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", 
"XRP/BTC"]), + ) + patch_exchange(mocker) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + args = ["download-data", "--exchange", "binance", "--pairs", ".*/USDT"] + pargs = get_args(args) + pargs["config"] = None + start_download_data(pargs) + expected = set(["BTC/USDT", "ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"]) + assert set(dl_mock.call_args_list[0][1]["pairs"]) == expected assert dl_mock.call_count == 1 dl_mock.reset_mock() @@ -795,79 +903,98 @@ def test_download_data_all_pairs(mocker, markets): "--include-inactive-pairs", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_download_data(pargs) - expected = set(['BTC/USDT', 'ETH/USDT', 'LTC/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT']) - assert set(dl_mock.call_args_list[0][1]['pairs']) == expected + expected = set(["BTC/USDT", "ETH/USDT", "LTC/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"]) + assert set(dl_mock.call_args_list[0][1]["pairs"]) == expected def test_download_data_trades(mocker): - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_trades_data', - MagicMock(return_value=[])) - convert_mock = mocker.patch('freqtrade.data.history.history_utils.convert_trades_to_ohlcv', - MagicMock(return_value=[])) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_trades_data", + MagicMock(return_value=[]), + ) + convert_mock = mocker.patch( + "freqtrade.data.history.history_utils.convert_trades_to_ohlcv", MagicMock(return_value=[]) + ) patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_markets', return_value={}) + mocker.patch(f"{EXMS}.get_markets", return_value={}) args = [ "download-data", - "--exchange", "kraken", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20", - "--dl-trades" + "--exchange", + "kraken", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--days", + "20", + "--dl-trades", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_download_data(pargs) - assert dl_mock.call_args[1]['timerange'].starttype == "date" + assert dl_mock.call_args[1]["timerange"].starttype == "date" assert dl_mock.call_count == 1 assert convert_mock.call_count == 1 args = [ "download-data", - "--exchange", "kraken", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20", - "--trading-mode", "futures", - "--dl-trades" + "--exchange", + "kraken", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--days", + "20", + "--trading-mode", + "futures", + "--dl-trades", ] def test_download_data_data_invalid(mocker): patch_exchange(mocker, id="kraken") - mocker.patch(f'{EXMS}.get_markets', return_value={}) + mocker.patch(f"{EXMS}.get_markets", return_value={}) args = [ "download-data", - "--exchange", "kraken", - "--pairs", "ETH/BTC", "XRP/BTC", - "--days", "20", + "--exchange", + "kraken", + "--pairs", + "ETH/BTC", + "XRP/BTC", + "--days", + "20", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None with pytest.raises(OperationalException, match=r"Historic klines not available for .*"): start_download_data(pargs) def test_start_convert_trades(mocker): - convert_mock = mocker.patch('freqtrade.commands.data_commands.convert_trades_to_ohlcv', - MagicMock(return_value=[])) + convert_mock = mocker.patch( + "freqtrade.commands.data_commands.convert_trades_to_ohlcv", MagicMock(return_value=[]) + ) patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_markets') - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={})) + mocker.patch(f"{EXMS}.get_markets") + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value={})) args 
= [ "trades-to-ohlcv", - "--exchange", "kraken", - "--pairs", "ETH/BTC", "XRP/BTC", + "--exchange", + "kraken", + "--pairs", + "ETH/BTC", + "XRP/BTC", ] start_convert_trades(get_args(args)) assert convert_mock.call_count == 1 def test_start_list_strategies(capsys): - args = [ "list-strategies", "--strategy-path", str(Path(__file__).parent.parent / "strategy" / "strats"), - "-1" + "-1", ] pargs = get_args(args) # pargs['config'] = None @@ -882,7 +1009,7 @@ def test_start_list_strategies(capsys): "list-strategies", "--strategy-path", str(Path(__file__).parent.parent / "strategy" / "strats"), - '--no-color', + "--no-color", ] pargs = get_args(args) # pargs['config'] = None @@ -914,8 +1041,8 @@ def test_start_list_strategies(capsys): "list-strategies", "--strategy-path", str(Path(__file__).parent.parent / "strategy" / "strats"), - '--no-color', - '--recursive-strategy-search' + "--no-color", + "--recursive-strategy-search", ] pargs = get_args(args) # pargs['config'] = None @@ -929,13 +1056,9 @@ def test_start_list_strategies(capsys): def test_start_list_freqAI_models(capsys): - - args = [ - "list-freqaimodels", - "-1" - ] + args = ["list-freqaimodels", "-1"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_freqAI_models(pargs) captured = capsys.readouterr() assert "LightGBMClassifier" in captured.out @@ -947,7 +1070,7 @@ def test_start_list_freqAI_models(capsys): "list-freqaimodels", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_freqAI_models(pargs) captured = capsys.readouterr() assert "LightGBMClassifier" in captured.out @@ -958,12 +1081,13 @@ def test_start_list_freqAI_models(capsys): def test_start_test_pairlist(mocker, caplog, tickers, default_conf, capsys): patch_exchange(mocker, mock_markets=True) - mocker.patch.multiple(EXMS, - exchange_has=MagicMock(return_value=True), - get_tickers=tickers, - ) + mocker.patch.multiple( + EXMS, + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - default_conf['pairlists'] = [ + default_conf["pairlists"] = [ { "method": "VolumePairList", "number_assets": 5, @@ -974,10 +1098,7 @@ def test_start_test_pairlist(mocker, caplog, tickers, default_conf, capsys): ] patched_configuration_load_config_file(mocker, default_conf) - args = [ - 'test-pairlist', - '-c', 'tests/testdata/testconfigs/main_test_config.json' - ] + args = ["test-pairlist", "-c", "tests/testdata/testconfigs/main_test_config.json"] start_test_pairlist(get_args(args)) @@ -989,45 +1110,47 @@ def test_start_test_pairlist(mocker, caplog, tickers, default_conf, capsys): assert re.match("['ETH/BTC', 'TKN/BTC', 'BLK/BTC', 'LTC/BTC', 'XRP/BTC']", captured.out) args = [ - 'test-pairlist', - '-c', 'tests/testdata/testconfigs/main_test_config.json', - '--one-column', + "test-pairlist", + "-c", + "tests/testdata/testconfigs/main_test_config.json", + "--one-column", ] start_test_pairlist(get_args(args)) captured = capsys.readouterr() assert re.match(r"ETH/BTC\nTKN/BTC\nBLK/BTC\nLTC/BTC\nXRP/BTC\n", captured.out) args = [ - 'test-pairlist', - '-c', 'tests/testdata/testconfigs/main_test_config.json', - '--print-json', + "test-pairlist", + "-c", + "tests/testdata/testconfigs/main_test_config.json", + "--print-json", ] start_test_pairlist(get_args(args)) captured = capsys.readouterr() try: json_pairs = json.loads(captured.out) - assert 'ETH/BTC' in json_pairs - assert 'TKN/BTC' in json_pairs - assert 'BLK/BTC' in json_pairs - assert 'LTC/BTC' in json_pairs - assert 'XRP/BTC' in json_pairs + assert "ETH/BTC" in 
json_pairs + assert "TKN/BTC" in json_pairs + assert "BLK/BTC" in json_pairs + assert "LTC/BTC" in json_pairs + assert "XRP/BTC" in json_pairs except json.decoder.JSONDecodeError: - pytest.fail(f'Expected well formed JSON, but failed to parse: {captured.out}') + pytest.fail(f"Expected well formed JSON, but failed to parse: {captured.out}") -def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path): +def test_hyperopt_list(mocker, capsys, caplog, tmp_path): + saved_hyperopt_results = hyperopt_test_result() csv_file = tmp_path / "test.csv" mocker.patch( - 'freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist', - return_value=True + "freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist", + return_value=True, ) def fake_iterator(*args, **kwargs): yield from [saved_hyperopt_results] mocker.patch( - 'freqtrade.optimize.hyperopt_tools.HyperoptTools._read_results', - side_effect=fake_iterator + "freqtrade.optimize.hyperopt_tools.HyperoptTools._read_results", side_effect=fake_iterator ) args = [ @@ -1036,13 +1159,26 @@ def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path) "--no-color", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 4/12", " 5/12", - " 6/12", " 7/12", " 8/12", " 9/12", " 10/12", - " 11/12", " 12/12"]) + assert all( + x in captured.out + for x in [ + " 1/12", + " 2/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 10/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--best", @@ -1050,14 +1186,14 @@ def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path) "--no-color", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 1/12", " 5/12", " 10/12"]) - assert all(x not in captured.out - for x in [" 2/12", " 3/12", " 4/12", " 6/12", " 7/12", " 8/12", " 9/12", - " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 1/12", " 5/12", " 10/12"]) + assert all( + x not in captured.out + for x in [" 2/12", " 3/12", " 4/12", " 6/12", " 7/12", " 8/12", " 9/12", " 11/12", " 12/12"] + ) args = [ "hyperopt-list", "--profitable", @@ -1065,180 +1201,293 @@ def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path) "--no-color", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 2/12", " 10/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", - " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 2/12", " 10/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--profitable", "--no-color", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 2/12", " 10/12", "Best result:", "Buy hyperspace params", - "Sell hyperspace params", "ROI table", "Stoploss"]) - assert all(x not in captured.out - for x in [" 1/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", 
" 8/12", " 9/12", - " 11/12", " 12/12"]) + assert all( + x in captured.out + for x in [ + " 2/12", + " 10/12", + "Best result:", + "Buy hyperspace params", + "Sell hyperspace params", + "ROI table", + "Stoploss", + ] + ) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", "--no-color", - "--min-trades", "20", + "--min-trades", + "20", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 3/12", " 6/12", " 7/12", " 9/12", " 11/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 2/12", " 4/12", " 5/12", " 8/12", " 10/12", " 12/12"]) + assert all(x in captured.out for x in [" 3/12", " 6/12", " 7/12", " 9/12", " 11/12"]) + assert all( + x not in captured.out + for x in [" 1/12", " 2/12", " 4/12", " 5/12", " 8/12", " 10/12", " 12/12"] + ) args = [ "hyperopt-list", "--profitable", "--no-details", "--no-color", - "--max-trades", "20", + "--max-trades", + "20", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 2/12", " 10/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", - " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 2/12", " 10/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--profitable", "--no-details", "--no-color", - "--min-avg-profit", "0.11", + "--min-avg-profit", + "0.11", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 2/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", - " 10/12", " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 2/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 10/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", "--no-color", - "--max-avg-profit", "0.10", + "--max-avg-profit", + "0.10", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 1/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", - " 11/12"]) - assert all(x not in captured.out - for x in [" 2/12", " 4/12", " 10/12", " 12/12"]) + assert all( + x in captured.out + for x in [" 1/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", " 11/12"] + ) + assert all(x not in captured.out for x in [" 2/12", " 4/12", " 10/12", " 12/12"]) args = [ "hyperopt-list", "--no-details", "--no-color", - "--min-total-profit", "0.4", + "--min-total-profit", + "0.4", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 10/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", " 8/12", - " 9/12", " 11/12", " 
12/12"]) + assert all(x in captured.out for x in [" 10/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 2/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", "--no-color", - "--max-total-profit", "0.4", + "--max-total-profit", + "0.4", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", - " 9/12", " 11/12"]) - assert all(x not in captured.out - for x in [" 4/12", " 10/12", " 12/12"]) + assert all( + x in captured.out + for x in [" 1/12", " 2/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", " 11/12"] + ) + assert all(x not in captured.out for x in [" 4/12", " 10/12", " 12/12"]) args = [ "hyperopt-list", "--no-details", "--no-color", - "--min-objective", "0.1", + "--min-objective", + "0.1", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 10/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", " 8/12", - " 9/12", " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 10/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 2/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", - "--max-objective", "0.1", + "--max-objective", + "0.1", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", - " 9/12", " 11/12"]) - assert all(x not in captured.out - for x in [" 4/12", " 10/12", " 12/12"]) + assert all( + x in captured.out + for x in [" 1/12", " 2/12", " 3/12", " 5/12", " 6/12", " 7/12", " 8/12", " 9/12", " 11/12"] + ) + assert all(x not in captured.out for x in [" 4/12", " 10/12", " 12/12"]) args = [ "hyperopt-list", "--profitable", "--no-details", "--no-color", - "--min-avg-time", "2000", + "--min-avg-time", + "2000", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 10/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 2/12", " 3/12", " 4/12", " 5/12", " 6/12", " 7/12", - " 8/12", " 9/12", " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 10/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 2/12", + " 3/12", + " 4/12", + " 5/12", + " 6/12", + " 7/12", + " 8/12", + " 9/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", "--no-color", - "--max-avg-time", "1500", + "--max-avg-time", + "1500", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() - assert all(x in captured.out - for x in [" 2/12", " 6/12"]) - assert all(x not in captured.out - for x in [" 1/12", " 3/12", " 4/12", " 5/12", " 7/12", " 8/12" - " 9/12", " 10/12", " 11/12", " 12/12"]) + assert all(x in captured.out for x in [" 2/12", " 6/12"]) + assert all( + x not in captured.out + for x in [ + " 1/12", + " 3/12", + " 4/12", + " 5/12", + " 
7/12", + " 8/12", + " 9/12", + " 10/12", + " 11/12", + " 12/12", + ] + ) args = [ "hyperopt-list", "--no-details", @@ -1247,103 +1496,85 @@ def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path) str(csv_file), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_list(pargs) captured = capsys.readouterr() log_has("CSV file created: test_file.csv", caplog) assert csv_file.is_file() line = csv_file.read_text() - assert ('Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,"3,930.0 m",0.43662' in line - or "Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,2 days 17:30:00,2,0,0.43662" in line) + assert ( + 'Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,"3,930.0 m",0.43662' in line + or "Best,1,2,-1.25%,-1.2222,-0.00125625,,-2.51,2 days 17:30:00,2,0,0.43662" in line + ) csv_file.unlink() -def test_hyperopt_show(mocker, capsys, saved_hyperopt_results): +def test_hyperopt_show(mocker, capsys): + saved_hyperopt_results = hyperopt_test_result() mocker.patch( - 'freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist', - return_value=True + "freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist", + return_value=True, ) def fake_iterator(*args, **kwargs): yield from [saved_hyperopt_results] mocker.patch( - 'freqtrade.optimize.hyperopt_tools.HyperoptTools._read_results', - side_effect=fake_iterator + "freqtrade.optimize.hyperopt_tools.HyperoptTools._read_results", side_effect=fake_iterator ) - mocker.patch('freqtrade.commands.hyperopt_commands.show_backtest_result') + mocker.patch("freqtrade.commands.hyperopt_commands.show_backtest_result") args = [ "hyperopt-show", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_show(pargs) captured = capsys.readouterr() assert " 12/12" in captured.out - args = [ - "hyperopt-show", - "--best" - ] + args = ["hyperopt-show", "--best"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_show(pargs) captured = capsys.readouterr() assert " 10/12" in captured.out - args = [ - "hyperopt-show", - "-n", "1" - ] + args = ["hyperopt-show", "-n", "1"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_show(pargs) captured = capsys.readouterr() assert " 1/12" in captured.out - args = [ - "hyperopt-show", - "--best", - "-n", "2" - ] + args = ["hyperopt-show", "--best", "-n", "2"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_show(pargs) captured = capsys.readouterr() assert " 5/12" in captured.out - args = [ - "hyperopt-show", - "--best", - "-n", "-1" - ] + args = ["hyperopt-show", "--best", "-n", "-1"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_hyperopt_show(pargs) captured = capsys.readouterr() assert " 10/12" in captured.out - args = [ - "hyperopt-show", - "--best", - "-n", "-4" - ] + args = ["hyperopt-show", "--best", "-n", "-4"] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match="The index of the epoch to show should be greater than -4."): + pargs["config"] = None + with pytest.raises( + OperationalException, match="The index of the epoch to show should be greater than -4." 
+ ): start_hyperopt_show(pargs) - args = [ - "hyperopt-show", - "--best", - "-n", "4" - ] + args = ["hyperopt-show", "--best", "-n", "4"] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match="The index of the epoch to show should be less than 4."): + pargs["config"] = None + with pytest.raises( + OperationalException, match="The index of the epoch to show should be less than 4." + ): start_hyperopt_show(pargs) @@ -1360,13 +1591,13 @@ def test_convert_data(mocker, testdatadir): str(testdatadir), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_convert_data(pargs, True) assert trades_mock.call_count == 0 assert ohlcv_mock.call_count == 1 - assert ohlcv_mock.call_args[1]['convert_from'] == 'json' - assert ohlcv_mock.call_args[1]['convert_to'] == 'jsongz' - assert ohlcv_mock.call_args[1]['erase'] is False + assert ohlcv_mock.call_args[1]["convert_from"] == "json" + assert ohlcv_mock.call_args[1]["convert_to"] == "jsongz" + assert ohlcv_mock.call_args[1]["erase"] is False def test_convert_data_trades(mocker, testdatadir): @@ -1382,13 +1613,13 @@ def test_convert_data_trades(mocker, testdatadir): str(testdatadir), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_convert_data(pargs, False) assert ohlcv_mock.call_count == 0 assert trades_mock.call_count == 1 - assert trades_mock.call_args[1]['convert_from'] == 'jsongz' - assert trades_mock.call_args[1]['convert_to'] == 'json' - assert trades_mock.call_args[1]['erase'] is False + assert trades_mock.call_args[1]["convert_from"] == "jsongz" + assert trades_mock.call_args[1]["convert_to"] == "json" + assert trades_mock.call_args[1]["erase"] is False def test_start_list_data(testdatadir, capsys): @@ -1398,7 +1629,7 @@ def test_start_list_data(testdatadir, capsys): str(testdatadir), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_data(pargs) captured = capsys.readouterr() assert "Found 16 pair / timeframe combinations." in captured.out @@ -1409,12 +1640,13 @@ def test_start_list_data(testdatadir, capsys): "list-data", "--data-format-ohlcv", "feather", - "--pairs", "XRP/ETH", + "--pairs", + "XRP/ETH", "--datadir", str(testdatadir), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_data(pargs) captured = capsys.readouterr() assert "Found 2 pair / timeframe combinations." in captured.out @@ -1424,12 +1656,13 @@ def test_start_list_data(testdatadir, capsys): args = [ "list-data", - "--trading-mode", "futures", + "--trading-mode", + "futures", "--datadir", str(testdatadir), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_data(pargs) captured = capsys.readouterr() @@ -1440,51 +1673,44 @@ def test_start_list_data(testdatadir, capsys): args = [ "list-data", - "--pairs", "XRP/ETH", + "--pairs", + "XRP/ETH", "--datadir", str(testdatadir), "--show-timerange", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_list_data(pargs) captured = capsys.readouterr() assert "Found 2 pair / timeframe combinations." 
in captured.out assert ( "\n| Pair | Timeframe | Type " - "| From | To | Candles |\n") in captured.out + "| From | To | Candles |\n" + ) in captured.out assert "UNITTEST/BTC" not in captured.out assert ( "\n| XRP/ETH | 1m | spot | " - "2019-10-11 00:00:00 | 2019-10-13 11:19:00 | 2469 |\n") in captured.out + "2019-10-11 00:00:00 | 2019-10-13 11:19:00 | 2469 |\n" + ) in captured.out @pytest.mark.usefixtures("init_persistence") def test_show_trades(mocker, fee, capsys, caplog): mocker.patch("freqtrade.persistence.init_db") create_mock_trades(fee, False) - args = [ - "show-trades", - "--db-url", - "sqlite:///" - ] + args = ["show-trades", "--db-url", "sqlite:///"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_show_trades(pargs) assert log_has(f"Printing {MOCK_TRADE_COUNT} Trades: ", caplog) captured = capsys.readouterr() assert "Trade(id=1" in captured.out assert "Trade(id=2" in captured.out assert "Trade(id=3" in captured.out - args = [ - "show-trades", - "--db-url", - "sqlite:///", - "--print-json", - "--trade-ids", "1", "2" - ] + args = ["show-trades", "--db-url", "sqlite:///", "--print-json", "--trade-ids", "1", "2"] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_show_trades(pargs) captured = capsys.readouterr() @@ -1496,22 +1722,22 @@ def test_show_trades(mocker, fee, capsys, caplog): "show-trades", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None with pytest.raises(OperationalException, match=r"--db-url is required for this command."): start_show_trades(pargs) def test_backtesting_show(mocker, testdatadir, capsys): - sbr = mocker.patch('freqtrade.optimize.optimize_reports.show_backtest_results') + sbr = mocker.patch("freqtrade.optimize.optimize_reports.show_backtest_results") args = [ "backtesting-show", "--export-filename", f"{testdatadir / 'backtest_results/backtest-result.json'}", - "--show-pair-list" + "--show-pair-list", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_backtesting_show(pargs) assert sbr.call_count == 1 out, _err = capsys.readouterr() @@ -1536,21 +1762,21 @@ def test_start_convert_db(fee, tmp_path): create_mock_trades(fee) - PairLocks.timeframe = '5m' - PairLocks.lock_pair('XRP/USDT', datetime.now(), 'Random reason 125', side='long') + PairLocks.timeframe = "5m" + PairLocks.lock_pair("XRP/USDT", datetime.now(), "Random reason 125", side="long") assert db_src_file.is_file() assert not db_target_file.is_file() pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_convert_db(pargs) assert db_target_file.is_file() def test_start_strategy_updater(mocker, tmp_path): - sc_mock = mocker.patch('freqtrade.commands.strategy_utils_commands.start_conversion') - teststrats = Path(__file__).parent.parent / 'strategy/strats' + sc_mock = mocker.patch("freqtrade.commands.strategy_utils_commands.start_conversion") + teststrats = Path(__file__).parent.parent / "strategy/strats" args = [ "strategy-updater", "--userdir", @@ -1559,7 +1785,7 @@ def test_start_strategy_updater(mocker, tmp_path): str(teststrats), ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_strategy_update(pargs) # Number of strategies in the test directory assert sc_mock.call_count == 12 @@ -1573,10 +1799,10 @@ def test_start_strategy_updater(mocker, tmp_path): str(teststrats), "--strategy-list", "StrategyTestV3", - "StrategyTestV2" + "StrategyTestV2", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None 
start_strategy_update(pargs) # Number of strategies in the test directory assert sc_mock.call_count == 2 @@ -1600,7 +1826,7 @@ def test_start_show_config(capsys, caplog): "show-config", "--config", "tests/testdata/testconfigs/main_test_config.json", - "--show-sensitive" + "--show-sensitive", ] pargs = get_args(args) start_show_config(pargs) @@ -1609,4 +1835,4 @@ def test_start_show_config(capsys, caplog): assert "Your combined configuration is:" in captured.out assert '"max_open_trades":' in captured.out assert '"secret": "REDACTED"' not in captured.out - assert log_has_re(r'Sensitive information will be shown in the upcoming output.*', caplog) + assert log_has_re(r"Sensitive information will be shown in the upcoming output.*", caplog) diff --git a/tests/conftest.py b/tests/conftest.py index b46f30f8f..3686a548a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -24,27 +24,46 @@ from freqtrade.persistence import LocalTrade, Order, Trade, init_db from freqtrade.resolvers import ExchangeResolver from freqtrade.util import dt_now, dt_ts from freqtrade.worker import Worker -from tests.conftest_trades import (leverage_trade, mock_trade_1, mock_trade_2, mock_trade_3, - mock_trade_4, mock_trade_5, mock_trade_6, short_trade) -from tests.conftest_trades_usdt import (mock_trade_usdt_1, mock_trade_usdt_2, mock_trade_usdt_3, - mock_trade_usdt_4, mock_trade_usdt_5, mock_trade_usdt_6, - mock_trade_usdt_7) +from tests.conftest_trades import ( + leverage_trade, + mock_trade_1, + mock_trade_2, + mock_trade_3, + mock_trade_4, + mock_trade_5, + mock_trade_6, + short_trade, +) +from tests.conftest_trades_usdt import ( + mock_trade_usdt_1, + mock_trade_usdt_2, + mock_trade_usdt_3, + mock_trade_usdt_4, + mock_trade_usdt_5, + mock_trade_usdt_6, + mock_trade_usdt_7, +) -logging.getLogger('').setLevel(logging.INFO) +logging.getLogger("").setLevel(logging.INFO) # Do not mask numpy errors as warnings that no one read, raise the exсeption -np.seterr(all='raise') +np.seterr(all="raise") -CURRENT_TEST_STRATEGY = 'StrategyTestV3' -TRADE_SIDES = ('long', 'short') -EXMS = 'freqtrade.exchange.exchange.Exchange' +CURRENT_TEST_STRATEGY = "StrategyTestV3" +TRADE_SIDES = ("long", "short") +EXMS = "freqtrade.exchange.exchange.Exchange" def pytest_addoption(parser): - parser.addoption('--longrun', action='store_true', dest="longrun", - default=False, help="Enable long-run tests (ccxt compat)") + parser.addoption( + "--longrun", + action="store_true", + dest="longrun", + default=False, + help="Enable long-run tests (ccxt compat)", + ) def pytest_configure(config): @@ -52,7 +71,7 @@ def pytest_configure(config): "markers", "longrun: mark test that is running slowly and should not be run regularly" ) if not config.option.longrun: - config.option.markexpr = 'not longrun' + config.option.markexpr = "not longrun" class FixtureScheduler(LoadScopeScheduling): @@ -60,10 +79,10 @@ class FixtureScheduler(LoadScopeScheduling): # https://github.com/pytest-dev/pytest-xdist/issues/18 def _split_scope(self, nodeid): - if 'exchange_online' in nodeid: + if "exchange_online" in nodeid: try: # Extract exchange ID from nodeid - exchange_id = nodeid.split('[')[1].split('-')[0].rstrip(']') + exchange_id = nodeid.split("[")[1].split("-")[0].rstrip("]") return exchange_id except Exception as e: print(e) @@ -116,14 +135,13 @@ def generate_trades_history(n_rows, start_date: Optional[datetime] = None, days= _end_timestamp = pd.to_datetime(end_date).timestamp() random_timestamps_in_seconds = np.random.uniform(_start_timestamp, _end_timestamp, n_rows) 
- timestamp = pd.to_datetime(random_timestamps_in_seconds, unit='s') + timestamp = pd.to_datetime(random_timestamps_in_seconds, unit="s") id = [ - f'a{np.random.randint(1e6, 1e7 - 1)}cd{np.random.randint(100, 999)}' - for _ in range(n_rows) + f"a{np.random.randint(1e6, 1e7 - 1)}cd{np.random.randint(100, 999)}" for _ in range(n_rows) ] - side = np.random.choice(['buy', 'sell'], n_rows) + side = np.random.choice(["buy", "sell"], n_rows) # Initial price and subsequent changes initial_price = 0.019626 @@ -134,50 +152,60 @@ def generate_trades_history(n_rows, start_date: Optional[datetime] = None, days= cost = price * amount # Create DataFrame - df = pd.DataFrame({'timestamp': timestamp, 'id': id, 'type': None, 'side': side, - 'price': price, 'amount': amount, 'cost': cost}) - df['date'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True) - df = df.sort_values('timestamp').reset_index(drop=True) - assert list(df.columns) == constants.DEFAULT_TRADES_COLUMNS + ['date'] + df = pd.DataFrame( + { + "timestamp": timestamp, + "id": id, + "type": None, + "side": side, + "price": price, + "amount": amount, + "cost": cost, + } + ) + df["date"] = pd.to_datetime(df["timestamp"], unit="ms", utc=True) + df = df.sort_values("timestamp").reset_index(drop=True) + assert list(df.columns) == constants.DEFAULT_TRADES_COLUMNS + ["date"] return df -def generate_test_data(timeframe: str, size: int, start: str = '2020-07-05', random_seed=42): +def generate_test_data(timeframe: str, size: int, start: str = "2020-07-05", random_seed=42): np.random.seed(random_seed) base = np.random.normal(20, 2, size=size) - if timeframe == '1y': - date = pd.date_range(start, periods=size, freq='1YS', tz='UTC') - elif timeframe == '1M': - date = pd.date_range(start, periods=size, freq='1MS', tz='UTC') - elif timeframe == '3M': - date = pd.date_range(start, periods=size, freq='3MS', tz='UTC') - elif timeframe == '1w' or timeframe == '7d': - date = pd.date_range(start, periods=size, freq='1W-MON', tz='UTC') + if timeframe == "1y": + date = pd.date_range(start, periods=size, freq="1YS", tz="UTC") + elif timeframe == "1M": + date = pd.date_range(start, periods=size, freq="1MS", tz="UTC") + elif timeframe == "3M": + date = pd.date_range(start, periods=size, freq="3MS", tz="UTC") + elif timeframe == "1w" or timeframe == "7d": + date = pd.date_range(start, periods=size, freq="1W-MON", tz="UTC") else: tf_mins = timeframe_to_minutes(timeframe) if tf_mins >= 1: - date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC') + date = pd.date_range(start, periods=size, freq=f"{tf_mins}min", tz="UTC") else: tf_secs = timeframe_to_seconds(timeframe) - date = pd.date_range(start, periods=size, freq=f'{tf_secs}s', tz='UTC') - df = pd.DataFrame({ - 'date': date, - 'open': base, - 'high': base + np.random.normal(2, 1, size=size), - 'low': base - np.random.normal(2, 1, size=size), - 'close': base + np.random.normal(0, 1, size=size), - 'volume': np.random.normal(200, size=size) - } + date = pd.date_range(start, periods=size, freq=f"{tf_secs}s", tz="UTC") + df = pd.DataFrame( + { + "date": date, + "open": base, + "high": base + np.random.normal(2, 1, size=size), + "low": base - np.random.normal(2, 1, size=size), + "close": base + np.random.normal(0, 1, size=size), + "volume": np.random.normal(200, size=size), + } ) df = df.dropna() return df -def generate_test_data_raw(timeframe: str, size: int, start: str = '2020-07-05', random_seed=42): - """ Generates data in the ohlcv format used by ccxt """ +def generate_test_data_raw(timeframe: str, 
size: int, start: str = "2020-07-05", random_seed=42): + """Generates data in the ohlcv format used by ccxt""" df = generate_test_data(timeframe, size, start, random_seed) - df['date'] = df.loc[:, 'date'].astype(np.int64) // 1000 // 1000 + df["date"] = df.loc[:, "date"].astype(np.int64) // 1000 // 1000 return list(list(x) for x in zip(*(df[x].values.tolist() for x in df.columns))) @@ -203,53 +231,53 @@ def get_mock_coro(return_value=None, side_effect=None): def patched_configuration_load_config_file(mocker, config) -> None: mocker.patch( - 'freqtrade.configuration.load_config.load_config_file', - lambda *args, **kwargs: config + "freqtrade.configuration.load_config.load_config_file", lambda *args, **kwargs: config ) def patch_exchange( - mocker, - api_mock=None, - id='binance', - mock_markets=True, - mock_supported_modes=True + mocker, api_mock=None, id="binance", mock_markets=True, mock_supported_modes=True ) -> None: - mocker.patch(f'{EXMS}._load_async_markets', return_value={}) - mocker.patch(f'{EXMS}.validate_config', MagicMock()) - mocker.patch(f'{EXMS}.validate_timeframes', MagicMock()) - mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id)) - mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title())) - mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2)) + mocker.patch(f"{EXMS}._load_async_markets", return_value={}) + mocker.patch(f"{EXMS}.validate_config", MagicMock()) + mocker.patch(f"{EXMS}.validate_timeframes", MagicMock()) + mocker.patch(f"{EXMS}.id", PropertyMock(return_value=id)) + mocker.patch(f"{EXMS}.name", PropertyMock(return_value=id.title())) + mocker.patch(f"{EXMS}.precisionMode", PropertyMock(return_value=2)) + # Temporary patch ... + mocker.patch("freqtrade.exchange.bybit.Bybit.cache_leverage_tiers") if mock_markets: if isinstance(mock_markets, bool): mock_markets = get_markets() - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=mock_markets)) if mock_supported_modes: mocker.patch( - f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs', - PropertyMock(return_value=[ - (TradingMode.MARGIN, MarginMode.CROSS), - (TradingMode.MARGIN, MarginMode.ISOLATED), - (TradingMode.FUTURES, MarginMode.CROSS), - (TradingMode.FUTURES, MarginMode.ISOLATED) - ]) + f"freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs", + PropertyMock( + return_value=[ + (TradingMode.MARGIN, MarginMode.CROSS), + (TradingMode.MARGIN, MarginMode.ISOLATED), + (TradingMode.FUTURES, MarginMode.CROSS), + (TradingMode.FUTURES, MarginMode.ISOLATED), + ] + ), ) if api_mock: - mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock) + mocker.patch(f"{EXMS}._init_ccxt", return_value=api_mock) else: - mocker.patch(f'{EXMS}._init_ccxt', MagicMock()) - mocker.patch(f'{EXMS}.timeframes', PropertyMock( - return_value=['5m', '15m', '1h', '1d'])) + mocker.patch(f"{EXMS}.get_fee", return_value=0.0025) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock()) + mocker.patch(f"{EXMS}.timeframes", PropertyMock(return_value=["5m", "15m", "1h", "1d"])) -def get_patched_exchange(mocker, config, api_mock=None, id='binance', - mock_markets=True, mock_supported_modes=True) -> Exchange: +def get_patched_exchange( + mocker, config, api_mock=None, id="binance", mock_markets=True, mock_supported_modes=True +) -> Exchange: patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes) - config['exchange']['name'] = id + config["exchange"]["name"] = id try: exchange = 
ExchangeResolver.load_exchange(config, load_leverage_tiers=True) except ImportError: @@ -258,14 +286,14 @@ def get_patched_exchange(mocker, config, api_mock=None, id='binance', def patch_wallet(mocker, free=999.9) -> None: - mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock( - return_value=free - )) + mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=free)) def patch_whitelist(mocker, conf) -> None: - mocker.patch('freqtrade.freqtradebot.FreqtradeBot._refresh_active_whitelist', - MagicMock(return_value=conf['exchange']['pair_whitelist'])) + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot._refresh_active_whitelist", + MagicMock(return_value=conf["exchange"]["pair_whitelist"]), + ) def patch_edge(mocker) -> None: @@ -274,13 +302,16 @@ def patch_edge(mocker) -> None: # "XRP/BTC", # "NEO/BTC" - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'NEO/BTC': PairInfo(-0.20, 0.66, 3.71, 0.50, 1.71, 10, 25), - 'LTC/BTC': PairInfo(-0.21, 0.66, 3.71, 0.50, 1.71, 11, 20), - } - )) - mocker.patch('freqtrade.edge.Edge.calculate', MagicMock(return_value=True)) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "NEO/BTC": PairInfo(-0.20, 0.66, 3.71, 0.50, 1.71, 10, 25), + "LTC/BTC": PairInfo(-0.21, 0.66, 3.71, 0.50, 1.71, 11, 20), + } + ), + ) + mocker.patch("freqtrade.edge.Edge.calculate", MagicMock(return_value=True)) # Functions for recurrent object patching @@ -293,13 +324,13 @@ def patch_freqtradebot(mocker, config) -> None: :param config: Config to pass to the bot :return: None """ - mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock()) + mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock()) patch_exchange(mocker) - mocker.patch('freqtrade.freqtradebot.RPCManager._init', MagicMock()) - mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock()) + mocker.patch("freqtrade.freqtradebot.RPCManager._init", MagicMock()) + mocker.patch("freqtrade.freqtradebot.RPCManager.send_msg", MagicMock()) patch_whitelist(mocker, config) - mocker.patch('freqtrade.freqtradebot.ExternalMessageConsumer') - mocker.patch('freqtrade.configuration.config_validation._validate_consumers') + mocker.patch("freqtrade.freqtradebot.ExternalMessageConsumer") + mocker.patch("freqtrade.configuration.config_validation._validate_consumers") def get_patched_freqtradebot(mocker, config) -> FreqtradeBot: @@ -337,6 +368,7 @@ def patch_get_signal( :param mocker: mocker to patch IStrategy class :return: None """ + # returns (Signal-direction, signaname) def patched_get_entry_signal(*args, **kwargs): direction = None @@ -366,11 +398,13 @@ def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = Tru Create some fake trades ... :param is_short: Optional bool, None creates a mix of long and short trades. """ + def add_trade(trade): if use_db: Trade.session.add(trade) else: LocalTrade.add_bt_trade(trade) + is_short1 = is_short if is_short is not None else True is_short2 = is_short if is_short is not None else False # Simulate dry_run entries @@ -442,6 +476,7 @@ def create_mock_trades_usdt(fee, is_short: Optional[bool] = False, use_db: bool """ Create some fake trades ... 
""" + def add_trade(trade): if use_db: Trade.session.add(trade) @@ -484,8 +519,7 @@ def patch_gc(mocker) -> None: @pytest.fixture(autouse=True) def user_dir(mocker, tmp_path) -> Path: user_dir = tmp_path / "user_data" - mocker.patch('freqtrade.configuration.configuration.create_userdata_dir', - return_value=user_dir) + mocker.patch("freqtrade.configuration.configuration.create_userdata_dir", return_value=user_dir) return user_dir @@ -497,23 +531,23 @@ def patch_coingecko(mocker) -> None: :return: None """ - tickermock = MagicMock(return_value={'bitcoin': {'usd': 12345.0}, 'ethereum': {'usd': 12345.0}}) - listmock = MagicMock(return_value=[{'id': 'bitcoin', 'name': 'Bitcoin', 'symbol': 'btc', - 'website_slug': 'bitcoin'}, - {'id': 'ethereum', 'name': 'Ethereum', 'symbol': 'eth', - 'website_slug': 'ethereum'} - ]) + tickermock = MagicMock(return_value={"bitcoin": {"usd": 12345.0}, "ethereum": {"usd": 12345.0}}) + listmock = MagicMock( + return_value=[ + {"id": "bitcoin", "name": "Bitcoin", "symbol": "btc", "website_slug": "bitcoin"}, + {"id": "ethereum", "name": "Ethereum", "symbol": "eth", "website_slug": "ethereum"}, + ] + ) mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", get_price=tickermock, get_coins_list=listmock, - ) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def init_persistence(default_conf): - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) @pytest.fixture(scope="function") @@ -527,35 +561,24 @@ def default_conf_usdt(testdatadir): def get_default_conf(testdatadir): - """ Returns validated configuration suitable for most tests """ + """Returns validated configuration suitable for most tests""" configuration = { "max_open_trades": 1, "stake_currency": "BTC", "stake_amount": 0.001, "fiat_display_currency": "USD", - "timeframe": '5m', + "timeframe": "5m", "dry_run": True, "cancel_open_orders_on_exit": False, - "minimal_roi": { - "40": 0.0, - "30": 0.01, - "20": 0.02, - "0": 0.04 - }, + "minimal_roi": {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04}, "dry_run_wallet": 1000, "stoploss": -0.10, - "unfilledtimeout": { - "entry": 10, - "exit": 30 - }, + "unfilledtimeout": {"entry": 10, "exit": 30}, "entry_pricing": { "price_last_balance": 0.0, "use_order_book": False, "order_book_top": 1, - "check_depth_of_market": { - "enabled": False, - "bids_to_ask_delta": 1 - } + "check_depth_of_market": {"enabled": False, "bids_to_ask_delta": 1}, }, "exit_pricing": { "use_order_book": False, @@ -565,20 +588,13 @@ def get_default_conf(testdatadir): "name": "binance", "key": "key", "secret": "secret", - "pair_whitelist": [ - "ETH/BTC", - "LTC/BTC", - "XRP/BTC", - "NEO/BTC" - ], + "pair_whitelist": ["ETH/BTC", "LTC/BTC", "XRP/BTC", "NEO/BTC"], "pair_blacklist": [ "DOGE/BTC", "HOT/BTC", - ] + ], }, - "pairlists": [ - {"method": "StaticPairList"} - ], + "pairlists": [{"method": "StaticPairList"}], "telegram": { "enabled": False, "token": "token", @@ -604,27 +620,29 @@ def get_default_conf(testdatadir): def get_default_conf_usdt(testdatadir): configuration = get_default_conf(testdatadir) - configuration.update({ - "stake_amount": 60.0, - "stake_currency": "USDT", - "exchange": { - "name": "binance", - "enabled": True, - "key": "key", - "secret": "secret", - "pair_whitelist": [ - "ETH/USDT", - "LTC/USDT", - "XRP/USDT", - "NEO/USDT", - "TKN/USDT", - ], - "pair_blacklist": [ - "DOGE/USDT", - "HOT/USDT", - ] - }, - }) + configuration.update( + { + "stake_amount": 60.0, + "stake_currency": "USDT", + 
"exchange": { + "name": "binance", + "enabled": True, + "key": "key", + "secret": "secret", + "pair_whitelist": [ + "ETH/USDT", + "LTC/USDT", + "XRP/USDT", + "NEO/USDT", + "TKN/USDT", + ], + "pair_blacklist": [ + "DOGE/USDT", + "HOT/USDT", + ], + }, + } + ) return configuration @@ -635,56 +653,68 @@ def fee(): @pytest.fixture def ticker(): - return MagicMock(return_value={ - 'bid': 0.00001098, - 'ask': 0.00001099, - 'last': 0.00001098, - }) + return MagicMock( + return_value={ + "bid": 0.00001098, + "ask": 0.00001099, + "last": 0.00001098, + } + ) @pytest.fixture def ticker_sell_up(): - return MagicMock(return_value={ - 'bid': 0.00001172, - 'ask': 0.00001173, - 'last': 0.00001172, - }) + return MagicMock( + return_value={ + "bid": 0.00001172, + "ask": 0.00001173, + "last": 0.00001172, + } + ) @pytest.fixture def ticker_sell_down(): - return MagicMock(return_value={ - 'bid': 0.00001044, - 'ask': 0.00001043, - 'last': 0.00001044, - }) + return MagicMock( + return_value={ + "bid": 0.00001044, + "ask": 0.00001043, + "last": 0.00001044, + } + ) @pytest.fixture def ticker_usdt(): - return MagicMock(return_value={ - 'bid': 2.0, - 'ask': 2.02, - 'last': 2.0, - }) + return MagicMock( + return_value={ + "bid": 2.0, + "ask": 2.02, + "last": 2.0, + } + ) @pytest.fixture def ticker_usdt_sell_up(): - return MagicMock(return_value={ - 'bid': 2.2, - 'ask': 2.3, - 'last': 2.2, - }) + return MagicMock( + return_value={ + "bid": 2.2, + "ask": 2.3, + "last": 2.2, + } + ) @pytest.fixture def ticker_usdt_sell_down(): - return MagicMock(return_value={ - 'bid': 2.01, - 'ask': 2.0, - 'last': 2.01, - }) + return MagicMock( + return_value={ + "bid": 2.01, + "ask": 2.0, + "last": 2.01, + } + ) @pytest.fixture @@ -696,874 +726,799 @@ def get_markets(): # See get_markets_static() for immutable markets and do not modify them unless absolutely # necessary! 
return { - 'ETH/BTC': { - 'id': 'ethbtc', - 'symbol': 'ETH/BTC', - 'base': 'ETH', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "ETH/BTC": { + "id": "ethbtc", + "symbol": "ETH/BTC", + "base": "ETH", + "quote": "BTC", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 100000000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000, }, - 'price': { - 'min': None, - 'max': 500000, + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, + "cost": { + "min": 0.0001, + "max": 500000, }, - 'leverage': { - 'min': 1.0, - 'max': 2.0 - } + "leverage": {"min": 1.0, "max": 2.0}, }, }, - 'TKN/BTC': { - 'id': 'tknbtc', - 'symbol': 'TKN/BTC', - 'base': 'TKN', - 'quote': 'BTC', + "TKN/BTC": { + "id": "tknbtc", + "symbol": "TKN/BTC", + "base": "TKN", + "quote": "BTC", # According to ccxt, markets without active item set are also active # 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 100000000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000, }, - 'price': { - 'min': None, - 'max': 500000, + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, + "cost": { + "min": 0.0001, + "max": 500000, }, - 'leverage': { - 'min': 1.0, - 'max': 5.0 - } + "leverage": {"min": 1.0, "max": 5.0}, }, }, - 'BLK/BTC': { - 'id': 'blkbtc', - 'symbol': 'BLK/BTC', - 'base': 'BLK', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "BLK/BTC": { + "id": "blkbtc", + "symbol": "BLK/BTC", + "base": "BLK", + "quote": "BTC", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 1000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 1000, }, - 'price': { - 'min': None, - 'max': 500000, + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, - }, - 'leverage': { - 'min': 1.0, - 'max': 3.0 + "cost": { + "min": 0.0001, + "max": 500000, }, + "leverage": {"min": 1.0, "max": 3.0}, }, }, - 'LTC/BTC': { - 'id': 'ltcbtc', - 'symbol': 'LTC/BTC', - 'base': 'LTC', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "LTC/BTC": { + "id": "ltcbtc", + "symbol": "LTC/BTC", + "base": "LTC", + "quote": "BTC", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 
'max': 100000000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000, }, - 'price': { - 'min': None, - 'max': 500000, + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, - }, - 'leverage': { - 'min': None, - 'max': None + "cost": { + "min": 0.0001, + "max": 500000, }, + "leverage": {"min": None, "max": None}, }, - 'info': {}, + "info": {}, }, - 'XRP/BTC': { - 'id': 'xrpbtc', - 'symbol': 'XRP/BTC', - 'base': 'XRP', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "XRP/BTC": { + "id": "xrpbtc", + "symbol": "XRP/BTC", + "base": "XRP", + "quote": "BTC", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 100000000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000, }, - 'price': { - 'min': None, - 'max': 500000, + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, + "cost": { + "min": 0.0001, + "max": 500000, }, - 'leverage': { - 'min': None, - 'max': None, - }, - }, - 'info': {}, - }, - 'NEO/BTC': { - 'id': 'neobtc', - 'symbol': 'NEO/BTC', - 'base': 'NEO', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, - }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 100000000, - }, - 'price': { - 'min': None, - 'max': 500000, - }, - 'cost': { - 'min': 0.0001, - 'max': 500000, - }, - 'leverage': { - 'min': None, - 'max': None, - }, - }, - 'info': {}, - }, - 'BTT/BTC': { - 'id': 'BTTBTC', - 'symbol': 'BTT/BTC', - 'base': 'BTT', - 'quote': 'BTC', - 'active': False, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'contractSize': None, - 'precision': { - 'base': 8, - 'quote': 8, - 'amount': 0, - 'price': 8 - }, - 'limits': { - 'amount': { - 'min': 1.0, - 'max': 90000000.0 - }, - 'price': { - 'min': None, - 'max': None - }, - 'cost': { - 'min': 0.0001, - 'max': None - }, - 'leverage': { - 'min': None, - 'max': None, - }, - }, - 'info': {}, - }, - 'ETH/USDT': { - 'id': 'USDT-ETH', - 'symbol': 'ETH/USDT', - 'base': 'ETH', - 'quote': 'USDT', - 'settle': None, - 'baseId': 'ETH', - 'quoteId': 'USDT', - 'settleId': None, - 'type': 'spot', - 'spot': True, - 'margin': True, - 'swap': True, - 'future': True, - 'option': False, - 'active': True, - 'contract': None, - 'linear': None, - 'inverse': None, - 'taker': 0.0006, - 'maker': 0.0002, - 'contractSize': None, - 'expiry': None, - 'expiryDateTime': None, - 'strike': None, - 'optionType': None, - 'precision': { - 'amount': 8, - 'price': 8, - }, - 'limits': { - 'leverage': { - 'min': 1, - 'max': 100, - }, - 'amount': { - 'min': 0.02214286, - 'max': None, - }, - 'price': { - 'min': 1e-08, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, - }, - 'info': { - 'maintenance_rate': '0.005', - }, - }, - 'BTC/USDT': { - 'id': 'USDT-BTC', - 'symbol': 'BTC/USDT', - 'base': 'BTC', - 'quote': 'USDT', - 'settle': None, - 'baseId': 'BTC', - 'quoteId': 'USDT', - 'settleId': None, - 'type': 'spot', - 'spot': True, - 'margin': True, - 'swap': False, - 'future': False, - 'option': False, - 
'active': True, - 'contract': None, - 'linear': None, - 'inverse': None, - 'taker': 0.0006, - 'maker': 0.0002, - 'contractSize': None, - 'expiry': None, - 'expiryDateTime': None, - 'strike': None, - 'optionType': None, - 'precision': { - 'amount': 4, - 'price': 4, - }, - 'limits': { - 'leverage': { - 'min': 1, - 'max': 100, - }, - 'amount': { - 'min': 0.000221, - 'max': None, - }, - 'price': { - 'min': 1e-02, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, - }, - 'info': { - 'maintenance_rate': '0.005', - }, - }, - 'LTC/USDT': { - 'id': 'USDT-LTC', - 'symbol': 'LTC/USDT', - 'base': 'LTC', - 'quote': 'USDT', - 'active': False, - 'spot': True, - 'future': True, - 'swap': True, - 'margin': True, - 'linear': None, - 'inverse': False, - 'type': 'spot', - 'contractSize': None, - 'taker': 0.0006, - 'maker': 0.0002, - 'precision': { - 'amount': 8, - 'price': 8 - }, - 'limits': { - 'amount': { - 'min': 0.06646786, - 'max': None - }, - 'price': { - 'min': 1e-08, - 'max': None - }, - 'leverage': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, - }, - 'info': {}, - }, - 'XRP/USDT': { - 'id': 'xrpusdt', - 'symbol': 'XRP/USDT', - 'base': 'XRP', - 'quote': 'USDT', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'taker': 0.0006, - 'maker': 0.0002, - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, - }, - 'lot': 0.00000001, - 'contractSize': None, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 1000, - }, - 'price': { - 'min': None, - 'max': 500000, - }, - 'cost': { - 'min': 0.0001, - 'max': 500000, - }, - }, - 'info': {}, - }, - 'NEO/USDT': { - 'id': 'neousdt', - 'symbol': 'NEO/USDT', - 'base': 'NEO', - 'quote': 'USDT', - 'settle': '', - 'baseId': 'NEO', - 'quoteId': 'USDT', - 'settleId': '', - 'type': 'spot', - 'spot': True, - 'margin': True, - 'swap': False, - 'futures': False, - 'option': False, - 'active': True, - 'contract': False, - 'linear': None, - 'inverse': None, - 'taker': 0.0006, - 'maker': 0.0002, - 'contractSize': None, - 'expiry': None, - 'expiryDatetime': None, - 'strike': None, - 'optionType': None, - 'tierBased': None, - 'percentage': None, - 'lot': 0.00000001, - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, - }, - 'limits': { "leverage": { - 'min': 1, - 'max': 10 - }, - 'amount': { - 'min': 0.01, - 'max': 1000, - }, - 'price': { - 'min': None, - 'max': 500000, - }, - 'cost': { - 'min': 0.0001, - 'max': 500000, + "min": None, + "max": None, }, }, - 'info': {}, + "info": {}, }, - 'TKN/USDT': { - 'id': 'tknusdt', - 'symbol': 'TKN/USDT', - 'base': 'TKN', - 'quote': 'USDT', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'contractSize': None, - 'taker': 0.0006, - 'maker': 0.0002, - 'precision': { - 'price': 8, - 'amount': 8, - 'cost': 8, + "NEO/BTC": { + "id": "neobtc", + "symbol": "NEO/BTC", + "base": "NEO", + "quote": "BTC", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'lot': 0.00000001, - 'limits': { - 'amount': { - 'min': 0.01, - 'max': 100000000000, + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000, }, - 'price': { - 'min': None, - 'max': 500000 + "price": { + "min": None, + "max": 500000, }, - 'cost': { - 'min': 0.0001, - 'max': 500000, + "cost": { + "min": 0.0001, + "max": 500000, }, - 'leverage': { - 'min': None, - 'max': None, + "leverage": { + "min": None, + "max": 
None, }, }, - 'info': {}, + "info": {}, }, - 'LTC/USD': { - 'id': 'USD-LTC', - 'symbol': 'LTC/USD', - 'base': 'LTC', - 'quote': 'USD', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'contractSize': None, - 'precision': { - 'amount': 8, - 'price': 8 - }, - 'limits': { - 'amount': { - 'min': 0.06646786, - 'max': None - }, - 'price': { - 'min': 1e-08, - 'max': None - }, - 'leverage': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, + "BTT/BTC": { + "id": "BTTBTC", + "symbol": "BTT/BTC", + "base": "BTT", + "quote": "BTC", + "active": False, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "contractSize": None, + "precision": {"base": 8, "quote": 8, "amount": 0, "price": 8}, + "limits": { + "amount": {"min": 1.0, "max": 90000000.0}, + "price": {"min": None, "max": None}, + "cost": {"min": 0.0001, "max": None}, + "leverage": { + "min": None, + "max": None, }, }, - 'info': {}, + "info": {}, }, - 'XLTCUSDT': { - 'id': 'xLTCUSDT', - 'symbol': 'XLTCUSDT', - 'base': 'LTC', - 'quote': 'USDT', - 'active': True, - 'spot': False, - 'type': 'swap', - 'contractSize': 0.01, - 'swap': False, - 'linear': False, - 'taker': 0.0006, - 'maker': 0.0002, - 'precision': { - 'amount': 8, - 'price': 8 + "ETH/USDT": { + "id": "USDT-ETH", + "symbol": "ETH/USDT", + "base": "ETH", + "quote": "USDT", + "settle": None, + "baseId": "ETH", + "quoteId": "USDT", + "settleId": None, + "type": "spot", + "spot": True, + "margin": True, + "swap": True, + "future": True, + "option": False, + "active": True, + "contract": None, + "linear": None, + "inverse": None, + "taker": 0.0006, + "maker": 0.0002, + "contractSize": None, + "expiry": None, + "expiryDateTime": None, + "strike": None, + "optionType": None, + "precision": { + "amount": 8, + "price": 8, }, - 'limits': { - 'leverage': { - 'min': None, - 'max': None, + "limits": { + "leverage": { + "min": 1, + "max": 100, }, - 'amount': { - 'min': 0.06646786, - 'max': None + "amount": { + "min": 0.02214286, + "max": None, }, - 'price': { - 'min': 1e-08, - 'max': None + "price": { + "min": 1e-08, + "max": None, }, - 'cost': { - 'min': None, - 'max': None, + "cost": { + "min": None, + "max": None, }, }, - 'info': {}, + "info": { + "maintenance_rate": "0.005", + }, }, - 'LTC/ETH': { - 'id': 'LTCETH', - 'symbol': 'LTC/ETH', - 'base': 'LTC', - 'quote': 'ETH', - 'active': True, - 'spot': True, - 'swap': False, - 'linear': None, - 'type': 'spot', - 'contractSize': None, - 'precision': { - 'base': 8, - 'quote': 8, - 'amount': 3, - 'price': 5 + "BTC/USDT": { + "id": "USDT-BTC", + "symbol": "BTC/USDT", + "base": "BTC", + "quote": "USDT", + "settle": None, + "baseId": "BTC", + "quoteId": "USDT", + "settleId": None, + "type": "spot", + "spot": True, + "margin": True, + "swap": False, + "future": False, + "option": False, + "active": True, + "contract": None, + "linear": None, + "inverse": None, + "taker": 0.0006, + "maker": 0.0002, + "contractSize": None, + "expiry": None, + "expiryDateTime": None, + "strike": None, + "optionType": None, + "precision": { + "amount": 4, + "price": 4, }, - 'limits': { - 'leverage': { - 'min': None, - 'max': None, + "limits": { + "leverage": { + "min": 1, + "max": 100, }, - 'amount': { - 'min': 0.001, - 'max': 10000000.0 + "amount": { + "min": 0.000221, + "max": None, }, - 'price': { - 'min': 1e-05, - 'max': 1000.0 + "price": { + "min": 1e-02, + "max": None, + }, + "cost": { + "min": None, + "max": None, }, - 'cost': { - 'min': 0.01, - 'max': None - } }, - 'info': { - } + 
"info": { + "maintenance_rate": "0.005", + }, }, - 'ETH/USDT:USDT': { - 'id': 'ETH_USDT', - 'symbol': 'ETH/USDT:USDT', - 'base': 'ETH', - 'quote': 'USDT', - 'settle': 'USDT', - 'baseId': 'ETH', - 'quoteId': 'USDT', - 'settleId': 'USDT', - 'type': 'swap', - 'spot': False, - 'margin': False, - 'swap': True, - 'future': True, # Binance mode ... - 'option': False, - 'contract': True, - 'linear': True, - 'inverse': False, - 'tierBased': False, - 'percentage': True, - 'taker': 0.0006, - 'maker': 0.0002, - 'contractSize': 10, - 'active': True, - 'expiry': None, - 'expiryDatetime': None, - 'strike': None, - 'optionType': None, - 'limits': { - 'leverage': { - 'min': 1, - 'max': 100 + "LTC/USDT": { + "id": "USDT-LTC", + "symbol": "LTC/USDT", + "base": "LTC", + "quote": "USDT", + "active": False, + "spot": True, + "future": True, + "swap": True, + "margin": True, + "linear": None, + "inverse": False, + "type": "spot", + "contractSize": None, + "taker": 0.0006, + "maker": 0.0002, + "precision": {"amount": 8, "price": 8}, + "limits": { + "amount": {"min": 0.06646786, "max": None}, + "price": {"min": 1e-08, "max": None}, + "leverage": { + "min": None, + "max": None, }, - 'amount': { - 'min': 1, - 'max': 300000 + "cost": { + "min": None, + "max": None, }, - 'price': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - } }, - 'precision': { - 'price': 0.05, - 'amount': 1 - }, - 'info': {} + "info": {}, }, - 'ADA/USDT:USDT': { - 'limits': { - 'leverage': { - 'min': 1, - 'max': 20, - }, - 'amount': { - 'min': 1, - 'max': 1000000, - }, - 'price': { - 'min': 0.52981, - 'max': 1.58943, - }, - 'cost': { - 'min': None, - 'max': None, - } + "XRP/USDT": { + "id": "xrpusdt", + "symbol": "XRP/USDT", + "base": "XRP", + "quote": "USDT", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "taker": 0.0006, + "maker": 0.0002, + "precision": { + "price": 8, + "amount": 8, + "cost": 8, }, - 'precision': { - 'amount': 1, - 'price': 0.00001 + "lot": 0.00000001, + "contractSize": None, + "limits": { + "amount": { + "min": 0.01, + "max": 1000, + }, + "price": { + "min": None, + "max": 500000, + }, + "cost": { + "min": 0.0001, + "max": 500000, + }, }, - 'tierBased': True, - 'percentage': True, - 'taker': 0.0000075, - 'maker': -0.0000025, - 'feeSide': 'get', - 'tiers': { - 'maker': [ - [0, 0.002], [1.5, 0.00185], - [3, 0.00175], [6, 0.00165], - [12.5, 0.00155], [25, 0.00145], - [75, 0.00135], [200, 0.00125], - [500, 0.00115], [1250, 0.00105], - [2500, 0.00095], [3000, 0.00085], - [6000, 0.00075], [11000, 0.00065], - [20000, 0.00055], [40000, 0.00055], - [75000, 0.00055] + "info": {}, + }, + "NEO/USDT": { + "id": "neousdt", + "symbol": "NEO/USDT", + "base": "NEO", + "quote": "USDT", + "settle": "", + "baseId": "NEO", + "quoteId": "USDT", + "settleId": "", + "type": "spot", + "spot": True, + "margin": True, + "swap": False, + "futures": False, + "option": False, + "active": True, + "contract": False, + "linear": None, + "inverse": None, + "taker": 0.0006, + "maker": 0.0002, + "contractSize": None, + "expiry": None, + "expiryDatetime": None, + "strike": None, + "optionType": None, + "tierBased": None, + "percentage": None, + "lot": 0.00000001, + "precision": { + "price": 8, + "amount": 8, + "cost": 8, + }, + "limits": { + "leverage": {"min": 1, "max": 10}, + "amount": { + "min": 0.01, + "max": 1000, + }, + "price": { + "min": None, + "max": 500000, + }, + "cost": { + "min": 0.0001, + "max": 500000, + }, + }, + "info": {}, + }, + "TKN/USDT": { + "id": "tknusdt", + 
"symbol": "TKN/USDT", + "base": "TKN", + "quote": "USDT", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "contractSize": None, + "taker": 0.0006, + "maker": 0.0002, + "precision": { + "price": 8, + "amount": 8, + "cost": 8, + }, + "lot": 0.00000001, + "limits": { + "amount": { + "min": 0.01, + "max": 100000000000, + }, + "price": {"min": None, "max": 500000}, + "cost": { + "min": 0.0001, + "max": 500000, + }, + "leverage": { + "min": None, + "max": None, + }, + }, + "info": {}, + }, + "LTC/USD": { + "id": "USD-LTC", + "symbol": "LTC/USD", + "base": "LTC", + "quote": "USD", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "contractSize": None, + "precision": {"amount": 8, "price": 8}, + "limits": { + "amount": {"min": 0.06646786, "max": None}, + "price": {"min": 1e-08, "max": None}, + "leverage": { + "min": None, + "max": None, + }, + "cost": { + "min": None, + "max": None, + }, + }, + "info": {}, + }, + "XLTCUSDT": { + "id": "xLTCUSDT", + "symbol": "XLTCUSDT", + "base": "LTC", + "quote": "USDT", + "active": True, + "spot": False, + "type": "swap", + "contractSize": 0.01, + "swap": False, + "linear": False, + "taker": 0.0006, + "maker": 0.0002, + "precision": {"amount": 8, "price": 8}, + "limits": { + "leverage": { + "min": None, + "max": None, + }, + "amount": {"min": 0.06646786, "max": None}, + "price": {"min": 1e-08, "max": None}, + "cost": { + "min": None, + "max": None, + }, + }, + "info": {}, + }, + "LTC/ETH": { + "id": "LTCETH", + "symbol": "LTC/ETH", + "base": "LTC", + "quote": "ETH", + "active": True, + "spot": True, + "swap": False, + "linear": None, + "type": "spot", + "contractSize": None, + "precision": {"base": 8, "quote": 8, "amount": 3, "price": 5}, + "limits": { + "leverage": { + "min": None, + "max": None, + }, + "amount": {"min": 0.001, "max": 10000000.0}, + "price": {"min": 1e-05, "max": 1000.0}, + "cost": {"min": 0.01, "max": None}, + }, + "info": {}, + }, + "ETH/USDT:USDT": { + "id": "ETH_USDT", + "symbol": "ETH/USDT:USDT", + "base": "ETH", + "quote": "USDT", + "settle": "USDT", + "baseId": "ETH", + "quoteId": "USDT", + "settleId": "USDT", + "type": "swap", + "spot": False, + "margin": False, + "swap": True, + "future": True, # Binance mode ... 
+ "option": False, + "contract": True, + "linear": True, + "inverse": False, + "tierBased": False, + "percentage": True, + "taker": 0.0006, + "maker": 0.0002, + "contractSize": 10, + "active": True, + "expiry": None, + "expiryDatetime": None, + "strike": None, + "optionType": None, + "limits": { + "leverage": {"min": 1, "max": 100}, + "amount": {"min": 1, "max": 300000}, + "price": { + "min": None, + "max": None, + }, + "cost": { + "min": None, + "max": None, + }, + }, + "precision": {"price": 0.05, "amount": 1}, + "info": {}, + }, + "ADA/USDT:USDT": { + "limits": { + "leverage": { + "min": 1, + "max": 20, + }, + "amount": { + "min": 1, + "max": 1000000, + }, + "price": { + "min": 0.52981, + "max": 1.58943, + }, + "cost": { + "min": None, + "max": None, + }, + }, + "precision": {"amount": 1, "price": 0.00001}, + "tierBased": True, + "percentage": True, + "taker": 0.0000075, + "maker": -0.0000025, + "feeSide": "get", + "tiers": { + "maker": [ + [0, 0.002], + [1.5, 0.00185], + [3, 0.00175], + [6, 0.00165], + [12.5, 0.00155], + [25, 0.00145], + [75, 0.00135], + [200, 0.00125], + [500, 0.00115], + [1250, 0.00105], + [2500, 0.00095], + [3000, 0.00085], + [6000, 0.00075], + [11000, 0.00065], + [20000, 0.00055], + [40000, 0.00055], + [75000, 0.00055], + ], + "taker": [ + [0, 0.002], + [1.5, 0.00195], + [3, 0.00185], + [6, 0.00175], + [12.5, 0.00165], + [25, 0.00155], + [75, 0.00145], + [200, 0.00135], + [500, 0.00125], + [1250, 0.00115], + [2500, 0.00105], + [3000, 0.00095], + [6000, 0.00085], + [11000, 0.00075], + [20000, 0.00065], + [40000, 0.00065], + [75000, 0.00065], ], - 'taker': [ - [0, 0.002], [1.5, 0.00195], - [3, 0.00185], [6, 0.00175], - [12.5, 0.00165], [25, 0.00155], - [75, 0.00145], [200, 0.00135], - [500, 0.00125], [1250, 0.00115], - [2500, 0.00105], [3000, 0.00095], - [6000, 0.00085], [11000, 0.00075], - [20000, 0.00065], [40000, 0.00065], - [75000, 0.00065] - ] }, - 'id': 'ADA_USDT', - 'symbol': 'ADA/USDT:USDT', - 'base': 'ADA', - 'quote': 'USDT', - 'settle': 'USDT', - 'baseId': 'ADA', - 'quoteId': 'USDT', - 'settleId': 'usdt', - 'type': 'swap', - 'spot': False, - 'margin': False, - 'swap': True, - 'future': True, # Binance mode ... - 'option': False, - 'active': True, - 'contract': True, - 'linear': True, - 'inverse': False, - 'contractSize': 0.01, - 'expiry': None, - 'expiryDatetime': None, - 'strike': None, - 'optionType': None, - 'info': {} + "id": "ADA_USDT", + "symbol": "ADA/USDT:USDT", + "base": "ADA", + "quote": "USDT", + "settle": "USDT", + "baseId": "ADA", + "quoteId": "USDT", + "settleId": "usdt", + "type": "swap", + "spot": False, + "margin": False, + "swap": True, + "future": True, # Binance mode ... 
+ "option": False, + "active": True, + "contract": True, + "linear": True, + "inverse": False, + "contractSize": 0.01, + "expiry": None, + "expiryDatetime": None, + "strike": None, + "optionType": None, + "info": {}, }, - 'SOL/BUSD:BUSD': { - 'limits': { - 'leverage': {'min': None, 'max': None}, - 'amount': {'min': 1, 'max': 1000000}, - 'price': {'min': 0.04, 'max': 100000}, - 'cost': {'min': 5, 'max': None}, - 'market': {'min': 1, 'max': 1500} + "SOL/BUSD:BUSD": { + "limits": { + "leverage": {"min": None, "max": None}, + "amount": {"min": 1, "max": 1000000}, + "price": {"min": 0.04, "max": 100000}, + "cost": {"min": 5, "max": None}, + "market": {"min": 1, "max": 1500}, }, - 'precision': {'amount': 0, 'price': 2, 'base': 8, 'quote': 8}, - 'tierBased': False, - 'percentage': True, - 'taker': 0.0004, - 'maker': 0.0002, - 'feeSide': 'get', - 'id': 'SOLBUSD', - 'lowercaseId': 'solbusd', - 'symbol': 'SOL/BUSD', - 'base': 'SOL', - 'quote': 'BUSD', - 'settle': 'BUSD', - 'baseId': 'SOL', - 'quoteId': 'BUSD', - 'settleId': 'BUSD', - 'type': 'future', - 'spot': False, - 'margin': False, - 'future': True, - 'delivery': False, - 'option': False, - 'active': True, - 'contract': True, - 'linear': True, - 'inverse': False, - 'contractSize': 1, - 'expiry': None, - 'expiryDatetime': None, - 'strike': None, - 'optionType': None, - 'info': { - 'symbol': 'SOLBUSD', - 'pair': 'SOLBUSD', - 'contractType': 'PERPETUAL', - 'deliveryDate': '4133404800000', - 'onboardDate': '1630566000000', - 'status': 'TRADING', - 'maintMarginPercent': '2.5000', - 'requiredMarginPercent': '5.0000', - 'baseAsset': 'SOL', - 'quoteAsset': 'BUSD', - 'marginAsset': 'BUSD', - 'pricePrecision': '4', - 'quantityPrecision': '0', - 'baseAssetPrecision': '8', - 'quotePrecision': '8', - 'underlyingType': 'COIN', - 'underlyingSubType': [], - 'settlePlan': '0', - 'triggerProtect': '0.0500', - 'liquidationFee': '0.005000', - 'marketTakeBound': '0.05', - 'filters': [ + "precision": {"amount": 0, "price": 2, "base": 8, "quote": 8}, + "tierBased": False, + "percentage": True, + "taker": 0.0004, + "maker": 0.0002, + "feeSide": "get", + "id": "SOLBUSD", + "lowercaseId": "solbusd", + "symbol": "SOL/BUSD", + "base": "SOL", + "quote": "BUSD", + "settle": "BUSD", + "baseId": "SOL", + "quoteId": "BUSD", + "settleId": "BUSD", + "type": "future", + "spot": False, + "margin": False, + "future": True, + "delivery": False, + "option": False, + "active": True, + "contract": True, + "linear": True, + "inverse": False, + "contractSize": 1, + "expiry": None, + "expiryDatetime": None, + "strike": None, + "optionType": None, + "info": { + "symbol": "SOLBUSD", + "pair": "SOLBUSD", + "contractType": "PERPETUAL", + "deliveryDate": "4133404800000", + "onboardDate": "1630566000000", + "status": "TRADING", + "maintMarginPercent": "2.5000", + "requiredMarginPercent": "5.0000", + "baseAsset": "SOL", + "quoteAsset": "BUSD", + "marginAsset": "BUSD", + "pricePrecision": "4", + "quantityPrecision": "0", + "baseAssetPrecision": "8", + "quotePrecision": "8", + "underlyingType": "COIN", + "underlyingSubType": [], + "settlePlan": "0", + "triggerProtect": "0.0500", + "liquidationFee": "0.005000", + "marketTakeBound": "0.05", + "filters": [ { - 'minPrice': '0.0400', - 'maxPrice': '100000', - 'filterType': 'PRICE_FILTER', - 'tickSize': '0.0100' + "minPrice": "0.0400", + "maxPrice": "100000", + "filterType": "PRICE_FILTER", + "tickSize": "0.0100", }, + {"stepSize": "1", "filterType": "LOT_SIZE", "maxQty": "1000000", "minQty": "1"}, { - 'stepSize': '1', - 'filterType': 'LOT_SIZE', - 
'maxQty': '1000000', - 'minQty': '1' + "stepSize": "1", + "filterType": "MARKET_LOT_SIZE", + "maxQty": "1500", + "minQty": "1", }, + {"limit": "200", "filterType": "MAX_NUM_ORDERS"}, + {"limit": "10", "filterType": "MAX_NUM_ALGO_ORDERS"}, + {"notional": "5", "filterType": "MIN_NOTIONAL"}, { - 'stepSize': '1', - 'filterType': 'MARKET_LOT_SIZE', - 'maxQty': '1500', - 'minQty': '1' + "multiplierDown": "0.9500", + "multiplierUp": "1.0500", + "multiplierDecimal": "4", + "filterType": "PERCENT_PRICE", }, - {'limit': '200', 'filterType': 'MAX_NUM_ORDERS'}, - {'limit': '10', 'filterType': 'MAX_NUM_ALGO_ORDERS'}, - {'notional': '5', 'filterType': 'MIN_NOTIONAL'}, - { - 'multiplierDown': '0.9500', - 'multiplierUp': '1.0500', - 'multiplierDecimal': '4', - 'filterType': 'PERCENT_PRICE' - } ], - 'orderTypes': [ - 'LIMIT', - 'MARKET', - 'STOP', - 'STOP_MARKET', - 'TAKE_PROFIT', - 'TAKE_PROFIT_MARKET', - 'TRAILING_STOP_MARKET' + "orderTypes": [ + "LIMIT", + "MARKET", + "STOP", + "STOP_MARKET", + "TAKE_PROFIT", + "TAKE_PROFIT_MARKET", + "TRAILING_STOP_MARKET", ], - 'timeInForce': ['GTC', 'IOC', 'FOK', 'GTX'] - } + "timeInForce": ["GTC", "IOC", "FOK", "GTX"], + }, }, } @@ -1573,10 +1528,22 @@ def markets_static(): # These markets are used in some tests that would need adaptation should anything change in # market list. Do not modify this list without a good reason! Do not modify market parameters # of listed pairs in get_markets() without a good reason either! - static_markets = ['BLK/BTC', 'BTT/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', - 'LTC/USDT', 'NEO/BTC', 'TKN/BTC', 'XLTCUSDT', 'XRP/BTC', - 'ADA/USDT:USDT', 'ETH/USDT:USDT', - ] + static_markets = [ + "BLK/BTC", + "BTT/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "LTC/USDT", + "NEO/BTC", + "TKN/BTC", + "XLTCUSDT", + "XRP/BTC", + "ADA/USDT:USDT", + "ETH/USDT:USDT", + ] all_markets = get_markets() return {m: all_markets[m] for m in static_markets} @@ -1587,191 +1554,150 @@ def shitcoinmarkets(markets_static): Fixture with shitcoin markets - used to test filters in pairlists """ shitmarkets = deepcopy(markets_static) - shitmarkets.update({ - 'HOT/BTC': { - 'id': 'HOTBTC', - 'symbol': 'HOT/BTC', - 'base': 'HOT', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'type': 'spot', - 'precision': { - 'base': 8, - 'quote': 8, - 'amount': 0, - 'price': 8 + shitmarkets.update( + { + "HOT/BTC": { + "id": "HOTBTC", + "symbol": "HOT/BTC", + "base": "HOT", + "quote": "BTC", + "active": True, + "spot": True, + "type": "spot", + "precision": {"base": 8, "quote": 8, "amount": 0, "price": 8}, + "limits": { + "amount": {"min": 1.0, "max": 90000000.0}, + "price": {"min": None, "max": None}, + "cost": {"min": 0.001, "max": None}, + }, + "info": {}, }, - 'limits': { - 'amount': { - 'min': 1.0, - 'max': 90000000.0 + "FUEL/BTC": { + "id": "FUELBTC", + "symbol": "FUEL/BTC", + "base": "FUEL", + "quote": "BTC", + "active": True, + "spot": True, + "type": "spot", + "precision": {"base": 8, "quote": 8, "amount": 0, "price": 8}, + "limits": { + "amount": {"min": 1.0, "max": 90000000.0}, + "price": {"min": 1e-08, "max": 1000.0}, + "cost": {"min": 0.001, "max": None}, }, - 'price': { - 'min': None, - 'max': None - }, - 'cost': { - 'min': 0.001, - 'max': None - } + "info": {}, }, - 'info': {}, - }, - 'FUEL/BTC': { - 'id': 'FUELBTC', - 'symbol': 'FUEL/BTC', - 'base': 'FUEL', - 'quote': 'BTC', - 'active': True, - 'spot': True, - 'type': 'spot', - 'precision': { - 'base': 8, - 'quote': 8, - 'amount': 0, - 'price': 8 + "NANO/USDT": { + 
"percentage": True, + "tierBased": False, + "taker": 0.001, + "maker": 0.001, + "precision": {"base": 8, "quote": 8, "amount": 2, "price": 4}, + "limits": { + "leverage": { + "min": None, + "max": None, + }, + "amount": { + "min": None, + "max": None, + }, + "price": { + "min": None, + "max": None, + }, + "cost": { + "min": None, + "max": None, + }, + }, + "id": "NANOUSDT", + "symbol": "NANO/USDT", + "base": "NANO", + "quote": "USDT", + "baseId": "NANO", + "quoteId": "USDT", + "info": {}, + "type": "spot", + "spot": True, + "future": False, + "active": True, }, - 'limits': { - 'amount': { - 'min': 1.0, - 'max': 90000000.0 + "ADAHALF/USDT": { + "percentage": True, + "tierBased": False, + "taker": 0.001, + "maker": 0.001, + "precision": {"base": 8, "quote": 8, "amount": 2, "price": 4}, + "limits": { + "leverage": { + "min": None, + "max": None, + }, + "amount": { + "min": None, + "max": None, + }, + "price": { + "min": None, + "max": None, + }, + "cost": { + "min": None, + "max": None, + }, }, - 'price': { - 'min': 1e-08, - 'max': 1000.0 - }, - 'cost': { - 'min': 0.001, - 'max': None - } + "id": "ADAHALFUSDT", + "symbol": "ADAHALF/USDT", + "base": "ADAHALF", + "quote": "USDT", + "baseId": "ADAHALF", + "quoteId": "USDT", + "info": {}, + "type": "spot", + "spot": True, + "future": False, + "active": True, }, - 'info': {}, - }, - 'NANO/USDT': { - "percentage": True, - "tierBased": False, - "taker": 0.001, - "maker": 0.001, - "precision": { - "base": 8, - "quote": 8, - "amount": 2, - "price": 4 + "ADADOUBLE/USDT": { + "percentage": True, + "tierBased": False, + "taker": 0.001, + "maker": 0.001, + "precision": {"base": 8, "quote": 8, "amount": 2, "price": 4}, + "limits": { + "leverage": { + "min": None, + "max": None, + }, + "amount": { + "min": None, + "max": None, + }, + "price": { + "min": None, + "max": None, + }, + "cost": { + "min": None, + "max": None, + }, + }, + "id": "ADADOUBLEUSDT", + "symbol": "ADADOUBLE/USDT", + "base": "ADADOUBLE", + "quote": "USDT", + "baseId": "ADADOUBLE", + "quoteId": "USDT", + "info": {}, + "type": "spot", + "spot": True, + "future": False, + "active": True, }, - "limits": { - 'leverage': { - 'min': None, - 'max': None, - }, - 'amount': { - 'min': None, - 'max': None, - }, - 'price': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, - }, - "id": "NANOUSDT", - "symbol": "NANO/USDT", - "base": "NANO", - "quote": "USDT", - "baseId": "NANO", - "quoteId": "USDT", - "info": {}, - "type": "spot", - "spot": True, - "future": False, - "active": True - }, - 'ADAHALF/USDT': { - "percentage": True, - "tierBased": False, - "taker": 0.001, - "maker": 0.001, - "precision": { - "base": 8, - "quote": 8, - "amount": 2, - "price": 4 - }, - "limits": { - 'leverage': { - 'min': None, - 'max': None, - }, - 'amount': { - 'min': None, - 'max': None, - }, - 'price': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, - }, - "id": "ADAHALFUSDT", - "symbol": "ADAHALF/USDT", - "base": "ADAHALF", - "quote": "USDT", - "baseId": "ADAHALF", - "quoteId": "USDT", - "info": {}, - "type": "spot", - "spot": True, - "future": False, - "active": True - }, - 'ADADOUBLE/USDT': { - "percentage": True, - "tierBased": False, - "taker": 0.001, - "maker": 0.001, - "precision": { - "base": 8, - "quote": 8, - "amount": 2, - "price": 4 - }, - "limits": { - 'leverage': { - 'min': None, - 'max': None, - }, - 'amount': { - 'min': None, - 'max': None, - }, - 'price': { - 'min': None, - 'max': None, - }, - 'cost': { - 'min': None, - 'max': None, - }, 
- }, - "id": "ADADOUBLEUSDT", - "symbol": "ADADOUBLE/USDT", - "base": "ADADOUBLE", - "quote": "USDT", - "baseId": "ADADOUBLE", - "quoteId": "USDT", - "info": {}, - "type": "spot", - "spot": True, - "future": False, - "active": True - }, - }) + } + ) return shitmarkets @@ -1780,257 +1706,261 @@ def markets_empty(): return MagicMock(return_value=[]) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_buy_order_open(): return { - 'id': 'mocked_limit_buy', - 'type': 'limit', - 'side': 'buy', - 'symbol': 'mocked', - 'timestamp': dt_ts(), - 'datetime': dt_now().isoformat(), - 'price': 0.00001099, - 'average': 0.00001099, - 'amount': 90.99181073, - 'filled': 0.0, - 'cost': 0.0009999, - 'remaining': 90.99181073, - 'status': 'open' + "id": "mocked_limit_buy", + "type": "limit", + "side": "buy", + "symbol": "mocked", + "timestamp": dt_ts(), + "datetime": dt_now().isoformat(), + "price": 0.00001099, + "average": 0.00001099, + "amount": 90.99181073, + "filled": 0.0, + "cost": 0.0009999, + "remaining": 90.99181073, + "status": "open", } -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_buy_order(limit_buy_order_open): order = deepcopy(limit_buy_order_open) - order['status'] = 'closed' - order['filled'] = order['amount'] - order['remaining'] = 0.0 + order["status"] = "closed" + order["filled"] = order["amount"] + order["remaining"] = 0.0 return order @pytest.fixture def limit_buy_order_old(): return { - 'id': 'mocked_limit_buy_old', - 'type': 'limit', - 'side': 'buy', - 'symbol': 'mocked', - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'price': 0.00001099, - 'amount': 90.99181073, - 'filled': 0.0, - 'remaining': 90.99181073, - 'status': 'open' + "id": "mocked_limit_buy_old", + "type": "limit", + "side": "buy", + "symbol": "mocked", + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "price": 0.00001099, + "amount": 90.99181073, + "filled": 0.0, + "remaining": 90.99181073, + "status": "open", } @pytest.fixture def limit_sell_order_old(): return { - 'id': 'mocked_limit_sell_old', - 'type': 'limit', - 'side': 'sell', - 'symbol': 'ETH/BTC', - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'price': 0.00001099, - 'amount': 90.99181073, - 'filled': 0.0, - 'remaining': 90.99181073, - 'status': 'open' + "id": "mocked_limit_sell_old", + "type": "limit", + "side": "sell", + "symbol": "ETH/BTC", + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "price": 0.00001099, + "amount": 90.99181073, + "filled": 0.0, + "remaining": 90.99181073, + "status": "open", } @pytest.fixture def limit_buy_order_old_partial(): return { - 'id': 'mocked_limit_buy_old_partial', - 'type': 'limit', - 'side': 'buy', - 'symbol': 'ETH/BTC', - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'price': 0.00001099, - 'amount': 90.99181073, - 'filled': 23.0, - 'cost': 90.99181073 * 23.0, - 'remaining': 67.99181073, - 'status': 'open' + "id": "mocked_limit_buy_old_partial", + "type": "limit", + "side": "buy", + "symbol": "ETH/BTC", + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "price": 0.00001099, + "amount": 90.99181073, + "filled": 23.0, + "cost": 90.99181073 * 
23.0, + "remaining": 67.99181073, + "status": "open", } @pytest.fixture def limit_buy_order_old_partial_canceled(limit_buy_order_old_partial): res = deepcopy(limit_buy_order_old_partial) - res['status'] = 'canceled' - res['fee'] = {'cost': 0.023, 'currency': 'ETH'} + res["status"] = "canceled" + res["fee"] = {"cost": 0.023, "currency": "ETH"} return res -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_buy_order_canceled_empty(request): # Indirect fixture # Documentation: # https://docs.pytest.org/en/latest/example/parametrize.html#apply-indirect-on-particular-arguments exchange_name = request.param - if exchange_name == 'kraken': + if exchange_name == "kraken": return { - 'info': {}, - 'id': 'AZNPFF-4AC4N-7MKTAT', - 'clientOrderId': None, - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'lastTradeTimestamp': None, - 'status': 'canceled', - 'symbol': 'LTC/USDT', - 'type': 'limit', - 'side': 'buy', - 'price': 34.3225, - 'cost': 0.0, - 'amount': 0.55, - 'filled': 0.0, - 'average': 0.0, - 'remaining': 0.55, - 'fee': {'cost': 0.0, 'rate': None, 'currency': 'USDT'}, - 'trades': [] + "info": {}, + "id": "AZNPFF-4AC4N-7MKTAT", + "clientOrderId": None, + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "lastTradeTimestamp": None, + "status": "canceled", + "symbol": "LTC/USDT", + "type": "limit", + "side": "buy", + "price": 34.3225, + "cost": 0.0, + "amount": 0.55, + "filled": 0.0, + "average": 0.0, + "remaining": 0.55, + "fee": {"cost": 0.0, "rate": None, "currency": "USDT"}, + "trades": [], } - elif exchange_name == 'binance': + elif exchange_name == "binance": return { - 'info': {}, - 'id': '1234512345', - 'clientOrderId': 'alb1234123', - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'lastTradeTimestamp': None, - 'symbol': 'LTC/USDT', - 'type': 'limit', - 'side': 'buy', - 'price': 0.016804, - 'amount': 0.55, - 'cost': 0.0, - 'average': None, - 'filled': 0.0, - 'remaining': 0.55, - 'status': 'canceled', - 'fee': None, - 'trades': None + "info": {}, + "id": "1234512345", + "clientOrderId": "alb1234123", + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "lastTradeTimestamp": None, + "symbol": "LTC/USDT", + "type": "limit", + "side": "buy", + "price": 0.016804, + "amount": 0.55, + "cost": 0.0, + "average": None, + "filled": 0.0, + "remaining": 0.55, + "status": "canceled", + "fee": None, + "trades": None, } else: return { - 'info': {}, - 'id': '1234512345', - 'clientOrderId': 'alb1234123', - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'lastTradeTimestamp': None, - 'symbol': 'LTC/USDT', - 'type': 'limit', - 'side': 'buy', - 'price': 0.016804, - 'amount': 0.55, - 'cost': 0.0, - 'average': None, - 'filled': 0.0, - 'remaining': 0.55, - 'status': 'canceled', - 'fee': None, - 'trades': None + "info": {}, + "id": "1234512345", + "clientOrderId": "alb1234123", + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "lastTradeTimestamp": None, + "symbol": "LTC/USDT", + "type": "limit", + "side": "buy", + "price": 0.016804, + "amount": 0.55, + "cost": 0.0, + "average": None, + "filled": 0.0, + "remaining": 0.55, + "status": "canceled", + "fee": None, + 
"trades": None, } @pytest.fixture def limit_sell_order_open(): return { - 'id': 'mocked_limit_sell', - 'type': 'limit', - 'side': 'sell', - 'symbol': 'mocked', - 'datetime': dt_now().isoformat(), - 'timestamp': dt_ts(), - 'price': 0.00001173, - 'amount': 90.99181073, - 'filled': 0.0, - 'remaining': 90.99181073, - 'status': 'open' + "id": "mocked_limit_sell", + "type": "limit", + "side": "sell", + "symbol": "mocked", + "datetime": dt_now().isoformat(), + "timestamp": dt_ts(), + "price": 0.00001173, + "amount": 90.99181073, + "filled": 0.0, + "remaining": 90.99181073, + "status": "open", } @pytest.fixture def limit_sell_order(limit_sell_order_open): order = deepcopy(limit_sell_order_open) - order['remaining'] = 0.0 - order['filled'] = order['amount'] - order['status'] = 'closed' + order["remaining"] = 0.0 + order["filled"] = order["amount"] + order["status"] = "closed" return order @pytest.fixture def order_book_l2(): - return MagicMock(return_value={ - 'bids': [ - [0.043936, 10.442], - [0.043935, 31.865], - [0.043933, 11.212], - [0.043928, 0.088], - [0.043925, 10.0], - [0.043921, 10.0], - [0.04392, 37.64], - [0.043899, 0.066], - [0.043885, 0.676], - [0.04387, 22.758] - ], - 'asks': [ - [0.043949, 0.346], - [0.04395, 0.608], - [0.043951, 3.948], - [0.043954, 0.288], - [0.043958, 9.277], - [0.043995, 1.566], - [0.044, 0.588], - [0.044002, 0.992], - [0.044003, 0.095], - [0.04402, 37.64] - ], - 'timestamp': None, - 'datetime': None, - 'nonce': 288004540 - }) + return MagicMock( + return_value={ + "bids": [ + [0.043936, 10.442], + [0.043935, 31.865], + [0.043933, 11.212], + [0.043928, 0.088], + [0.043925, 10.0], + [0.043921, 10.0], + [0.04392, 37.64], + [0.043899, 0.066], + [0.043885, 0.676], + [0.04387, 22.758], + ], + "asks": [ + [0.043949, 0.346], + [0.04395, 0.608], + [0.043951, 3.948], + [0.043954, 0.288], + [0.043958, 9.277], + [0.043995, 1.566], + [0.044, 0.588], + [0.044002, 0.992], + [0.044003, 0.095], + [0.04402, 37.64], + ], + "timestamp": None, + "datetime": None, + "nonce": 288004540, + } + ) @pytest.fixture def order_book_l2_usd(): - return MagicMock(return_value={ - 'symbol': 'LTC/USDT', - 'bids': [ - [25.563, 49.269], - [25.562, 83.0], - [25.56, 106.0], - [25.559, 15.381], - [25.558, 29.299], - [25.557, 34.624], - [25.556, 10.0], - [25.555, 14.684], - [25.554, 45.91], - [25.553, 50.0] - ], - 'asks': [ - [25.566, 14.27], - [25.567, 48.484], - [25.568, 92.349], - [25.572, 31.48], - [25.573, 23.0], - [25.574, 20.0], - [25.575, 89.606], - [25.576, 262.016], - [25.577, 178.557], - [25.578, 78.614] - ], - 'timestamp': None, - 'datetime': None, - 'nonce': 2372149736 - }) + return MagicMock( + return_value={ + "symbol": "LTC/USDT", + "bids": [ + [25.563, 49.269], + [25.562, 83.0], + [25.56, 106.0], + [25.559, 15.381], + [25.558, 29.299], + [25.557, 34.624], + [25.556, 10.0], + [25.555, 14.684], + [25.554, 45.91], + [25.553, 50.0], + ], + "asks": [ + [25.566, 14.27], + [25.567, 48.484], + [25.568, 92.349], + [25.572, 31.48], + [25.573, 23.0], + [25.574, 20.0], + [25.575, 89.606], + [25.576, 262.016], + [25.577, 178.557], + [25.578, 78.614], + ], + "timestamp": None, + "datetime": None, + "nonce": 2372149736, + } + ) @pytest.fixture @@ -2038,11 +1968,11 @@ def ohlcv_history_list(): return [ [ 1511686200000, # unix timestamp ms - 8.794e-05, # open - 8.948e-05, # high - 8.794e-05, # low - 8.88e-05, # close - 0.0877869, # volume (in quote currency) + 8.794e-05, # open + 8.948e-05, # high + 8.794e-05, # low + 8.88e-05, # close + 0.0877869, # volume (in quote currency) ], [ 1511686500000, @@ 
-2052,557 +1982,581 @@ def ohlcv_history_list(): 8.893e-05, 0.05874751, ], - [ - 1511686800000, - 8.891e-05, - 8.893e-05, - 8.875e-05, - 8.877e-05, - 0.7039405 - ] + [1511686800000, 8.891e-05, 8.893e-05, 8.875e-05, 8.877e-05, 0.7039405], ] @pytest.fixture def ohlcv_history(ohlcv_history_list): - return ohlcv_to_dataframe(ohlcv_history_list, "5m", pair="UNITTEST/BTC", - fill_missing=True, drop_incomplete=False) + return ohlcv_to_dataframe( + ohlcv_history_list, "5m", pair="UNITTEST/BTC", fill_missing=True, drop_incomplete=False + ) @pytest.fixture def tickers(): - return MagicMock(return_value={ - 'ETH/BTC': { - 'symbol': 'ETH/BTC', - 'timestamp': 1522014806207, - 'datetime': '2018-03-25T21:53:26.207Z', - 'high': 0.061697, - 'low': 0.060531, - 'bid': 0.061588, - 'bidVolume': 3.321, - 'ask': 0.061655, - 'askVolume': 0.212, - 'vwap': 0.06105296, - 'open': 0.060809, - 'close': 0.060761, - 'first': None, - 'last': 0.061588, - 'change': 1.281, - 'percentage': None, - 'average': None, - 'baseVolume': 111649.001, - 'quoteVolume': 6816.50176926, - 'info': {} - }, - 'TKN/BTC': { - 'symbol': 'TKN/BTC', - 'timestamp': 1522014806169, - 'datetime': '2018-03-25T21:53:26.169Z', - 'high': 0.01885, - 'low': 0.018497, - 'bid': 0.018799, - 'bidVolume': 8.38, - 'ask': 0.018802, - 'askVolume': 15.0, - 'vwap': 0.01869197, - 'open': 0.018585, - 'close': 0.018573, - 'last': 0.018799, - 'baseVolume': 81058.66, - 'quoteVolume': 2247.48374509, - }, - 'BLK/BTC': { - 'symbol': 'BLK/BTC', - 'timestamp': 1522014806072, - 'datetime': '2018-03-25T21:53:26.072Z', - 'high': 0.007745, - 'low': 0.007512, - 'bid': 0.007729, - 'bidVolume': 0.01, - 'ask': 0.007743, - 'askVolume': 21.37, - 'vwap': 0.00761466, - 'open': 0.007653, - 'close': 0.007652, - 'first': None, - 'last': 0.007743, - 'change': 1.176, - 'percentage': None, - 'average': None, - 'baseVolume': 295152.26, - 'quoteVolume': 1515.14631229, - 'info': {} - }, - 'LTC/BTC': { - 'symbol': 'LTC/BTC', - 'timestamp': 1523787258992, - 'datetime': '2018-04-15T10:14:19.992Z', - 'high': 0.015978, - 'low': 0.0157, - 'bid': 0.015954, - 'bidVolume': 12.83, - 'ask': 0.015957, - 'askVolume': 0.49, - 'vwap': 0.01581636, - 'open': 0.015823, - 'close': 0.01582, - 'first': None, - 'last': 0.015951, - 'change': 0.809, - 'percentage': None, - 'average': None, - 'baseVolume': 88620.68, - 'quoteVolume': 1401.65697943, - 'info': {} - }, - 'BTT/BTC': { - 'symbol': 'BTT/BTC', - 'timestamp': 1550936557206, - 'datetime': '2019-02-23T15:42:37.206Z', - 'high': 0.00000026, - 'low': 0.00000024, - 'bid': 0.00000024, - 'bidVolume': 2446894197.0, - 'ask': 0.00000025, - 'askVolume': 2447913837.0, - 'vwap': 0.00000025, - 'open': 0.00000026, - 'close': 0.00000024, - 'last': 0.00000024, - 'previousClose': 0.00000026, - 'change': -0.00000002, - 'percentage': -7.692, - 'average': None, - 'baseVolume': 4886464537.0, - 'quoteVolume': 1215.14489611, - 'info': {} - }, - 'HOT/BTC': { - 'symbol': 'HOT/BTC', - 'timestamp': 1572273518661, - 'datetime': '2019-10-28T14:38:38.661Z', - 'high': 0.00000011, - 'low': 0.00000009, - 'bid': 0.0000001, - 'bidVolume': 1476027288.0, - 'ask': 0.00000011, - 'askVolume': 820153831.0, - 'vwap': 0.0000001, - 'open': 0.00000009, - 'close': 0.00000011, - 'last': 0.00000011, - 'previousClose': 0.00000009, - 'change': 0.00000002, - 'percentage': 22.222, - 'average': None, - 'baseVolume': 1442290324.0, - 'quoteVolume': 143.78311994, - 'info': {} - }, - 'FUEL/BTC': { - 'symbol': 'FUEL/BTC', - 'timestamp': 1572340250771, - 'datetime': '2019-10-29T09:10:50.771Z', - 'high': 0.00000040, - 'low': 
0.00000035, - 'bid': 0.00000036, - 'bidVolume': 8932318.0, - 'ask': 0.00000037, - 'askVolume': 10140774.0, - 'vwap': 0.00000037, - 'open': 0.00000039, - 'close': 0.00000037, - 'last': 0.00000037, - 'previousClose': 0.00000038, - 'change': -0.00000002, - 'percentage': -5.128, - 'average': None, - 'baseVolume': 168927742.0, - 'quoteVolume': 62.68220262, - 'info': {} - }, - 'BTC/USDT': { - 'symbol': 'BTC/USDT', - 'timestamp': 1573758371399, - 'datetime': '2019-11-14T19:06:11.399Z', - 'high': 8800.0, - 'low': 8582.6, - 'bid': 8648.16, - 'bidVolume': 0.238771, - 'ask': 8648.72, - 'askVolume': 0.016253, - 'vwap': 8683.13647806, - 'open': 8759.7, - 'close': 8648.72, - 'last': 8648.72, - 'previousClose': 8759.67, - 'change': -110.98, - 'percentage': -1.267, - 'average': None, - 'baseVolume': 35025.943355, - 'quoteVolume': 304135046.4242901, - 'info': {} - }, - 'ETH/USDT': { - 'symbol': 'ETH/USDT', - 'timestamp': 1522014804118, - 'datetime': '2018-03-25T21:53:24.118Z', - 'high': 530.88, - 'low': 512.0, - 'bid': 529.73, - 'bidVolume': 0.2, - 'ask': 530.21, - 'askVolume': 0.2464, - 'vwap': 521.02438405, - 'open': 527.27, - 'close': 528.42, - 'first': None, - 'last': 530.21, - 'change': 0.558, - 'percentage': None, - 'average': None, - 'baseVolume': 72300.0659, - 'quoteVolume': 37670097.3022171, - 'info': {} - }, - 'TKN/USDT': { - 'symbol': 'TKN/USDT', - 'timestamp': 1522014806198, - 'datetime': '2018-03-25T21:53:26.198Z', - 'high': 8718.0, - 'low': 8365.77, - 'bid': 8603.64, - 'bidVolume': 0.15846, - 'ask': 8603.67, - 'askVolume': 0.069147, - 'vwap': 8536.35621697, - 'open': 8680.0, - 'close': 8680.0, - 'first': None, - 'last': 8603.67, - 'change': -0.879, - 'percentage': None, - 'average': None, - 'baseVolume': 30414.604298, - 'quoteVolume': 259629896.48584127, - 'info': {} - }, - 'BLK/USDT': { - 'symbol': 'BLK/USDT', - 'timestamp': 1522014806145, - 'datetime': '2018-03-25T21:53:26.145Z', - 'high': 66.95, - 'low': 63.38, - 'bid': 66.473, - 'bidVolume': 4.968, - 'ask': 66.54, - 'askVolume': 2.704, - 'vwap': 65.0526901, - 'open': 66.43, - 'close': 66.383, - 'first': None, - 'last': 66.5, - 'change': 0.105, - 'percentage': None, - 'average': None, - 'baseVolume': 294106.204, - 'quoteVolume': 19132399.743954, - 'info': {} - }, - 'LTC/USDT': { - 'symbol': 'LTC/USDT', - 'timestamp': 1523787257812, - 'datetime': '2018-04-15T10:14:18.812Z', - 'high': 129.94, - 'low': 124.0, - 'bid': 129.28, - 'bidVolume': 0.03201, - 'ask': 129.52, - 'askVolume': 0.14529, - 'vwap': 126.92838682, - 'open': 127.0, - 'close': 127.1, - 'first': None, - 'last': 129.28, - 'change': 1.795, - 'percentage': None, - 'average': None, - 'baseVolume': 59698.79897, - 'quoteVolume': 29132399.743954, - 'info': {} - }, - 'XRP/BTC': { - 'symbol': 'XRP/BTC', - 'timestamp': 1573758257534, - 'datetime': '2019-11-14T19:04:17.534Z', - 'high': 3.126e-05, - 'low': 3.061e-05, - 'bid': 3.093e-05, - 'bidVolume': 27901.0, - 'ask': 3.095e-05, - 'askVolume': 10551.0, - 'vwap': 3.091e-05, - 'open': 3.119e-05, - 'close': 3.094e-05, - 'last': 3.094e-05, - 'previousClose': 3.117e-05, - 'change': -2.5e-07, - 'percentage': -0.802, - 'average': None, - 'baseVolume': 37334921.0, - 'quoteVolume': 1154.19266394, - 'info': {} - }, - "NANO/USDT": { - "symbol": "NANO/USDT", - "timestamp": 1580469388244, - "datetime": "2020-01-31T11:16:28.244Z", - "high": 0.7519, - "low": 0.7154, - "bid": 0.7305, - "bidVolume": 300.3, - "ask": 0.7342, - "askVolume": 15.14, - "vwap": 0.73645591, - "open": 0.7154, - "close": 0.7342, - "last": 0.7342, - "previousClose": 0.7189, - 
"change": 0.0188, - "percentage": 2.628, - "average": None, - "baseVolume": 439472.44, - "quoteVolume": 323652.075405, - "info": {} - }, - # Example of leveraged pair with incomplete info - "ADAHALF/USDT": { - "symbol": "ADAHALF/USDT", - "timestamp": 1580469388244, - "datetime": "2020-01-31T11:16:28.244Z", - "high": None, - "low": None, - "bid": 0.7305, - "bidVolume": None, - "ask": 0.7342, - "askVolume": None, - "vwap": None, - "open": None, - "close": None, - "last": None, - "previousClose": None, - "change": None, - "percentage": 2.628, - "average": None, - "baseVolume": 0.0, - "quoteVolume": 0.0, - "info": {} - }, - "ADADOUBLE/USDT": { - "symbol": "ADADOUBLE/USDT", - "timestamp": 1580469388244, - "datetime": "2020-01-31T11:16:28.244Z", - "high": None, - "low": None, - "bid": 0.7305, - "bidVolume": None, - "ask": 0.7342, - "askVolume": None, - "vwap": None, - "open": None, - "close": None, - "last": 0, - "previousClose": None, - "change": None, - "percentage": 2.628, - "average": None, - "baseVolume": 0.0, - "quoteVolume": 0.0, - "info": {} - }, - }) + return MagicMock( + return_value={ + "ETH/BTC": { + "symbol": "ETH/BTC", + "timestamp": 1522014806207, + "datetime": "2018-03-25T21:53:26.207Z", + "high": 0.061697, + "low": 0.060531, + "bid": 0.061588, + "bidVolume": 3.321, + "ask": 0.061655, + "askVolume": 0.212, + "vwap": 0.06105296, + "open": 0.060809, + "close": 0.060761, + "first": None, + "last": 0.061588, + "change": 1.281, + "percentage": None, + "average": None, + "baseVolume": 111649.001, + "quoteVolume": 6816.50176926, + "info": {}, + }, + "TKN/BTC": { + "symbol": "TKN/BTC", + "timestamp": 1522014806169, + "datetime": "2018-03-25T21:53:26.169Z", + "high": 0.01885, + "low": 0.018497, + "bid": 0.018799, + "bidVolume": 8.38, + "ask": 0.018802, + "askVolume": 15.0, + "vwap": 0.01869197, + "open": 0.018585, + "close": 0.018573, + "last": 0.018799, + "baseVolume": 81058.66, + "quoteVolume": 2247.48374509, + }, + "BLK/BTC": { + "symbol": "BLK/BTC", + "timestamp": 1522014806072, + "datetime": "2018-03-25T21:53:26.072Z", + "high": 0.007745, + "low": 0.007512, + "bid": 0.007729, + "bidVolume": 0.01, + "ask": 0.007743, + "askVolume": 21.37, + "vwap": 0.00761466, + "open": 0.007653, + "close": 0.007652, + "first": None, + "last": 0.007743, + "change": 1.176, + "percentage": None, + "average": None, + "baseVolume": 295152.26, + "quoteVolume": 1515.14631229, + "info": {}, + }, + "LTC/BTC": { + "symbol": "LTC/BTC", + "timestamp": 1523787258992, + "datetime": "2018-04-15T10:14:19.992Z", + "high": 0.015978, + "low": 0.0157, + "bid": 0.015954, + "bidVolume": 12.83, + "ask": 0.015957, + "askVolume": 0.49, + "vwap": 0.01581636, + "open": 0.015823, + "close": 0.01582, + "first": None, + "last": 0.015951, + "change": 0.809, + "percentage": None, + "average": None, + "baseVolume": 88620.68, + "quoteVolume": 1401.65697943, + "info": {}, + }, + "BTT/BTC": { + "symbol": "BTT/BTC", + "timestamp": 1550936557206, + "datetime": "2019-02-23T15:42:37.206Z", + "high": 0.00000026, + "low": 0.00000024, + "bid": 0.00000024, + "bidVolume": 2446894197.0, + "ask": 0.00000025, + "askVolume": 2447913837.0, + "vwap": 0.00000025, + "open": 0.00000026, + "close": 0.00000024, + "last": 0.00000024, + "previousClose": 0.00000026, + "change": -0.00000002, + "percentage": -7.692, + "average": None, + "baseVolume": 4886464537.0, + "quoteVolume": 1215.14489611, + "info": {}, + }, + "HOT/BTC": { + "symbol": "HOT/BTC", + "timestamp": 1572273518661, + "datetime": "2019-10-28T14:38:38.661Z", + "high": 0.00000011, + "low": 
0.00000009, + "bid": 0.0000001, + "bidVolume": 1476027288.0, + "ask": 0.00000011, + "askVolume": 820153831.0, + "vwap": 0.0000001, + "open": 0.00000009, + "close": 0.00000011, + "last": 0.00000011, + "previousClose": 0.00000009, + "change": 0.00000002, + "percentage": 22.222, + "average": None, + "baseVolume": 1442290324.0, + "quoteVolume": 143.78311994, + "info": {}, + }, + "FUEL/BTC": { + "symbol": "FUEL/BTC", + "timestamp": 1572340250771, + "datetime": "2019-10-29T09:10:50.771Z", + "high": 0.00000040, + "low": 0.00000035, + "bid": 0.00000036, + "bidVolume": 8932318.0, + "ask": 0.00000037, + "askVolume": 10140774.0, + "vwap": 0.00000037, + "open": 0.00000039, + "close": 0.00000037, + "last": 0.00000037, + "previousClose": 0.00000038, + "change": -0.00000002, + "percentage": -5.128, + "average": None, + "baseVolume": 168927742.0, + "quoteVolume": 62.68220262, + "info": {}, + }, + "BTC/USDT": { + "symbol": "BTC/USDT", + "timestamp": 1573758371399, + "datetime": "2019-11-14T19:06:11.399Z", + "high": 8800.0, + "low": 8582.6, + "bid": 8648.16, + "bidVolume": 0.238771, + "ask": 8648.72, + "askVolume": 0.016253, + "vwap": 8683.13647806, + "open": 8759.7, + "close": 8648.72, + "last": 8648.72, + "previousClose": 8759.67, + "change": -110.98, + "percentage": -1.267, + "average": None, + "baseVolume": 35025.943355, + "quoteVolume": 304135046.4242901, + "info": {}, + }, + "ETH/USDT": { + "symbol": "ETH/USDT", + "timestamp": 1522014804118, + "datetime": "2018-03-25T21:53:24.118Z", + "high": 530.88, + "low": 512.0, + "bid": 529.73, + "bidVolume": 0.2, + "ask": 530.21, + "askVolume": 0.2464, + "vwap": 521.02438405, + "open": 527.27, + "close": 528.42, + "first": None, + "last": 530.21, + "change": 0.558, + "percentage": None, + "average": None, + "baseVolume": 72300.0659, + "quoteVolume": 37670097.3022171, + "info": {}, + }, + "TKN/USDT": { + "symbol": "TKN/USDT", + "timestamp": 1522014806198, + "datetime": "2018-03-25T21:53:26.198Z", + "high": 8718.0, + "low": 8365.77, + "bid": 8603.64, + "bidVolume": 0.15846, + "ask": 8603.67, + "askVolume": 0.069147, + "vwap": 8536.35621697, + "open": 8680.0, + "close": 8680.0, + "first": None, + "last": 8603.67, + "change": -0.879, + "percentage": None, + "average": None, + "baseVolume": 30414.604298, + "quoteVolume": 259629896.48584127, + "info": {}, + }, + "BLK/USDT": { + "symbol": "BLK/USDT", + "timestamp": 1522014806145, + "datetime": "2018-03-25T21:53:26.145Z", + "high": 66.95, + "low": 63.38, + "bid": 66.473, + "bidVolume": 4.968, + "ask": 66.54, + "askVolume": 2.704, + "vwap": 65.0526901, + "open": 66.43, + "close": 66.383, + "first": None, + "last": 66.5, + "change": 0.105, + "percentage": None, + "average": None, + "baseVolume": 294106.204, + "quoteVolume": 19132399.743954, + "info": {}, + }, + "LTC/USDT": { + "symbol": "LTC/USDT", + "timestamp": 1523787257812, + "datetime": "2018-04-15T10:14:18.812Z", + "high": 129.94, + "low": 124.0, + "bid": 129.28, + "bidVolume": 0.03201, + "ask": 129.52, + "askVolume": 0.14529, + "vwap": 126.92838682, + "open": 127.0, + "close": 127.1, + "first": None, + "last": 129.28, + "change": 1.795, + "percentage": None, + "average": None, + "baseVolume": 59698.79897, + "quoteVolume": 29132399.743954, + "info": {}, + }, + "XRP/BTC": { + "symbol": "XRP/BTC", + "timestamp": 1573758257534, + "datetime": "2019-11-14T19:04:17.534Z", + "high": 3.126e-05, + "low": 3.061e-05, + "bid": 3.093e-05, + "bidVolume": 27901.0, + "ask": 3.095e-05, + "askVolume": 10551.0, + "vwap": 3.091e-05, + "open": 3.119e-05, + "close": 3.094e-05, + "last": 
3.094e-05, + "previousClose": 3.117e-05, + "change": -2.5e-07, + "percentage": -0.802, + "average": None, + "baseVolume": 37334921.0, + "quoteVolume": 1154.19266394, + "info": {}, + }, + "NANO/USDT": { + "symbol": "NANO/USDT", + "timestamp": 1580469388244, + "datetime": "2020-01-31T11:16:28.244Z", + "high": 0.7519, + "low": 0.7154, + "bid": 0.7305, + "bidVolume": 300.3, + "ask": 0.7342, + "askVolume": 15.14, + "vwap": 0.73645591, + "open": 0.7154, + "close": 0.7342, + "last": 0.7342, + "previousClose": 0.7189, + "change": 0.0188, + "percentage": 2.628, + "average": None, + "baseVolume": 439472.44, + "quoteVolume": 323652.075405, + "info": {}, + }, + # Example of leveraged pair with incomplete info + "ADAHALF/USDT": { + "symbol": "ADAHALF/USDT", + "timestamp": 1580469388244, + "datetime": "2020-01-31T11:16:28.244Z", + "high": None, + "low": None, + "bid": 0.7305, + "bidVolume": None, + "ask": 0.7342, + "askVolume": None, + "vwap": None, + "open": None, + "close": None, + "last": None, + "previousClose": None, + "change": None, + "percentage": 2.628, + "average": None, + "baseVolume": 0.0, + "quoteVolume": 0.0, + "info": {}, + }, + "ADADOUBLE/USDT": { + "symbol": "ADADOUBLE/USDT", + "timestamp": 1580469388244, + "datetime": "2020-01-31T11:16:28.244Z", + "high": None, + "low": None, + "bid": 0.7305, + "bidVolume": None, + "ask": 0.7342, + "askVolume": None, + "vwap": None, + "open": None, + "close": None, + "last": 0, + "previousClose": None, + "change": None, + "percentage": 2.628, + "average": None, + "baseVolume": 0.0, + "quoteVolume": 0.0, + "info": {}, + }, + } + ) @pytest.fixture def dataframe_1m(testdatadir): - with (testdatadir / 'UNITTEST_BTC-1m.json').open('r') as data_file: - return ohlcv_to_dataframe(json.load(data_file), '1m', pair="UNITTEST/BTC", - fill_missing=True) + with (testdatadir / "UNITTEST_BTC-1m.json").open("r") as data_file: + return ohlcv_to_dataframe( + json.load(data_file), "1m", pair="UNITTEST/BTC", fill_missing=True + ) @pytest.fixture(scope="function") def trades_for_order(): - return [{ - 'info': { - 'id': 34567, - 'orderId': 123456, - 'price': '2.0', - 'qty': '8.00000000', - 'commission': '0.00800000', - 'commissionAsset': 'LTC', - 'time': 1521663363189, - 'isBuyer': True, - 'isMaker': False, - 'isBestMatch': True - }, - 'timestamp': 1521663363189, - 'datetime': '2018-03-21T20:16:03.189Z', - 'symbol': 'LTC/USDT', - 'id': '34567', - 'order': '123456', - 'type': None, - 'side': 'buy', - 'price': 2.0, - 'cost': 16.0, - 'amount': 8.0, - 'fee': { - 'cost': 0.008, - 'currency': 'LTC' + return [ + { + "info": { + "id": 34567, + "orderId": 123456, + "price": "2.0", + "qty": "8.00000000", + "commission": "0.00800000", + "commissionAsset": "LTC", + "time": 1521663363189, + "isBuyer": True, + "isMaker": False, + "isBestMatch": True, + }, + "timestamp": 1521663363189, + "datetime": "2018-03-21T20:16:03.189Z", + "symbol": "LTC/USDT", + "id": "34567", + "order": "123456", + "type": None, + "side": "buy", + "price": 2.0, + "cost": 16.0, + "amount": 8.0, + "fee": {"cost": 0.008, "currency": "LTC"}, } - }] + ] @pytest.fixture(scope="function") def trades_history(): - return [[1565798389463, '12618132aa9', None, 'buy', 0.019627, 0.04, 0.00078508], - [1565798399629, '1261813bb30', None, 'buy', 0.019627, 0.244, 0.004788987999999999], - [1565798399752, '1261813cc31', None, 'sell', 0.019626, 0.011, 0.00021588599999999999], - [1565798399862, '126181cc332', None, 'sell', 0.019626, 0.011, 0.00021588599999999999], - [1565798399862, '126181cc333', None, 'sell', 0.019626, 0.012, 
0.00021588599999999999], - [1565798399872, '1261aa81334', None, 'sell', 0.019626, 0.011, 0.00021588599999999999]] + return [ + [1565798389463, "12618132aa9", None, "buy", 0.019627, 0.04, 0.00078508], + [1565798399629, "1261813bb30", None, "buy", 0.019627, 0.244, 0.004788987999999999], + [1565798399752, "1261813cc31", None, "sell", 0.019626, 0.011, 0.00021588599999999999], + [1565798399862, "126181cc332", None, "sell", 0.019626, 0.011, 0.00021588599999999999], + [1565798399862, "126181cc333", None, "sell", 0.019626, 0.012, 0.00021588599999999999], + [1565798399872, "1261aa81334", None, "sell", 0.019626, 0.011, 0.00021588599999999999], + ] @pytest.fixture(scope="function") def trades_history_df(trades_history): trades = trades_list_to_df(trades_history) - trades['date'] = pd.to_datetime(trades['timestamp'], unit='ms', utc=True) + trades["date"] = pd.to_datetime(trades["timestamp"], unit="ms", utc=True) return trades @pytest.fixture(scope="function") def fetch_trades_result(): - return [{'info': ['0.01962700', '0.04000000', '1565798399.4631551', 'b', 'm', '', '126181329'], - 'timestamp': 1565798399463, - 'datetime': '2019-08-14T15:59:59.463Z', - 'symbol': 'ETH/BTC', - 'id': '126181329', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'buy', - 'price': 0.019627, - 'amount': 0.04, - 'cost': 0.00078508, - 'fee': None}, - {'info': ['0.01962700', '0.24400000', '1565798399.6291551', 'b', 'm', '', '126181330'], - 'timestamp': 1565798399629, - 'datetime': '2019-08-14T15:59:59.629Z', - 'symbol': 'ETH/BTC', - 'id': '126181330', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'buy', - 'price': 0.019627, - 'amount': 0.244, - 'cost': 0.004788987999999999, - 'fee': None}, - {'info': ['0.01962600', '0.01100000', '1565798399.7521551', 's', 'm', '', '126181331'], - 'timestamp': 1565798399752, - 'datetime': '2019-08-14T15:59:59.752Z', - 'symbol': 'ETH/BTC', - 'id': '126181331', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'sell', - 'price': 0.019626, - 'amount': 0.011, - 'cost': 0.00021588599999999999, - 'fee': None}, - {'info': ['0.01962600', '0.01100000', '1565798399.8621551', 's', 'm', '', '126181332'], - 'timestamp': 1565798399862, - 'datetime': '2019-08-14T15:59:59.862Z', - 'symbol': 'ETH/BTC', - 'id': '126181332', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'sell', - 'price': 0.019626, - 'amount': 0.011, - 'cost': 0.00021588599999999999, - 'fee': None}, - {'info': ['0.01952600', '0.01200000', '1565798399.8721551', 's', 'm', '', '126181333', - 1565798399872512133], - 'timestamp': 1565798399872, - 'datetime': '2019-08-14T15:59:59.872Z', - 'symbol': 'ETH/BTC', - 'id': '126181333', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'sell', - 'price': 0.019626, - 'amount': 0.011, - 'cost': 0.00021588599999999999, - 'fee': None}] + return [ + { + "info": ["0.01962700", "0.04000000", "1565798399.4631551", "b", "m", "", "126181329"], + "timestamp": 1565798399463, + "datetime": "2019-08-14T15:59:59.463Z", + "symbol": "ETH/BTC", + "id": "126181329", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "buy", + "price": 0.019627, + "amount": 0.04, + "cost": 0.00078508, + "fee": None, + }, + { + "info": ["0.01962700", "0.24400000", "1565798399.6291551", "b", "m", "", "126181330"], + "timestamp": 1565798399629, + "datetime": "2019-08-14T15:59:59.629Z", + "symbol": "ETH/BTC", + "id": "126181330", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "buy", + "price": 0.019627, + "amount": 
0.244, + "cost": 0.004788987999999999, + "fee": None, + }, + { + "info": ["0.01962600", "0.01100000", "1565798399.7521551", "s", "m", "", "126181331"], + "timestamp": 1565798399752, + "datetime": "2019-08-14T15:59:59.752Z", + "symbol": "ETH/BTC", + "id": "126181331", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "sell", + "price": 0.019626, + "amount": 0.011, + "cost": 0.00021588599999999999, + "fee": None, + }, + { + "info": ["0.01962600", "0.01100000", "1565798399.8621551", "s", "m", "", "126181332"], + "timestamp": 1565798399862, + "datetime": "2019-08-14T15:59:59.862Z", + "symbol": "ETH/BTC", + "id": "126181332", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "sell", + "price": 0.019626, + "amount": 0.011, + "cost": 0.00021588599999999999, + "fee": None, + }, + { + "info": [ + "0.01952600", + "0.01200000", + "1565798399.8721551", + "s", + "m", + "", + "126181333", + 1565798399872512133, + ], + "timestamp": 1565798399872, + "datetime": "2019-08-14T15:59:59.872Z", + "symbol": "ETH/BTC", + "id": "126181333", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "sell", + "price": 0.019626, + "amount": 0.011, + "cost": 0.00021588599999999999, + "fee": None, + }, + ] @pytest.fixture(scope="function") def trades_for_order2(): - return [{'info': {}, - 'timestamp': 1521663363189, - 'datetime': '2018-03-21T20:16:03.189Z', - 'symbol': 'LTC/ETH', - 'id': '34567', - 'order': '123456', - 'type': None, - 'side': 'buy', - 'price': 0.245441, - 'cost': 1.963528, - 'amount': 4.0, - 'fee': {'cost': 0.004, 'currency': 'LTC'}}, - {'info': {}, - 'timestamp': 1521663363189, - 'datetime': '2018-03-21T20:16:03.189Z', - 'symbol': 'LTC/ETH', - 'id': '34567', - 'order': '123456', - 'type': None, - 'side': 'buy', - 'price': 0.245441, - 'cost': 1.963528, - 'amount': 4.0, - 'fee': {'cost': 0.004, 'currency': 'LTC'}}] + return [ + { + "info": {}, + "timestamp": 1521663363189, + "datetime": "2018-03-21T20:16:03.189Z", + "symbol": "LTC/ETH", + "id": "34567", + "order": "123456", + "type": None, + "side": "buy", + "price": 0.245441, + "cost": 1.963528, + "amount": 4.0, + "fee": {"cost": 0.004, "currency": "LTC"}, + }, + { + "info": {}, + "timestamp": 1521663363189, + "datetime": "2018-03-21T20:16:03.189Z", + "symbol": "LTC/ETH", + "id": "34567", + "order": "123456", + "type": None, + "side": "buy", + "price": 0.245441, + "cost": 1.963528, + "amount": 4.0, + "fee": {"cost": 0.004, "currency": "LTC"}, + }, + ] @pytest.fixture def buy_order_fee(): return { - 'id': 'mocked_limit_buy_old', - 'type': 'limit', - 'side': 'buy', - 'symbol': 'mocked', - 'timestamp': dt_ts(dt_now() - timedelta(minutes=601)), - 'datetime': (dt_now() - timedelta(minutes=601)).isoformat(), - 'price': 0.245441, - 'amount': 8.0, - 'cost': 1.963528, - 'remaining': 90.99181073, - 'status': 'closed', - 'fee': None + "id": "mocked_limit_buy_old", + "type": "limit", + "side": "buy", + "symbol": "mocked", + "timestamp": dt_ts(dt_now() - timedelta(minutes=601)), + "datetime": (dt_now() - timedelta(minutes=601)).isoformat(), + "price": 0.245441, + "amount": 8.0, + "cost": 1.963528, + "remaining": 90.99181073, + "status": "closed", + "fee": None, } @pytest.fixture(scope="function") def edge_conf(default_conf): conf = deepcopy(default_conf) - conf['runmode'] = RunMode.DRY_RUN - conf['max_open_trades'] = -1 - conf['tradable_balance_ratio'] = 0.5 - conf['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT - conf['edge'] = { + conf["runmode"] = RunMode.DRY_RUN + conf["max_open_trades"] = -1 + 
conf["tradable_balance_ratio"] = 0.5 + conf["stake_amount"] = constants.UNLIMITED_STAKE_AMOUNT + conf["edge"] = { "enabled": True, "process_throttle_secs": 1800, "calculate_since_number_of_days": 14, @@ -2614,7 +2568,7 @@ def edge_conf(default_conf): "minimum_expectancy": 0.20, "min_trade_number": 15, "max_trade_duration_minute": 1440, - "remove_pumps": False + "remove_pumps": False, } return conf @@ -2623,36 +2577,12 @@ def edge_conf(default_conf): @pytest.fixture def rpc_balance(): return { - 'BTC': { - 'total': 12.0, - 'free': 12.0, - 'used': 0.0 - }, - 'ETH': { - 'total': 0.0, - 'free': 0.0, - 'used': 0.0 - }, - 'USDT': { - 'total': 10000.0, - 'free': 10000.0, - 'used': 0.0 - }, - 'LTC': { - 'total': 10.0, - 'free': 10.0, - 'used': 0.0 - }, - 'XRP': { - 'total': 0.1, - 'free': 0.01, - 'used': 0.0 - }, - 'EUR': { - 'total': 10.0, - 'free': 10.0, - 'used': 0.0 - }, + "BTC": {"total": 12.0, "free": 12.0, "used": 0.0}, + "ETH": {"total": 0.0, "free": 0.0, "used": 0.0}, + "USDT": {"total": 10000.0, "free": 10000.0, "used": 0.0}, + "LTC": {"total": 10.0, "free": 10.0, "used": 0.0}, + "XRP": {"total": 0.1, "free": 0.01, "used": 0.0}, + "EUR": {"total": 10.0, "free": 10.0, "used": 0.0}, } @@ -2667,10 +2597,11 @@ def import_fails() -> None: # Source of this test-method: # https://stackoverflow.com/questions/2481511/mocking-importerror-in-python import builtins + realimport = builtins.__import__ def mockedimport(name, *args, **kwargs): - if name in ["filelock", 'cysystemd.journal', 'uvloop']: + if name in ["filelock", "cysystemd.journal", "uvloop"]: raise ImportError(f"No module named '{name}'") return realimport(name, *args, **kwargs) @@ -2686,24 +2617,24 @@ def import_fails() -> None: @pytest.fixture(scope="function") def open_trade(): trade = Trade( - pair='ETH/BTC', + pair="ETH/BTC", open_rate=0.00001099, - exchange='binance', + exchange="binance", amount=90.99181073, fee_open=0.0, fee_close=0.0, stake_amount=1, open_date=dt_now() - timedelta(minutes=601), - is_open=True + is_open=True, ) trade.orders = [ Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.open_rate, - order_id='123456789', + order_id="123456789", status="closed", symbol=trade.pair, order_type="market", @@ -2723,24 +2654,24 @@ def open_trade(): @pytest.fixture(scope="function") def open_trade_usdt(): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", open_rate=2.0, - exchange='binance', + exchange="binance", amount=30.0, fee_open=0.0, fee_close=0.0, stake_amount=60.0, open_date=dt_now() - timedelta(minutes=601), - is_open=True + is_open=True, ) trade.orders = [ Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, ft_amount=trade.amount, ft_price=trade.open_rate, - order_id='123456789', + order_id="123456789", status="closed", symbol=trade.pair, order_type="market", @@ -2754,12 +2685,12 @@ def open_trade_usdt(): order_filled_date=trade.open_date, ), Order( - ft_order_side='exit', + ft_order_side="exit", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.open_rate, - order_id='123456789_exit', + order_id="123456789_exit", status="open", symbol=trade.pair, order_type="limit", @@ -2771,308 +2702,154 @@ def open_trade_usdt(): cost=trade.open_rate * trade.amount, order_date=trade.open_date, order_filled_date=trade.open_date, - ) + ), ] return trade -@pytest.fixture -def saved_hyperopt_results(): - hyperopt_res = [ - { - 'loss': 0.4366182531161519, - 'params_dict': { - 'mfi-value': 15, 
'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1190, 'roi_t2': 541, 'roi_t3': 408, 'roi_p1': 0.026035863879169705, 'roi_p2': 0.12508730043628782, 'roi_p3': 0.27766427921605896, 'stoploss': -0.2562930402099556}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 15, 'fastd-value': 20, 'adx-value': 25, 'rsi-value': 28, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 88, 'sell-fastd-value': 97, 'sell-adx-value': 51, 'sell-rsi-value': 67, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4287874435315165, 408: 0.15112316431545753, 949: 0.026035863879169705, 2139: 0}, 'stoploss': {'stoploss': -0.2562930402099556}}, # noqa: E501 - 'results_metrics': {'total_trades': 2, 'trade_count_long': 2, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 2, 'profit_mean': -0.01254995, 'profit_median': -0.012222, 'profit_total': -0.00125625, 'profit_total_abs': -2.50999, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=3930.0), 'stake_currency': 'BTC', 'strategy_name': 'SampleStrategy'}, # noqa: E501 - 'results_explanation': ' 2 trades. Avg profit -1.25%. Total profit -0.00125625 BTC ( -2.51Σ%). Avg duration 3930.0 min.', # noqa: E501 - 'total_profit': -0.00125625, - 'current_epoch': 1, - 'is_initial_point': True, - 'is_random': False, - 'is_best': True, - - }, { - 'loss': 20.0, - 'params_dict': { - 'mfi-value': 17, 'fastd-value': 38, 'adx-value': 48, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 96, 'sell-fastd-value': 68, 'sell-adx-value': 63, 'sell-rsi-value': 81, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 334, 'roi_t2': 683, 'roi_t3': 140, 'roi_p1': 0.06403981740598495, 'roi_p2': 0.055519840060645045, 'roi_p3': 0.3253712811342459, 'stoploss': -0.338070047333259}, # noqa: E501 - 'params_details': { - 'buy': {'mfi-value': 17, 'fastd-value': 38, 'adx-value': 48, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, # noqa: E501 - 'sell': {'sell-mfi-value': 96, 'sell-fastd-value': 68, 'sell-adx-value': 63, 'sell-rsi-value': 81, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, # noqa: E501 - 'roi': {0: 0.4449309386008759, 140: 0.11955965746663, 823: 0.06403981740598495, 1157: 0}, # noqa: E501 - 'stoploss': {'stoploss': -0.338070047333259}}, - 'results_metrics': {'total_trades': 1, 'trade_count_long': 1, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 1, 'profit_mean': 0.012357, 'profit_median': -0.012222, 'profit_total': 6.185e-05, 'profit_total_abs': 0.12357, 'max_drawdown': 0.23, 'max_drawdown_abs': -0.00125625, 'holding_avg': timedelta(minutes=1200.0)}, # noqa: E501 - 'results_explanation': ' 1 
trades. Avg profit 0.12%. Total profit 0.00006185 BTC ( 0.12Σ%). Avg duration 1200.0 min.', # noqa: E501 - 'total_profit': 6.185e-05, - 'current_epoch': 2, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 14.241196856510731, - 'params_dict': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 889, 'roi_t2': 533, 'roi_t3': 263, 'roi_p1': 0.04759065393663096, 'roi_p2': 0.1488819964638463, 'roi_p3': 0.4102801822104605, 'stoploss': -0.05394588767607611}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 25, 'fastd-value': 16, 'adx-value': 29, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 98, 'sell-fastd-value': 72, 'sell-adx-value': 51, 'sell-rsi-value': 82, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.6067528326109377, 263: 0.19647265040047726, 796: 0.04759065393663096, 1685: 0}, 'stoploss': {'stoploss': -0.05394588767607611}}, # noqa: E501 - 'results_metrics': {'total_trades': 621, 'trade_count_long': 621, 'trade_count_short': 0, 'wins': 320, 'draws': 0, 'losses': 301, 'profit_mean': -0.043883302093397747, 'profit_median': -0.012222, 'profit_total': -0.13639474, 'profit_total_abs': -272.515306, 'max_drawdown': 0.25, 'max_drawdown_abs': -272.515306, 'holding_avg': timedelta(minutes=1691.207729468599)}, # noqa: E501 - 'results_explanation': ' 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). 
Avg duration 1691.2 min.', # noqa: E501 - 'total_profit': -0.13639474, - 'current_epoch': 3, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 100000, - 'params_dict': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1402, 'roi_t2': 676, 'roi_t3': 215, 'roi_p1': 0.06264755784937427, 'roi_p2': 0.14258587851894644, 'roi_p3': 0.20671291201040828, 'stoploss': -0.11818343570194478}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 13, 'fastd-value': 35, 'adx-value': 39, 'rsi-value': 29, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 54, 'sell-adx-value': 63, 'sell-rsi-value': 93, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.411946348378729, 215: 0.2052334363683207, 891: 0.06264755784937427, 2293: 0}, 'stoploss': {'stoploss': -0.11818343570194478}}, # noqa: E501 - 'results_metrics': {'total_trades': 0, 'trade_count_long': 0, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit': 0.0, 'holding_avg': timedelta()}, # noqa: E501 - 'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). Avg duration nan min.', # noqa: E501 - 'total_profit': 0, 'current_epoch': 4, 'is_initial_point': True, 'is_random': False, 'is_best': False # noqa: E501 - }, { - 'loss': 0.22195522184191518, - 'params_dict': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 1269, 'roi_t2': 601, 'roi_t3': 444, 'roi_p1': 0.07280999507931168, 'roi_p2': 0.08946698095898986, 'roi_p3': 0.1454876733325284, 'stoploss': -0.18181041180901014}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 17, 'fastd-value': 21, 'adx-value': 38, 'rsi-value': 33, 'mfi-enabled': True, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 82, 'sell-adx-value': 78, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3077646493708299, 444: 0.16227697603830155, 1045: 0.07280999507931168, 2314: 0}, 'stoploss': {'stoploss': -0.18181041180901014}}, # noqa: E501 - 'results_metrics': {'total_trades': 14, 'trade_count_long': 14, 'trade_count_short': 0, 'wins': 6, 'draws': 0, 'losses': 8, 'profit_mean': -0.003539515, 'profit_median': -0.012222, 'profit_total': -0.002480140000000001, 'profit_total_abs': -4.955321, 'max_drawdown': 0.34, 'max_drawdown_abs': -4.955321, 'holding_avg': 
timedelta(minutes=3402.8571428571427)}, # noqa: E501 - 'results_explanation': ' 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). Avg duration 3402.9 min.', # noqa: E501 - 'total_profit': -0.002480140000000001, - 'current_epoch': 5, - 'is_initial_point': True, - 'is_random': False, - 'is_best': True - }, { - 'loss': 0.545315889154162, - 'params_dict': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower', 'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 319, 'roi_t2': 556, 'roi_t3': 216, 'roi_p1': 0.06251955472249589, 'roi_p2': 0.11659519602202795, 'roi_p3': 0.0953744132197762, 'stoploss': -0.024551752215582423}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 43, 'adx-value': 46, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 87, 'sell-fastd-value': 65, 'sell-adx-value': 94, 'sell-rsi-value': 63, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.2744891639643, 216: 0.17911475074452382, 772: 0.06251955472249589, 1091: 0}, 'stoploss': {'stoploss': -0.024551752215582423}}, # noqa: E501 - 'results_metrics': {'total_trades': 39, 'trade_count_long': 39, 'trade_count_short': 0, 'wins': 20, 'draws': 0, 'losses': 19, 'profit_mean': -0.0021400679487179478, 'profit_median': -0.012222, 'profit_total': -0.0041773, 'profit_total_abs': -8.346264999999997, 'max_drawdown': 0.45, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=636.9230769230769)}, # noqa: E501 - 'results_explanation': ' 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). 
Avg duration 636.9 min.', # noqa: E501 - 'total_profit': -0.0041773, - 'current_epoch': 6, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 4.713497421432944, - 'params_dict': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 771, 'roi_t2': 620, 'roi_t3': 145, 'roi_p1': 0.0586919200378493, 'roi_p2': 0.04984118697312542, 'roi_p3': 0.37521058680247044, 'stoploss': -0.14613268022709905}, # noqa: E501 - 'params_details': { - 'buy': {'mfi-value': 13, 'fastd-value': 41, 'adx-value': 21, 'rsi-value': 29, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 99, 'sell-fastd-value': 60, 'sell-adx-value': 81, 'sell-rsi-value': 69, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': False, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.4837436938134452, 145: 0.10853310701097472, 765: 0.0586919200378493, 1536: 0}, # noqa: E501 - 'stoploss': {'stoploss': -0.14613268022709905}}, # noqa: E501 - 'results_metrics': {'total_trades': 318, 'trade_count_long': 318, 'trade_count_short': 0, 'wins': 100, 'draws': 0, 'losses': 218, 'profit_mean': -0.0039833954716981146, 'profit_median': -0.012222, 'profit_total': -0.06339929, 'profit_total_abs': -126.67197600000004, 'max_drawdown': 0.50, 'max_drawdown_abs': -200.955321, 'holding_avg': timedelta(minutes=3140.377358490566)}, # noqa: E501 - 'results_explanation': ' 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). 
Avg duration 3140.4 min.', # noqa: E501 - 'total_profit': -0.06339929, - 'current_epoch': 7, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 20.0, # noqa: E501 - 'params_dict': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal', 'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 1149, 'roi_t2': 375, 'roi_t3': 289, 'roi_p1': 0.05571820757172588, 'roi_p2': 0.0606240398618907, 'roi_p3': 0.1729012220156157, 'stoploss': -0.1588514289110401}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 24, 'fastd-value': 43, 'adx-value': 33, 'rsi-value': 20, 'mfi-enabled': False, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': True, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 89, 'sell-fastd-value': 74, 'sell-adx-value': 70, 'sell-rsi-value': 70, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': False, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.2892434694492323, 289: 0.11634224743361658, 664: 0.05571820757172588, 1813: 0}, 'stoploss': {'stoploss': -0.1588514289110401}}, # noqa: E501 - 'results_metrics': {'total_trades': 1, 'trade_count_long': 1, 'trade_count_short': 0, 'wins': 0, 'draws': 1, 'losses': 0, 'profit_mean': 0.0, 'profit_median': 0.0, 'profit_total': 0.0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.52, 'holding_avg': timedelta(minutes=5340.0)}, # noqa: E501 - 'results_explanation': ' 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration 5340.0 min.', # noqa: E501 - 'total_profit': 0.0, - 'current_epoch': 8, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 2.4731817780991223, - 'params_dict': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1012, 'roi_t2': 584, 'roi_t3': 422, 'roi_p1': 0.036764323603472565, 'roi_p2': 0.10335480573205287, 'roi_p3': 0.10322347377503042, 'stoploss': -0.2780610808108503}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 22, 'fastd-value': 20, 'adx-value': 29, 'rsi-value': 40, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 65, 'sell-adx-value': 81, 'sell-rsi-value': 64, 'sell-mfi-enabled': True, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.2433426031105559, 422: 0.14011912933552545, 1006: 0.036764323603472565, 2018: 0}, 'stoploss': {'stoploss': -0.2780610808108503}}, # noqa: E501 - 'results_metrics': {'total_trades': 229, 'trade_count_long': 229, 'trade_count_short': 0, 'wins': 150, 'draws': 0, 'losses': 79, 'profit_mean': -0.0038433433624454144, 'profit_median': -0.012222, 'profit_total': -0.044050070000000004, 'profit_total_abs': -88.01256299999999, 'max_drawdown': 0.41, 'max_drawdown_abs': -150.955321, 'holding_avg': timedelta(minutes=6505.676855895196)}, # noqa: E501 - 'results_explanation': ' 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). 
Avg duration 6505.7 min.', # noqa: E501 - 'total_profit': -0.044050070000000004, # noqa: E501 - 'current_epoch': 9, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': -0.2604606005845212, # noqa: E501 - 'params_dict': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal', 'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal', 'roi_t1': 792, 'roi_t2': 464, 'roi_t3': 215, 'roi_p1': 0.04594053535385903, 'roi_p2': 0.09623192684243963, 'roi_p3': 0.04428219070850663, 'stoploss': -0.16992287161634415}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 23, 'fastd-value': 24, 'adx-value': 22, 'rsi-value': 24, 'mfi-enabled': False, 'fastd-enabled': False, 'adx-enabled': False, 'rsi-enabled': True, 'trigger': 'macd_cross_signal'}, 'sell': {'sell-mfi-value': 97, 'sell-fastd-value': 70, 'sell-adx-value': 64, 'sell-rsi-value': 80, 'sell-mfi-enabled': False, 'sell-fastd-enabled': True, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-sar_reversal'}, 'roi': {0: 0.18645465290480528, 215: 0.14217246219629864, 679: 0.04594053535385903, 1471: 0}, 'stoploss': {'stoploss': -0.16992287161634415}}, # noqa: E501 - 'results_metrics': {'total_trades': 4, 'trade_count_long': 4, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 4, 'profit_mean': 0.001080385, 'profit_median': -0.012222, 'profit_total': 0.00021629, 'profit_total_abs': 0.432154, 'max_drawdown': 0.13, 'max_drawdown_abs': -4.955321, 'holding_avg': timedelta(minutes=2850.0)}, # noqa: E501 - 'results_explanation': ' 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). Avg duration 2850.0 min.', # noqa: E501 - 'total_profit': 0.00021629, - 'current_epoch': 10, - 'is_initial_point': True, - 'is_random': False, - 'is_best': True - }, { - 'loss': 4.876465945994304, # noqa: E501 - 'params_dict': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower', 'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal', 'roi_t1': 579, 'roi_t2': 614, 'roi_t3': 273, 'roi_p1': 0.05307643172744114, 'roi_p2': 0.1352282078262871, 'roi_p3': 0.1913307406325751, 'stoploss': -0.25728526022513887}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 20, 'fastd-value': 32, 'adx-value': 49, 'rsi-value': 23, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': False, 'rsi-enabled': False, 'trigger': 'bb_lower'}, 'sell': {'sell-mfi-value': 75, 'sell-fastd-value': 56, 'sell-adx-value': 61, 'sell-rsi-value': 62, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-macd_cross_signal'}, 'roi': {0: 0.3796353801863034, 273: 0.18830463955372825, 887: 0.05307643172744114, 1466: 0}, 'stoploss': {'stoploss': -0.25728526022513887}}, # noqa: E501 - # New Hyperopt mode! 
- 'results_metrics': {'total_trades': 117, 'trade_count_long': 117, 'trade_count_short': 0, 'wins': 67, 'draws': 0, 'losses': 50, 'profit_mean': -0.012698609145299145, 'profit_median': -0.012222, 'profit_total': -0.07436117, 'profit_total_abs': -148.573727, 'max_drawdown': 0.52, 'max_drawdown_abs': -224.955321, 'holding_avg': timedelta(minutes=4282.5641025641025)}, # noqa: E501 - 'results_explanation': ' 117 trades. Avg profit -1.27%. Total profit -0.07436117 BTC (-148.57Σ%). Avg duration 4282.6 min.', # noqa: E501 - 'total_profit': -0.07436117, - 'current_epoch': 11, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - }, { - 'loss': 100000, - 'params_dict': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal', 'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper', 'roi_t1': 1156, 'roi_t2': 581, 'roi_t3': 408, 'roi_p1': 0.06860454019988212, 'roi_p2': 0.12473718444931989, 'roi_p3': 0.2896360635226823, 'stoploss': -0.30889015124682806}, # noqa: E501 - 'params_details': {'buy': {'mfi-value': 10, 'fastd-value': 36, 'adx-value': 31, 'rsi-value': 22, 'mfi-enabled': True, 'fastd-enabled': True, 'adx-enabled': True, 'rsi-enabled': False, 'trigger': 'sar_reversal'}, 'sell': {'sell-mfi-value': 80, 'sell-fastd-value': 71, 'sell-adx-value': 60, 'sell-rsi-value': 85, 'sell-mfi-enabled': False, 'sell-fastd-enabled': False, 'sell-adx-enabled': True, 'sell-rsi-enabled': True, 'sell-trigger': 'sell-bb_upper'}, 'roi': {0: 0.4829777881718843, 408: 0.19334172464920202, 989: 0.06860454019988212, 2145: 0}, 'stoploss': {'stoploss': -0.30889015124682806}}, # noqa: E501 - 'results_metrics': {'total_trades': 0, 'trade_count_long': 0, 'trade_count_short': 0, 'wins': 0, 'draws': 0, 'losses': 0, 'profit_mean': None, 'profit_median': None, 'profit_total': 0, 'profit_total_abs': 0.0, 'max_drawdown': 0.0, 'max_drawdown_abs': 0.0, 'holding_avg': timedelta()}, # noqa: E501 - 'results_explanation': ' 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration nan min.', # noqa: E501 - 'total_profit': 0, - 'current_epoch': 12, - 'is_initial_point': True, - 'is_random': False, - 'is_best': False - } - ] - - for res in hyperopt_res: - res['results_metrics']['holding_avg_s'] = res['results_metrics']['holding_avg' - ].total_seconds() - - return hyperopt_res - - -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_buy_order_usdt_open(): return { - 'id': 'mocked_limit_buy_usdt', - 'type': 'limit', - 'side': 'buy', - 'symbol': 'mocked', - 'datetime': dt_now().isoformat(), - 'timestamp': dt_ts(), - 'price': 2.00, - 'average': 2.00, - 'amount': 30.0, - 'filled': 0.0, - 'cost': 60.0, - 'remaining': 30.0, - 'status': 'open' + "id": "mocked_limit_buy_usdt", + "type": "limit", + "side": "buy", + "symbol": "mocked", + "datetime": dt_now().isoformat(), + "timestamp": dt_ts(), + "price": 2.00, + "average": 2.00, + "amount": 30.0, + "filled": 0.0, + "cost": 60.0, + "remaining": 30.0, + "status": "open", } -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_buy_order_usdt(limit_buy_order_usdt_open): order = deepcopy(limit_buy_order_usdt_open) - order['status'] = 'closed' - order['filled'] = order['amount'] - order['remaining'] = 0.0 + order["status"] = "closed" + order["filled"] = order["amount"] + order["remaining"] = 0.0 return order @pytest.fixture def limit_sell_order_usdt_open(): return { - 'id': 'mocked_limit_sell_usdt', - 'type': 'limit', - 'side': 'sell', - 'symbol': 'mocked', - 'datetime': dt_now().isoformat(), - 'timestamp': dt_ts(), - 'price': 2.20, - 'amount': 30.0, - 'cost': 66.0, - 'filled': 0.0, - 'remaining': 30.0, - 'status': 'open' + "id": "mocked_limit_sell_usdt", + "type": "limit", + "side": "sell", + "symbol": "mocked", + "datetime": dt_now().isoformat(), + "timestamp": dt_ts(), + "price": 2.20, + "amount": 30.0, + "cost": 66.0, + "filled": 0.0, + "remaining": 30.0, + "status": "open", } @pytest.fixture def limit_sell_order_usdt(limit_sell_order_usdt_open): order = deepcopy(limit_sell_order_usdt_open) - order['remaining'] = 0.0 - order['filled'] = order['amount'] - order['status'] = 'closed' + order["remaining"] = 0.0 + order["filled"] = order["amount"] + order["status"] = "closed" return order -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def market_buy_order_usdt(): return { - 'id': 'mocked_market_buy', - 'type': 'market', - 'side': 'buy', - 'symbol': 'mocked', - 'timestamp': dt_ts(), - 'datetime': dt_now().isoformat(), - 'price': 2.00, - 'amount': 30.0, - 'filled': 30.0, - 'remaining': 0.0, - 'status': 'closed' + "id": "mocked_market_buy", + "type": "market", + "side": "buy", + "symbol": "mocked", + "timestamp": dt_ts(), + "datetime": dt_now().isoformat(), + "price": 2.00, + "amount": 30.0, + "filled": 30.0, + "remaining": 0.0, + "status": "closed", } @pytest.fixture def market_buy_order_usdt_doublefee(market_buy_order_usdt): order = deepcopy(market_buy_order_usdt) - order['fee'] = None + order["fee"] = None # Market orders filled with 2 trades can have fees in different currencies # assuming the account runs out of BNB. 
- order['fees'] = [ - {'cost': 0.00025125, 'currency': 'BNB'}, - {'cost': 0.05030681, 'currency': 'USDT'}, + order["fees"] = [ + {"cost": 0.00025125, "currency": "BNB"}, + {"cost": 0.05030681, "currency": "USDT"}, + ] + order["trades"] = [ + { + "timestamp": None, + "datetime": None, + "symbol": "ETH/USDT", + "id": None, + "order": "123", + "type": "market", + "side": "sell", + "takerOrMaker": None, + "price": 2.01, + "amount": 25.0, + "cost": 50.25, + "fee": {"cost": 0.00025125, "currency": "BNB"}, + }, + { + "timestamp": None, + "datetime": None, + "symbol": "ETH/USDT", + "id": None, + "order": "123", + "type": "market", + "side": "sell", + "takerOrMaker": None, + "price": 2.0, + "amount": 5, + "cost": 10, + "fee": {"cost": 0.0100306, "currency": "USDT"}, + }, ] - order['trades'] = [{ - 'timestamp': None, - 'datetime': None, - 'symbol': 'ETH/USDT', - 'id': None, - 'order': '123', - 'type': 'market', - 'side': 'sell', - 'takerOrMaker': None, - 'price': 2.01, - 'amount': 25.0, - 'cost': 50.25, - 'fee': {'cost': 0.00025125, 'currency': 'BNB'} - }, { - 'timestamp': None, - 'datetime': None, - 'symbol': 'ETH/USDT', - 'id': None, - 'order': '123', - 'type': 'market', - 'side': 'sell', - 'takerOrMaker': None, - 'price': 2.0, - 'amount': 5, - 'cost': 10, - 'fee': {'cost': 0.0100306, 'currency': 'USDT'} - }] return order @pytest.fixture def market_sell_order_usdt(): return { - 'id': 'mocked_limit_sell', - 'type': 'market', - 'side': 'sell', - 'symbol': 'mocked', - 'timestamp': dt_ts(), - 'datetime': dt_now().isoformat(), - 'price': 2.20, - 'amount': 30.0, - 'filled': 30.0, - 'remaining': 0.0, - 'status': 'closed' + "id": "mocked_limit_sell", + "type": "market", + "side": "sell", + "symbol": "mocked", + "timestamp": dt_ts(), + "datetime": dt_now().isoformat(), + "price": 2.20, + "amount": 30.0, + "filled": 30.0, + "remaining": 0.0, + "status": "closed", } -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_order(limit_buy_order_usdt, limit_sell_order_usdt): - return { - 'buy': limit_buy_order_usdt, - 'sell': limit_sell_order_usdt - } + return {"buy": limit_buy_order_usdt, "sell": limit_sell_order_usdt} -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def limit_order_open(limit_buy_order_usdt_open, limit_sell_order_usdt_open): - return { - 'buy': limit_buy_order_usdt_open, - 'sell': limit_sell_order_usdt_open - } + return {"buy": limit_buy_order_usdt_open, "sell": limit_sell_order_usdt_open} -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def mark_ohlcv(): return [ [1630454400000, 2.77, 2.77, 2.73, 2.73, 0], @@ -3092,254 +2869,254 @@ def mark_ohlcv(): ] -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def funding_rate_history_hourly(): return [ { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000008, "timestamp": 1630454400000, - "datetime": "2021-09-01T00:00:00.000Z" + "datetime": "2021-09-01T00:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000004, "timestamp": 1630458000000, - "datetime": "2021-09-01T01:00:00.000Z" + "datetime": "2021-09-01T01:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000012, "timestamp": 1630461600000, - "datetime": "2021-09-01T02:00:00.000Z" + "datetime": "2021-09-01T02:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000003, "timestamp": 1630465200000, - "datetime": "2021-09-01T03:00:00.000Z" + "datetime": "2021-09-01T03:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000007, "timestamp": 1630468800000, - "datetime": 
"2021-09-01T04:00:00.000Z" + "datetime": "2021-09-01T04:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000003, "timestamp": 1630472400000, - "datetime": "2021-09-01T05:00:00.000Z" + "datetime": "2021-09-01T05:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000019, "timestamp": 1630476000000, - "datetime": "2021-09-01T06:00:00.000Z" + "datetime": "2021-09-01T06:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000003, "timestamp": 1630479600000, - "datetime": "2021-09-01T07:00:00.000Z" + "datetime": "2021-09-01T07:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000003, "timestamp": 1630483200000, - "datetime": "2021-09-01T08:00:00.000Z" + "datetime": "2021-09-01T08:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0, "timestamp": 1630486800000, - "datetime": "2021-09-01T09:00:00.000Z" + "datetime": "2021-09-01T09:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000013, "timestamp": 1630490400000, - "datetime": "2021-09-01T10:00:00.000Z" + "datetime": "2021-09-01T10:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000077, "timestamp": 1630494000000, - "datetime": "2021-09-01T11:00:00.000Z" + "datetime": "2021-09-01T11:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000072, "timestamp": 1630497600000, - "datetime": "2021-09-01T12:00:00.000Z" + "datetime": "2021-09-01T12:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": 0.000097, "timestamp": 1630501200000, - "datetime": "2021-09-01T13:00:00.000Z" + "datetime": "2021-09-01T13:00:00.000Z", }, ] -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def funding_rate_history_octohourly(): return [ { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000008, "timestamp": 1630454400000, - "datetime": "2021-09-01T00:00:00.000Z" + "datetime": "2021-09-01T00:00:00.000Z", }, { "symbol": "ADA/USDT:USDT", "fundingRate": -0.000003, "timestamp": 1630483200000, - "datetime": "2021-09-01T08:00:00.000Z" - } + "datetime": "2021-09-01T08:00:00.000Z", + }, ] -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def leverage_tiers(): return { "1000SHIB/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 50000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 50, - 'maintAmt': 0.0 + "minNotional": 0, + "maxNotional": 50000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50, + "maintAmt": 0.0, }, { - 'minNotional': 50000, - 'maxNotional': 150000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 20, - 'maintAmt': 750.0 + "minNotional": 50000, + "maxNotional": 150000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "maintAmt": 750.0, }, { - 'minNotional': 150000, - 'maxNotional': 250000, - 'maintenanceMarginRate': 0.05, - 'maxLeverage': 10, - 'maintAmt': 4500.0 + "minNotional": 150000, + "maxNotional": 250000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "maintAmt": 4500.0, }, { - 'minNotional': 250000, - 'maxNotional': 500000, - 'maintenanceMarginRate': 0.1, - 'maxLeverage': 5, - 'maintAmt': 17000.0 + "minNotional": 250000, + "maxNotional": 500000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "maintAmt": 17000.0, }, { - 'minNotional': 500000, - 'maxNotional': 1000000, - 'maintenanceMarginRate': 0.125, - 'maxLeverage': 4, - 'maintAmt': 29500.0 + "minNotional": 500000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4, + "maintAmt": 29500.0, }, { - 'minNotional': 1000000, - 'maxNotional': 2000000, - 'maintenanceMarginRate': 0.25, - 'maxLeverage': 2, - 'maintAmt': 
154500.0 + "minNotional": 1000000, + "maxNotional": 2000000, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2, + "maintAmt": 154500.0, }, { - 'minNotional': 2000000, - 'maxNotional': 30000000, - 'maintenanceMarginRate': 0.5, - 'maxLeverage': 1, - 'maintAmt': 654500.0 + "minNotional": 2000000, + "maxNotional": 30000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "maintAmt": 654500.0, }, ], "1INCH/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 5000, - 'maintenanceMarginRate': 0.012, - 'maxLeverage': 50, - 'maintAmt': 0.0 + "minNotional": 0, + "maxNotional": 5000, + "maintenanceMarginRate": 0.012, + "maxLeverage": 50, + "maintAmt": 0.0, }, { - 'minNotional': 5000, - 'maxNotional': 25000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 20, - 'maintAmt': 65.0 + "minNotional": 5000, + "maxNotional": 25000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "maintAmt": 65.0, }, { - 'minNotional': 25000, - 'maxNotional': 100000, - 'maintenanceMarginRate': 0.05, - 'maxLeverage': 10, - 'maintAmt': 690.0 + "minNotional": 25000, + "maxNotional": 100000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "maintAmt": 690.0, }, { - 'minNotional': 100000, - 'maxNotional': 250000, - 'maintenanceMarginRate': 0.1, - 'maxLeverage': 5, - 'maintAmt': 5690.0 + "minNotional": 100000, + "maxNotional": 250000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "maintAmt": 5690.0, }, { - 'minNotional': 250000, - 'maxNotional': 1000000, - 'maintenanceMarginRate': 0.125, - 'maxLeverage': 2, - 'maintAmt': 11940.0 + "minNotional": 250000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2, + "maintAmt": 11940.0, }, { - 'minNotional': 1000000, - 'maxNotional': 100000000, - 'maintenanceMarginRate': 0.5, - 'maxLeverage': 1, - 'maintAmt': 386940.0 + "minNotional": 1000000, + "maxNotional": 100000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "maintAmt": 386940.0, }, ], "AAVE/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 5000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 50, - 'maintAmt': 0.0 + "minNotional": 0, + "maxNotional": 5000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50, + "maintAmt": 0.0, }, { - 'minNotional': 5000, - 'maxNotional': 25000, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 25, - 'maintAmt': 75.0 + "minNotional": 5000, + "maxNotional": 25000, + "maintenanceMarginRate": 0.02, + "maxLeverage": 25, + "maintAmt": 75.0, }, { - 'minNotional': 25000, - 'maxNotional': 100000, - 'maintenanceMarginRate': 0.05, - 'maxLeverage': 10, - 'maintAmt': 700.0 + "minNotional": 25000, + "maxNotional": 100000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "maintAmt": 700.0, }, { - 'minNotional': 100000, - 'maxNotional': 250000, - 'maintenanceMarginRate': 0.1, - 'maxLeverage': 5, - 'maintAmt': 5700.0 + "minNotional": 100000, + "maxNotional": 250000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "maintAmt": 5700.0, }, { - 'minNotional': 250000, - 'maxNotional': 1000000, - 'maintenanceMarginRate': 0.125, - 'maxLeverage': 2, - 'maintAmt': 11950.0 + "minNotional": 250000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 2, + "maintAmt": 11950.0, }, { - 'minNotional': 10000000, - 'maxNotional': 50000000, - 'maintenanceMarginRate': 0.5, - 'maxLeverage': 1, - 'maintAmt': 386950.0 + "minNotional": 10000000, + "maxNotional": 50000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "maintAmt": 386950.0, }, ], "ADA/USDT:USDT": [ @@ -3348,274 +3125,274 @@ def leverage_tiers(): 
"maxNotional": 100000, "maintenanceMarginRate": 0.025, "maxLeverage": 20, - "maintAmt": 0.0 + "maintAmt": 0.0, }, { "minNotional": 100000, "maxNotional": 500000, "maintenanceMarginRate": 0.05, "maxLeverage": 10, - "maintAmt": 2500.0 + "maintAmt": 2500.0, }, { "minNotional": 500000, "maxNotional": 1000000, "maintenanceMarginRate": 0.1, "maxLeverage": 5, - "maintAmt": 27500.0 + "maintAmt": 27500.0, }, { "minNotional": 1000000, "maxNotional": 2000000, "maintenanceMarginRate": 0.15, "maxLeverage": 3, - "maintAmt": 77500.0 + "maintAmt": 77500.0, }, { "minNotional": 2000000, "maxNotional": 5000000, "maintenanceMarginRate": 0.25, "maxLeverage": 2, - "maintAmt": 277500.0 + "maintAmt": 277500.0, }, { "minNotional": 5000000, "maxNotional": 30000000, "maintenanceMarginRate": 0.5, "maxLeverage": 1, - "maintAmt": 1527500.0 + "maintAmt": 1527500.0, }, ], - 'XRP/USDT:USDT': [ + "XRP/USDT:USDT": [ { - "minNotional": 0, # stake(before leverage) = 0 + "minNotional": 0, # stake(before leverage) = 0 "maxNotional": 100000, # max stake(before leverage) = 5000 "maintenanceMarginRate": 0.025, "maxLeverage": 20, - "maintAmt": 0.0 + "maintAmt": 0.0, }, { "minNotional": 100000, # stake = 10000.0 "maxNotional": 500000, # max_stake = 50000.0 "maintenanceMarginRate": 0.05, "maxLeverage": 10, - "maintAmt": 2500.0 + "maintAmt": 2500.0, }, { - "minNotional": 500000, # stake = 100000.0 + "minNotional": 500000, # stake = 100000.0 "maxNotional": 1000000, # max_stake = 200000.0 "maintenanceMarginRate": 0.1, "maxLeverage": 5, - "maintAmt": 27500.0 + "maintAmt": 27500.0, }, { "minNotional": 1000000, # stake = 333333.3333333333 "maxNotional": 2000000, # max_stake = 666666.6666666666 "maintenanceMarginRate": 0.15, "maxLeverage": 3, - "maintAmt": 77500.0 + "maintAmt": 77500.0, }, { "minNotional": 2000000, # stake = 1000000.0 "maxNotional": 5000000, # max_stake = 2500000.0 "maintenanceMarginRate": 0.25, "maxLeverage": 2, - "maintAmt": 277500.0 + "maintAmt": 277500.0, }, { - "minNotional": 5000000, # stake = 5000000.0 + "minNotional": 5000000, # stake = 5000000.0 "maxNotional": 30000000, # max_stake = 30000000.0 "maintenanceMarginRate": 0.5, "maxLeverage": 1, - "maintAmt": 1527500.0 - } + "maintAmt": 1527500.0, + }, ], - 'BNB/USDT:USDT': [ + "BNB/USDT:USDT": [ { - "minNotional": 0, # stake = 0.0 + "minNotional": 0, # stake = 0.0 "maxNotional": 10000, # max_stake = 133.33333333333334 "maintenanceMarginRate": 0.0065, "maxLeverage": 75, - "maintAmt": 0.0 + "maintAmt": 0.0, }, { "minNotional": 10000, # stake = 200.0 "maxNotional": 50000, # max_stake = 1000.0 "maintenanceMarginRate": 0.01, "maxLeverage": 50, - "maintAmt": 35.0 + "maintAmt": 35.0, }, { - "minNotional": 50000, # stake = 2000.0 + "minNotional": 50000, # stake = 2000.0 "maxNotional": 250000, # max_stake = 10000.0 "maintenanceMarginRate": 0.02, "maxLeverage": 25, - "maintAmt": 535.0 + "maintAmt": 535.0, }, { - "minNotional": 250000, # stake = 25000.0 + "minNotional": 250000, # stake = 25000.0 "maxNotional": 1000000, # max_stake = 100000.0 "maintenanceMarginRate": 0.05, "maxLeverage": 10, - "maintAmt": 8035.0 + "maintAmt": 8035.0, }, { "minNotional": 1000000, # stake = 200000.0 "maxNotional": 2000000, # max_stake = 400000.0 "maintenanceMarginRate": 0.1, "maxLeverage": 5, - "maintAmt": 58035.0 + "maintAmt": 58035.0, }, { "minNotional": 2000000, # stake = 500000.0 "maxNotional": 5000000, # max_stake = 1250000.0 "maintenanceMarginRate": 0.125, "maxLeverage": 4, - "maintAmt": 108035.0 + "maintAmt": 108035.0, }, { - "minNotional": 5000000, # stake = 1666666.6666666667 + 
"minNotional": 5000000, # stake = 1666666.6666666667 "maxNotional": 10000000, # max_stake = 3333333.3333333335 "maintenanceMarginRate": 0.15, "maxLeverage": 3, - "maintAmt": 233035.0 + "maintAmt": 233035.0, }, { "minNotional": 10000000, # stake = 5000000.0 "maxNotional": 20000000, # max_stake = 10000000.0 "maintenanceMarginRate": 0.25, "maxLeverage": 2, - "maintAmt": 1233035.0 + "maintAmt": 1233035.0, }, { "minNotional": 20000000, # stake = 20000000.0 "maxNotional": 50000000, # max_stake = 50000000.0 "maintenanceMarginRate": 0.5, "maxLeverage": 1, - "maintAmt": 6233035.0 + "maintAmt": 6233035.0, }, ], - 'BTC/USDT:USDT': [ + "BTC/USDT:USDT": [ { - "minNotional": 0, # stake = 0.0 + "minNotional": 0, # stake = 0.0 "maxNotional": 50000, # max_stake = 400.0 "maintenanceMarginRate": 0.004, "maxLeverage": 125, - "maintAmt": 0.0 + "maintAmt": 0.0, }, { - "minNotional": 50000, # stake = 500.0 + "minNotional": 50000, # stake = 500.0 "maxNotional": 250000, # max_stake = 2500.0 "maintenanceMarginRate": 0.005, "maxLeverage": 100, - "maintAmt": 50.0 + "maintAmt": 50.0, }, { - "minNotional": 250000, # stake = 5000.0 + "minNotional": 250000, # stake = 5000.0 "maxNotional": 1000000, # max_stake = 20000.0 "maintenanceMarginRate": 0.01, "maxLeverage": 50, - "maintAmt": 1300.0 + "maintAmt": 1300.0, }, { "minNotional": 1000000, # stake = 50000.0 "maxNotional": 7500000, # max_stake = 375000.0 "maintenanceMarginRate": 0.025, "maxLeverage": 20, - "maintAmt": 16300.0 + "maintAmt": 16300.0, }, { - "minNotional": 7500000, # stake = 750000.0 + "minNotional": 7500000, # stake = 750000.0 "maxNotional": 40000000, # max_stake = 4000000.0 "maintenanceMarginRate": 0.05, "maxLeverage": 10, - "maintAmt": 203800.0 + "maintAmt": 203800.0, }, { - "minNotional": 40000000, # stake = 8000000.0 + "minNotional": 40000000, # stake = 8000000.0 "maxNotional": 100000000, # max_stake = 20000000.0 "maintenanceMarginRate": 0.1, "maxLeverage": 5, - "maintAmt": 2203800.0 + "maintAmt": 2203800.0, }, { "minNotional": 100000000, # stake = 25000000.0 "maxNotional": 200000000, # max_stake = 50000000.0 "maintenanceMarginRate": 0.125, "maxLeverage": 4, - "maintAmt": 4703800.0 + "maintAmt": 4703800.0, }, { "minNotional": 200000000, # stake = 66666666.666666664 "maxNotional": 400000000, # max_stake = 133333333.33333333 "maintenanceMarginRate": 0.15, "maxLeverage": 3, - "maintAmt": 9703800.0 + "maintAmt": 9703800.0, }, { "minNotional": 400000000, # stake = 200000000.0 "maxNotional": 600000000, # max_stake = 300000000.0 "maintenanceMarginRate": 0.25, "maxLeverage": 2, - "maintAmt": 4.97038E7 + "maintAmt": 4.97038e7, }, { - "minNotional": 600000000, # stake = 600000000.0 + "minNotional": 600000000, # stake = 600000000.0 "maxNotional": 1000000000, # max_stake = 1000000000.0 "maintenanceMarginRate": 0.5, "maxLeverage": 1, - "maintAmt": 1.997038E8 + "maintAmt": 1.997038e8, }, ], "ZEC/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 50000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 50, - 'maintAmt': 0.0 + "minNotional": 0, + "maxNotional": 50000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50, + "maintAmt": 0.0, }, { - 'minNotional': 50000, - 'maxNotional': 150000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 20, - 'maintAmt': 750.0 + "minNotional": 50000, + "maxNotional": 150000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "maintAmt": 750.0, }, { - 'minNotional': 150000, - 'maxNotional': 250000, - 'maintenanceMarginRate': 0.05, - 'maxLeverage': 10, - 'maintAmt': 4500.0 + "minNotional": 150000, + "maxNotional": 250000, + 
"maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "maintAmt": 4500.0, }, { - 'minNotional': 250000, - 'maxNotional': 500000, - 'maintenanceMarginRate': 0.1, - 'maxLeverage': 5, - 'maintAmt': 17000.0 + "minNotional": 250000, + "maxNotional": 500000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "maintAmt": 17000.0, }, { - 'minNotional': 500000, - 'maxNotional': 1000000, - 'maintenanceMarginRate': 0.125, - 'maxLeverage': 4, - 'maintAmt': 29500.0 + "minNotional": 500000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4, + "maintAmt": 29500.0, }, { - 'minNotional': 1000000, - 'maxNotional': 2000000, - 'maintenanceMarginRate': 0.25, - 'maxLeverage': 2, - 'maintAmt': 154500.0 + "minNotional": 1000000, + "maxNotional": 2000000, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2, + "maintAmt": 154500.0, }, { - 'minNotional': 2000000, - 'maxNotional': 30000000, - 'maintenanceMarginRate': 0.5, - 'maxLeverage': 1, - 'maintAmt': 654500.0 + "minNotional": 2000000, + "maxNotional": 30000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "maintAmt": 654500.0, }, - ] + ], } diff --git a/tests/conftest_hyperopt.py b/tests/conftest_hyperopt.py new file mode 100644 index 000000000..af4039a3c --- /dev/null +++ b/tests/conftest_hyperopt.py @@ -0,0 +1,1000 @@ +from datetime import timedelta + + +def hyperopt_test_result(): + """ + Sample hyperopt test result, used for some tests. + """ + hyperopt_res = [ + { + "loss": 0.4366182531161519, + "params_dict": { + "mfi-value": 15, + "fastd-value": 20, + "adx-value": 25, + "rsi-value": 28, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + "sell-mfi-value": 88, + "sell-fastd-value": 97, + "sell-adx-value": 51, + "sell-rsi-value": 67, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + "roi_t1": 1190, + "roi_t2": 541, + "roi_t3": 408, + "roi_p1": 0.026035863879169705, + "roi_p2": 0.12508730043628782, + "roi_p3": 0.27766427921605896, + "stoploss": -0.2562930402099556, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 15, + "fastd-value": 20, + "adx-value": 25, + "rsi-value": 28, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + }, + "sell": { + "sell-mfi-value": 88, + "sell-fastd-value": 97, + "sell-adx-value": 51, + "sell-rsi-value": 67, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + }, + "roi": { + 0: 0.4287874435315165, + 408: 0.15112316431545753, + 949: 0.026035863879169705, + 2139: 0, + }, + "stoploss": {"stoploss": -0.2562930402099556}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 2, + "trade_count_long": 2, + "trade_count_short": 0, + "wins": 0, + "draws": 0, + "losses": 2, + "profit_mean": -0.01254995, + "profit_median": -0.012222, + "profit_total": -0.00125625, + "profit_total_abs": -2.50999, + "max_drawdown_account": 0.23, + "max_drawdown_abs": -0.00125625, + "holding_avg": timedelta(minutes=3930.0), + "stake_currency": "BTC", + "strategy_name": "SampleStrategy", + }, # noqa: E501 + "results_explanation": " 2 trades. Avg profit -1.25%. Total profit -0.00125625 BTC ( -2.51Σ%). 
Avg duration 3930.0 min.", # noqa: E501 + "total_profit": -0.00125625, + "current_epoch": 1, + "is_initial_point": True, + "is_random": False, + "is_best": True, + }, + { + "loss": 20.0, + "params_dict": { + "mfi-value": 17, + "fastd-value": 38, + "adx-value": 48, + "rsi-value": 22, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + "sell-mfi-value": 96, + "sell-fastd-value": 68, + "sell-adx-value": 63, + "sell-rsi-value": 81, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + "roi_t1": 334, + "roi_t2": 683, + "roi_t3": 140, + "roi_p1": 0.06403981740598495, + "roi_p2": 0.055519840060645045, + "roi_p3": 0.3253712811342459, + "stoploss": -0.338070047333259, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 17, + "fastd-value": 38, + "adx-value": 48, + "rsi-value": 22, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + }, # noqa: E501 + "sell": { + "sell-mfi-value": 96, + "sell-fastd-value": 68, + "sell-adx-value": 63, + "sell-rsi-value": 81, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + }, # noqa: E501 + "roi": { + 0: 0.4449309386008759, + 140: 0.11955965746663, + 823: 0.06403981740598495, + 1157: 0, + }, # noqa: E501 + "stoploss": {"stoploss": -0.338070047333259}, + }, + "results_metrics": { + "total_trades": 1, + "trade_count_long": 1, + "trade_count_short": 0, + "wins": 0, + "draws": 0, + "losses": 1, + "profit_mean": 0.012357, + "profit_median": -0.012222, + "profit_total": 6.185e-05, + "profit_total_abs": 0.12357, + "max_drawdown_account": 0.23, + "max_drawdown_abs": -0.00125625, + "holding_avg": timedelta(minutes=1200.0), + }, # noqa: E501 + "results_explanation": " 1 trades. Avg profit 0.12%. Total profit 0.00006185 BTC ( 0.12Σ%). 
Avg duration 1200.0 min.", # noqa: E501 + "total_profit": 6.185e-05, + "current_epoch": 2, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 14.241196856510731, + "params_dict": { + "mfi-value": 25, + "fastd-value": 16, + "adx-value": 29, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "macd_cross_signal", + "sell-mfi-value": 98, + "sell-fastd-value": 72, + "sell-adx-value": 51, + "sell-rsi-value": 82, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + "roi_t1": 889, + "roi_t2": 533, + "roi_t3": 263, + "roi_p1": 0.04759065393663096, + "roi_p2": 0.1488819964638463, + "roi_p3": 0.4102801822104605, + "stoploss": -0.05394588767607611, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 25, + "fastd-value": 16, + "adx-value": 29, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "macd_cross_signal", + }, + "sell": { + "sell-mfi-value": 98, + "sell-fastd-value": 72, + "sell-adx-value": 51, + "sell-rsi-value": 82, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + }, + "roi": { + 0: 0.6067528326109377, + 263: 0.19647265040047726, + 796: 0.04759065393663096, + 1685: 0, + }, + "stoploss": {"stoploss": -0.05394588767607611}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 621, + "trade_count_long": 621, + "trade_count_short": 0, + "wins": 320, + "draws": 0, + "losses": 301, + "profit_mean": -0.043883302093397747, + "profit_median": -0.012222, + "profit_total": -0.13639474, + "profit_total_abs": -272.515306, + "max_drawdown_account": 0.25, + "max_drawdown_abs": -272.515306, + "holding_avg": timedelta(minutes=1691.207729468599), + }, # noqa: E501 + "results_explanation": " 621 trades. Avg profit -0.44%. Total profit -0.13639474 BTC (-272.52Σ%). 
Avg duration 1691.2 min.", # noqa: E501 + "total_profit": -0.13639474, + "current_epoch": 3, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 100000, + "params_dict": { + "mfi-value": 13, + "fastd-value": 35, + "adx-value": 39, + "rsi-value": 29, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + "sell-mfi-value": 87, + "sell-fastd-value": 54, + "sell-adx-value": 63, + "sell-rsi-value": 93, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + "roi_t1": 1402, + "roi_t2": 676, + "roi_t3": 215, + "roi_p1": 0.06264755784937427, + "roi_p2": 0.14258587851894644, + "roi_p3": 0.20671291201040828, + "stoploss": -0.11818343570194478, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 13, + "fastd-value": 35, + "adx-value": 39, + "rsi-value": 29, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + }, + "sell": { + "sell-mfi-value": 87, + "sell-fastd-value": 54, + "sell-adx-value": 63, + "sell-rsi-value": 93, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + }, + "roi": { + 0: 0.411946348378729, + 215: 0.2052334363683207, + 891: 0.06264755784937427, + 2293: 0, + }, + "stoploss": {"stoploss": -0.11818343570194478}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 0, + "trade_count_long": 0, + "trade_count_short": 0, + "wins": 0, + "draws": 0, + "losses": 0, + "profit_mean": None, + "profit_median": None, + "profit_total": 0, + "profit": 0.0, + "holding_avg": timedelta(), + }, # noqa: E501 + "results_explanation": " 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration nan min.", # noqa: E501 + "total_profit": 0, + "current_epoch": 4, + "is_initial_point": True, + "is_random": False, + "is_best": False, # noqa: E501 + }, + { + "loss": 0.22195522184191518, + "params_dict": { + "mfi-value": 17, + "fastd-value": 21, + "adx-value": 38, + "rsi-value": 33, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": False, + "trigger": "macd_cross_signal", + "sell-mfi-value": 87, + "sell-fastd-value": 82, + "sell-adx-value": 78, + "sell-rsi-value": 69, + "sell-mfi-enabled": True, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": False, + "sell-trigger": "sell-macd_cross_signal", + "roi_t1": 1269, + "roi_t2": 601, + "roi_t3": 444, + "roi_p1": 0.07280999507931168, + "roi_p2": 0.08946698095898986, + "roi_p3": 0.1454876733325284, + "stoploss": -0.18181041180901014, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 17, + "fastd-value": 21, + "adx-value": 38, + "rsi-value": 33, + "mfi-enabled": True, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": False, + "trigger": "macd_cross_signal", + }, + "sell": { + "sell-mfi-value": 87, + "sell-fastd-value": 82, + "sell-adx-value": 78, + "sell-rsi-value": 69, + "sell-mfi-enabled": True, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": False, + "sell-trigger": "sell-macd_cross_signal", + }, + "roi": { + 0: 0.3077646493708299, + 444: 0.16227697603830155, + 1045: 0.07280999507931168, + 2314: 0, + }, + "stoploss": {"stoploss": -0.18181041180901014}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 14, + "trade_count_long": 14, + "trade_count_short": 0, + "wins": 6, + "draws": 0, + "losses": 8, + "profit_mean": -0.003539515, + "profit_median": -0.012222, + "profit_total": -0.002480140000000001, + "profit_total_abs": -4.955321, + "max_drawdown_account": 0.34, + "max_drawdown_abs": -4.955321, + "holding_avg": timedelta(minutes=3402.8571428571427), + }, # noqa: E501 + "results_explanation": " 14 trades. Avg profit -0.35%. Total profit -0.00248014 BTC ( -4.96Σ%). 
Avg duration 3402.9 min.", # noqa: E501 + "total_profit": -0.002480140000000001, + "current_epoch": 5, + "is_initial_point": True, + "is_random": False, + "is_best": True, + }, + { + "loss": 0.545315889154162, + "params_dict": { + "mfi-value": 22, + "fastd-value": 43, + "adx-value": 46, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "bb_lower", + "sell-mfi-value": 87, + "sell-fastd-value": 65, + "sell-adx-value": 94, + "sell-rsi-value": 63, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + "roi_t1": 319, + "roi_t2": 556, + "roi_t3": 216, + "roi_p1": 0.06251955472249589, + "roi_p2": 0.11659519602202795, + "roi_p3": 0.0953744132197762, + "stoploss": -0.024551752215582423, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 22, + "fastd-value": 43, + "adx-value": 46, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "bb_lower", + }, + "sell": { + "sell-mfi-value": 87, + "sell-fastd-value": 65, + "sell-adx-value": 94, + "sell-rsi-value": 63, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + }, + "roi": { + 0: 0.2744891639643, + 216: 0.17911475074452382, + 772: 0.06251955472249589, + 1091: 0, + }, + "stoploss": {"stoploss": -0.024551752215582423}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 39, + "trade_count_long": 39, + "trade_count_short": 0, + "wins": 20, + "draws": 0, + "losses": 19, + "profit_mean": -0.0021400679487179478, + "profit_median": -0.012222, + "profit_total": -0.0041773, + "profit_total_abs": -8.346264999999997, + "max_drawdown_account": 0.45, + "max_drawdown_abs": -4.955321, + "holding_avg": timedelta(minutes=636.9230769230769), + }, # noqa: E501 + "results_explanation": " 39 trades. Avg profit -0.21%. Total profit -0.00417730 BTC ( -8.35Σ%). 
Avg duration 636.9 min.", # noqa: E501 + "total_profit": -0.0041773, + "current_epoch": 6, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 4.713497421432944, + "params_dict": { + "mfi-value": 13, + "fastd-value": 41, + "adx-value": 21, + "rsi-value": 29, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "bb_lower", + "sell-mfi-value": 99, + "sell-fastd-value": 60, + "sell-adx-value": 81, + "sell-rsi-value": 69, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": False, + "sell-trigger": "sell-macd_cross_signal", + "roi_t1": 771, + "roi_t2": 620, + "roi_t3": 145, + "roi_p1": 0.0586919200378493, + "roi_p2": 0.04984118697312542, + "roi_p3": 0.37521058680247044, + "stoploss": -0.14613268022709905, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 13, + "fastd-value": 41, + "adx-value": 21, + "rsi-value": 29, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "bb_lower", + }, + "sell": { + "sell-mfi-value": 99, + "sell-fastd-value": 60, + "sell-adx-value": 81, + "sell-rsi-value": 69, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": False, + "sell-trigger": "sell-macd_cross_signal", + }, + "roi": { + 0: 0.4837436938134452, + 145: 0.10853310701097472, + 765: 0.0586919200378493, + 1536: 0, + }, # noqa: E501 + "stoploss": {"stoploss": -0.14613268022709905}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 318, + "trade_count_long": 318, + "trade_count_short": 0, + "wins": 100, + "draws": 0, + "losses": 218, + "profit_mean": -0.0039833954716981146, + "profit_median": -0.012222, + "profit_total": -0.06339929, + "profit_total_abs": -126.67197600000004, + "max_drawdown_account": 0.50, + "max_drawdown_abs": -200.955321, + "holding_avg": timedelta(minutes=3140.377358490566), + }, # noqa: E501 + "results_explanation": " 318 trades. Avg profit -0.40%. Total profit -0.06339929 BTC (-126.67Σ%). 
Avg duration 3140.4 min.", # noqa: E501 + "total_profit": -0.06339929, + "current_epoch": 7, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 20.0, # noqa: E501 + "params_dict": { + "mfi-value": 24, + "fastd-value": 43, + "adx-value": 33, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "sar_reversal", + "sell-mfi-value": 89, + "sell-fastd-value": 74, + "sell-adx-value": 70, + "sell-rsi-value": 70, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": False, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + "roi_t1": 1149, + "roi_t2": 375, + "roi_t3": 289, + "roi_p1": 0.05571820757172588, + "roi_p2": 0.0606240398618907, + "roi_p3": 0.1729012220156157, + "stoploss": -0.1588514289110401, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 24, + "fastd-value": 43, + "adx-value": 33, + "rsi-value": 20, + "mfi-enabled": False, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": True, + "trigger": "sar_reversal", + }, + "sell": { + "sell-mfi-value": 89, + "sell-fastd-value": 74, + "sell-adx-value": 70, + "sell-rsi-value": 70, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": False, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + }, + "roi": { + 0: 0.2892434694492323, + 289: 0.11634224743361658, + 664: 0.05571820757172588, + 1813: 0, + }, + "stoploss": {"stoploss": -0.1588514289110401}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 1, + "trade_count_long": 1, + "trade_count_short": 0, + "wins": 0, + "draws": 1, + "losses": 0, + "profit_mean": 0.0, + "profit_median": 0.0, + "profit_total": 0.0, + "profit_total_abs": 0.0, + "max_drawdown_account": 0.0, + "max_drawdown_abs": 0.52, + "holding_avg": timedelta(minutes=5340.0), + }, # noqa: E501 + "results_explanation": " 1 trades. Avg profit 0.00%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration 5340.0 min.", # noqa: E501 + "total_profit": 0.0, + "current_epoch": 8, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 2.4731817780991223, + "params_dict": { + "mfi-value": 22, + "fastd-value": 20, + "adx-value": 29, + "rsi-value": 40, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "sar_reversal", + "sell-mfi-value": 97, + "sell-fastd-value": 65, + "sell-adx-value": 81, + "sell-rsi-value": 64, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + "roi_t1": 1012, + "roi_t2": 584, + "roi_t3": 422, + "roi_p1": 0.036764323603472565, + "roi_p2": 0.10335480573205287, + "roi_p3": 0.10322347377503042, + "stoploss": -0.2780610808108503, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 22, + "fastd-value": 20, + "adx-value": 29, + "rsi-value": 40, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "sar_reversal", + }, + "sell": { + "sell-mfi-value": 97, + "sell-fastd-value": 65, + "sell-adx-value": 81, + "sell-rsi-value": 64, + "sell-mfi-enabled": True, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + }, + "roi": { + 0: 0.2433426031105559, + 422: 0.14011912933552545, + 1006: 0.036764323603472565, + 2018: 0, + }, + "stoploss": {"stoploss": -0.2780610808108503}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 229, + "trade_count_long": 229, + "trade_count_short": 0, + "wins": 150, + "draws": 0, + "losses": 79, + "profit_mean": -0.0038433433624454144, + "profit_median": -0.012222, + "profit_total": -0.044050070000000004, + "profit_total_abs": -88.01256299999999, + "max_drawdown_account": 0.41, + "max_drawdown_abs": -150.955321, + "holding_avg": timedelta(minutes=6505.676855895196), + }, # noqa: E501 + "results_explanation": " 229 trades. Avg profit -0.38%. Total profit -0.04405007 BTC ( -88.01Σ%). 
Avg duration 6505.7 min.", # noqa: E501 + "total_profit": -0.044050070000000004, # noqa: E501 + "current_epoch": 9, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": -0.2604606005845212, # noqa: E501 + "params_dict": { + "mfi-value": 23, + "fastd-value": 24, + "adx-value": 22, + "rsi-value": 24, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + "sell-mfi-value": 97, + "sell-fastd-value": 70, + "sell-adx-value": 64, + "sell-rsi-value": 80, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + "roi_t1": 792, + "roi_t2": 464, + "roi_t3": 215, + "roi_p1": 0.04594053535385903, + "roi_p2": 0.09623192684243963, + "roi_p3": 0.04428219070850663, + "stoploss": -0.16992287161634415, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 23, + "fastd-value": 24, + "adx-value": 22, + "rsi-value": 24, + "mfi-enabled": False, + "fastd-enabled": False, + "adx-enabled": False, + "rsi-enabled": True, + "trigger": "macd_cross_signal", + }, + "sell": { + "sell-mfi-value": 97, + "sell-fastd-value": 70, + "sell-adx-value": 64, + "sell-rsi-value": 80, + "sell-mfi-enabled": False, + "sell-fastd-enabled": True, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-sar_reversal", + }, + "roi": { + 0: 0.18645465290480528, + 215: 0.14217246219629864, + 679: 0.04594053535385903, + 1471: 0, + }, + "stoploss": {"stoploss": -0.16992287161634415}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 4, + "trade_count_long": 4, + "trade_count_short": 0, + "wins": 0, + "draws": 0, + "losses": 4, + "profit_mean": 0.001080385, + "profit_median": -0.012222, + "profit_total": 0.00021629, + "profit_total_abs": 0.432154, + "max_drawdown_account": 0.13, + "max_drawdown_abs": -4.955321, + "holding_avg": timedelta(minutes=2850.0), + }, # noqa: E501 + "results_explanation": " 4 trades. Avg profit 0.11%. Total profit 0.00021629 BTC ( 0.43Σ%). 
Avg duration 2850.0 min.", # noqa: E501 + "total_profit": 0.00021629, + "current_epoch": 10, + "is_initial_point": True, + "is_random": False, + "is_best": True, + }, + { + "loss": 4.876465945994304, # noqa: E501 + "params_dict": { + "mfi-value": 20, + "fastd-value": 32, + "adx-value": 49, + "rsi-value": 23, + "mfi-enabled": True, + "fastd-enabled": True, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "bb_lower", + "sell-mfi-value": 75, + "sell-fastd-value": 56, + "sell-adx-value": 61, + "sell-rsi-value": 62, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + "roi_t1": 579, + "roi_t2": 614, + "roi_t3": 273, + "roi_p1": 0.05307643172744114, + "roi_p2": 0.1352282078262871, + "roi_p3": 0.1913307406325751, + "stoploss": -0.25728526022513887, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 20, + "fastd-value": 32, + "adx-value": 49, + "rsi-value": 23, + "mfi-enabled": True, + "fastd-enabled": True, + "adx-enabled": False, + "rsi-enabled": False, + "trigger": "bb_lower", + }, + "sell": { + "sell-mfi-value": 75, + "sell-fastd-value": 56, + "sell-adx-value": 61, + "sell-rsi-value": 62, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-macd_cross_signal", + }, + "roi": { + 0: 0.3796353801863034, + 273: 0.18830463955372825, + 887: 0.05307643172744114, + 1466: 0, + }, + "stoploss": {"stoploss": -0.25728526022513887}, + }, # noqa: E501 + # New Hyperopt mode! + "results_metrics": { + "total_trades": 117, + "trade_count_long": 117, + "trade_count_short": 0, + "wins": 67, + "draws": 0, + "losses": 50, + "profit_mean": -0.012698609145299145, + "profit_median": -0.012222, + "profit_total": -0.07436117, + "profit_total_abs": -148.573727, + "max_drawdown_account": 0.52, + "max_drawdown_abs": -224.955321, + "holding_avg": timedelta(minutes=4282.5641025641025), + }, # noqa: E501 + "results_explanation": " 117 trades. Avg profit -1.27%. Total profit -0.07436117 BTC (-148.57Σ%). 
Avg duration 4282.6 min.", # noqa: E501 + "total_profit": -0.07436117, + "current_epoch": 11, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + { + "loss": 100000, + "params_dict": { + "mfi-value": 10, + "fastd-value": 36, + "adx-value": 31, + "rsi-value": 22, + "mfi-enabled": True, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": False, + "trigger": "sar_reversal", + "sell-mfi-value": 80, + "sell-fastd-value": 71, + "sell-adx-value": 60, + "sell-rsi-value": 85, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + "roi_t1": 1156, + "roi_t2": 581, + "roi_t3": 408, + "roi_p1": 0.06860454019988212, + "roi_p2": 0.12473718444931989, + "roi_p3": 0.2896360635226823, + "stoploss": -0.30889015124682806, + }, # noqa: E501 + "params_details": { + "buy": { + "mfi-value": 10, + "fastd-value": 36, + "adx-value": 31, + "rsi-value": 22, + "mfi-enabled": True, + "fastd-enabled": True, + "adx-enabled": True, + "rsi-enabled": False, + "trigger": "sar_reversal", + }, + "sell": { + "sell-mfi-value": 80, + "sell-fastd-value": 71, + "sell-adx-value": 60, + "sell-rsi-value": 85, + "sell-mfi-enabled": False, + "sell-fastd-enabled": False, + "sell-adx-enabled": True, + "sell-rsi-enabled": True, + "sell-trigger": "sell-bb_upper", + }, + "roi": { + 0: 0.4829777881718843, + 408: 0.19334172464920202, + 989: 0.06860454019988212, + 2145: 0, + }, + "stoploss": {"stoploss": -0.30889015124682806}, + }, # noqa: E501 + "results_metrics": { + "total_trades": 0, + "trade_count_long": 0, + "trade_count_short": 0, + "wins": 0, + "draws": 0, + "losses": 0, + "profit_mean": None, + "profit_median": None, + "profit_total": 0, + "profit_total_abs": 0.0, + "max_drawdown_account": 0.0, + "max_drawdown_abs": 0.0, + "holding_avg": timedelta(), + }, # noqa: E501 + "results_explanation": " 0 trades. Avg profit nan%. Total profit 0.00000000 BTC ( 0.00Σ%). 
Avg duration nan min.", # noqa: E501 + "total_profit": 0, + "current_epoch": 12, + "is_initial_point": True, + "is_random": False, + "is_best": False, + }, + ] + + for res in hyperopt_res: + res["results_metrics"]["holding_avg_s"] = res["results_metrics"][ + "holding_avg" + ].total_seconds() + return hyperopt_res diff --git a/tests/conftest_trades.py b/tests/conftest_trades.py index 9ac43d73d..7103b5169 100644 --- a/tests/conftest_trades.py +++ b/tests/conftest_trades.py @@ -20,23 +20,23 @@ def direc(is_short: bool): def mock_order_1(is_short: bool): return { - 'id': f'1234_{direc(is_short)}', - 'symbol': 'ETH/BTC', - 'status': 'open', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.123, - 'average': 0.123, - 'amount': 123.0, - 'filled': 50.0, - 'cost': 15.129, - 'remaining': 123.0 - 50.0, + "id": f"1234_{direc(is_short)}", + "symbol": "ETH/BTC", + "status": "open", + "side": entry_side(is_short), + "type": "limit", + "price": 0.123, + "average": 0.123, + "amount": 123.0, + "filled": 50.0, + "cost": 15.129, + "remaining": 123.0 - 50.0, } def mock_trade_1(fee, is_short: bool): trade = Trade( - pair='ETH/BTC', + pair="ETH/BTC", stake_amount=0.001, amount=123.0, amount_requested=123.0, @@ -45,43 +45,43 @@ def mock_trade_1(fee, is_short: bool): is_open=True, open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17), open_rate=0.123, - exchange='binance', - strategy='StrategyTestV3', + exchange="binance", + strategy="StrategyTestV3", timeframe=5, - is_short=is_short + is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_1(is_short), 'ETH/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_1(is_short), "ETH/BTC", entry_side(is_short)) trade.orders.append(o) return trade def mock_order_2(is_short: bool): return { - 'id': f'1235_{direc(is_short)}', - 'symbol': 'ETC/BTC', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": f"1235_{direc(is_short)}", + "symbol": "ETC/BTC", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 0.123, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } def mock_order_2_sell(is_short: bool): return { - 'id': f'12366_{direc(is_short)}', - 'symbol': 'ETC/BTC', - 'status': 'closed', - 'side': exit_side(is_short), - 'type': 'limit', - 'price': 0.128, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": f"12366_{direc(is_short)}", + "symbol": "ETC/BTC", + "status": "closed", + "side": exit_side(is_short), + "type": "limit", + "price": 0.128, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } @@ -90,7 +90,7 @@ def mock_trade_2(fee, is_short: bool): Closed trade... 
""" trade = Trade( - pair='ETC/BTC', + pair="ETC/BTC", stake_amount=0.001, amount=123.0, amount_requested=123.0, @@ -100,51 +100,51 @@ def mock_trade_2(fee, is_short: bool): close_rate=0.128, close_profit=-0.005 if is_short else 0.005, close_profit_abs=-0.005584127 if is_short else 0.000584127, - exchange='binance', + exchange="binance", is_open=False, - strategy='StrategyTestV3', + strategy="StrategyTestV3", timeframe=5, - enter_tag='TEST1', - exit_reason='sell_signal', + enter_tag="TEST1", + exit_reason="sell_signal", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20), close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2), - is_short=is_short + is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_2(is_short), 'ETC/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_2(is_short), "ETC/BTC", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_2_sell(is_short), 'ETC/BTC', exit_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_2_sell(is_short), "ETC/BTC", exit_side(is_short)) trade.orders.append(o) return trade def mock_order_3(is_short: bool): return { - 'id': f'41231a12a_{direc(is_short)}', - 'symbol': 'XRP/BTC', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.05, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": f"41231a12a_{direc(is_short)}", + "symbol": "XRP/BTC", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 0.05, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } def mock_order_3_sell(is_short: bool): return { - 'id': f'41231a666a_{direc(is_short)}', - 'symbol': 'XRP/BTC', - 'status': 'closed', - 'side': exit_side(is_short), - 'type': 'stop_loss_limit', - 'price': 0.06, - 'average': 0.06, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": f"41231a666a_{direc(is_short)}", + "symbol": "XRP/BTC", + "status": "closed", + "side": exit_side(is_short), + "type": "stop_loss_limit", + "price": 0.06, + "average": 0.06, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } @@ -153,7 +153,7 @@ def mock_trade_3(fee, is_short: bool): Closed trade """ trade = Trade( - pair='XRP/BTC', + pair="XRP/BTC", stake_amount=0.001, amount=123.0, amount_requested=123.0, @@ -163,34 +163,34 @@ def mock_trade_3(fee, is_short: bool): close_rate=0.06, close_profit=-0.01 if is_short else 0.01, close_profit_abs=-0.001155 if is_short else 0.000155, - exchange='binance', + exchange="binance", is_open=False, - strategy='StrategyTestV3', + strategy="StrategyTestV3", timeframe=5, - exit_reason='roi', + exit_reason="roi", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20), close_date=datetime.now(tz=timezone.utc), - is_short=is_short + is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_3(is_short), 'XRP/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_3(is_short), "XRP/BTC", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_3_sell(is_short), 'XRP/BTC', exit_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_3_sell(is_short), "XRP/BTC", exit_side(is_short)) trade.orders.append(o) return trade def mock_order_4(is_short: bool): return { - 'id': f'prod_buy_{direc(is_short)}_12345', - 'symbol': 'ETC/BTC', - 'status': 'open', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 0.0, 
- 'cost': 15.129, - 'remaining': 123.0, + "id": f"prod_buy_{direc(is_short)}_12345", + "symbol": "ETC/BTC", + "status": "open", + "side": entry_side(is_short), + "type": "limit", + "price": 0.123, + "amount": 123.0, + "filled": 0.0, + "cost": 15.129, + "remaining": 123.0, } @@ -199,7 +199,7 @@ def mock_trade_4(fee, is_short: bool): Simulate prod entry """ trade = Trade( - pair='ETC/BTC', + pair="ETC/BTC", stake_amount=0.001, amount=123.0, amount_requested=124.0, @@ -208,44 +208,44 @@ def mock_trade_4(fee, is_short: bool): open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14), is_open=True, open_rate=0.123, - exchange='binance', - strategy='StrategyTestV3', + exchange="binance", + strategy="StrategyTestV3", timeframe=5, is_short=is_short, - stop_loss_pct=0.10 + stop_loss_pct=0.10, ) - o = Order.parse_from_ccxt_object(mock_order_4(is_short), 'ETC/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_4(is_short), "ETC/BTC", entry_side(is_short)) trade.orders.append(o) return trade def mock_order_5(is_short: bool): return { - 'id': f'prod_buy_{direc(is_short)}_3455', - 'symbol': 'XRP/BTC', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": f"prod_buy_{direc(is_short)}_3455", + "symbol": "XRP/BTC", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 0.123, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } def mock_order_5_stoploss(is_short: bool): return { - 'id': f'prod_stoploss_{direc(is_short)}_3455', - 'symbol': 'XRP/BTC', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'stop_loss_limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 0.0, - 'cost': 0.0, - 'remaining': 123.0, + "id": f"prod_stoploss_{direc(is_short)}_3455", + "symbol": "XRP/BTC", + "status": "open", + "side": exit_side(is_short), + "type": "stop_loss_limit", + "price": 0.123, + "amount": 123.0, + "filled": 0.0, + "cost": 0.0, + "remaining": 123.0, } @@ -254,7 +254,7 @@ def mock_trade_5(fee, is_short: bool): Simulate prod entry with stoploss """ trade = Trade( - pair='XRP/BTC', + pair="XRP/BTC", stake_amount=0.001, amount=123.0, amount_requested=124.0, @@ -263,47 +263,47 @@ def mock_trade_5(fee, is_short: bool): open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12), is_open=True, open_rate=0.123, - exchange='binance', - strategy='SampleStrategy', - enter_tag='TEST1', + exchange="binance", + strategy="SampleStrategy", + enter_tag="TEST1", timeframe=5, is_short=is_short, stop_loss_pct=0.10, ) - o = Order.parse_from_ccxt_object(mock_order_5(is_short), 'XRP/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_5(is_short), "XRP/BTC", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_5_stoploss(is_short), 'XRP/BTC', 'stoploss') + o = Order.parse_from_ccxt_object(mock_order_5_stoploss(is_short), "XRP/BTC", "stoploss") trade.orders.append(o) return trade def mock_order_6(is_short: bool): return { - 'id': f'prod_buy_{direc(is_short)}_6', - 'symbol': 'LTC/BTC', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 0.15, - 'amount': 2.0, - 'filled': 2.0, - 'cost': 0.3, - 'remaining': 0.0, + "id": f"prod_buy_{direc(is_short)}_6", + "symbol": "LTC/BTC", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 0.15, + "amount": 2.0, + "filled": 2.0, + "cost": 0.3, + "remaining": 0.0, } 
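# Illustrative sketch, not part of the upstream diff: the conftest_trades fixtures in the
# hunks above all follow one pattern - build a ccxt-style order dict, parse it with
# Order.parse_from_ccxt_object(order, pair, side), and attach it to a Trade. A minimal,
# hypothetical fixture assembled the same way is shown below; field names and imports are
# taken from the fixtures in this diff, `fee` is the usual pytest fixture returning a
# MagicMock, and the two local variables only mirror the module helpers entry_side()/direc().
from datetime import datetime, timedelta, timezone

from freqtrade.persistence import Order, Trade


def mock_trade_example(fee, is_short: bool):
    entry = "sell" if is_short else "buy"      # mirrors entry_side(is_short)
    suffix = "short" if is_short else "long"   # mirrors direc(is_short)
    # ccxt-style order dict, shaped like mock_order_1 .. mock_order_6 above
    order = {
        "id": f"example_{suffix}",
        "symbol": "ETH/BTC",
        "status": "closed",
        "side": entry,
        "type": "limit",
        "price": 0.123,
        "amount": 123.0,
        "filled": 123.0,
        "cost": 15.129,
        "remaining": 0.0,
    }
    trade = Trade(
        pair="ETH/BTC",
        stake_amount=0.001,
        amount=123.0,
        amount_requested=123.0,
        fee_open=fee.return_value,
        fee_close=fee.return_value,
        is_open=True,
        open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20),
        open_rate=0.123,
        exchange="binance",
        strategy="StrategyTestV3",
        timeframe=5,
        is_short=is_short,
    )
    # Same call the real fixtures use: parse the ccxt dict into an Order and attach it.
    trade.orders.append(Order.parse_from_ccxt_object(order, "ETH/BTC", entry))
    return trade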
def mock_order_6_sell(is_short: bool): return { - 'id': f'prod_sell_{direc(is_short)}_6', - 'symbol': 'LTC/BTC', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'limit', - 'price': 0.15 if is_short else 0.20, - 'amount': 2.0, - 'filled': 0.0, - 'cost': 0.0, - 'remaining': 2.0, + "id": f"prod_sell_{direc(is_short)}_6", + "symbol": "LTC/BTC", + "status": "open", + "side": exit_side(is_short), + "type": "limit", + "price": 0.15 if is_short else 0.20, + "amount": 2.0, + "filled": 0.0, + "cost": 0.0, + "remaining": 2.0, } @@ -312,7 +312,7 @@ def mock_trade_6(fee, is_short: bool): Simulate prod entry with open exit order """ trade = Trade( - pair='LTC/BTC', + pair="LTC/BTC", stake_amount=0.001, amount=2.0, amount_requested=2.0, @@ -321,81 +321,81 @@ def mock_trade_6(fee, is_short: bool): fee_close=fee.return_value, is_open=True, open_rate=0.15, - exchange='binance', - strategy='SampleStrategy', - enter_tag='TEST2', + exchange="binance", + strategy="SampleStrategy", + enter_tag="TEST2", timeframe=5, - is_short=is_short + is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_6(is_short), 'LTC/BTC', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_6(is_short), "LTC/BTC", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_6_sell(is_short), 'LTC/BTC', exit_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_6_sell(is_short), "LTC/BTC", exit_side(is_short)) trade.orders.append(o) return trade def short_order(): return { - 'id': '1236', - 'symbol': 'ETC/BTC', - 'status': 'closed', - 'side': 'sell', - 'type': 'limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.129, - 'remaining': 0.0, + "id": "1236", + "symbol": "ETC/BTC", + "status": "closed", + "side": "sell", + "type": "limit", + "price": 0.123, + "amount": 123.0, + "filled": 123.0, + "cost": 15.129, + "remaining": 0.0, } def exit_short_order(): return { - 'id': '12367', - 'symbol': 'ETC/BTC', - 'status': 'closed', - 'side': 'buy', - 'type': 'limit', - 'price': 0.128, - 'amount': 123.0, - 'filled': 123.0, - 'cost': 15.744, - 'remaining': 0.0, + "id": "12367", + "symbol": "ETC/BTC", + "status": "closed", + "side": "buy", + "type": "limit", + "price": 0.128, + "amount": 123.0, + "filled": 123.0, + "cost": 15.744, + "remaining": 0.0, } def short_trade(fee): """ - 10 minute short limit trade on binance + 10 minute short limit trade on binance - Short trade - fee: 0.25% base - interest_rate: 0.05% per day - open_rate: 0.123 base - close_rate: 0.128 base - amount: 123.0 crypto - stake_amount: 15.129 base - borrowed: 123.0 crypto - time-periods: 10 minutes(rounds up to 1/24 time-period of 1 day) - interest: borrowed * interest_rate * time-periods - = 123.0 * 0.0005 * 1/24 = 0.0025625 crypto - open_value: (amount * open_rate) - (amount * open_rate * fee) - = (123 * 0.123) - (123 * 0.123 * 0.0025) - = 15.091177499999999 - amount_closed: amount + interest = 123 + 0.0025625 = 123.0025625 - close_value: (amount_closed * close_rate) + (amount_closed * close_rate * fee) - = (123.0025625 * 0.128) + (123.0025625 * 0.128 * 0.0025) - = 15.78368882 - total_profit = open_value - close_value - = 15.091177499999999 - 15.78368882 - = -0.6925113200000013 - total_profit_percentage = total_profit / stake_amount - = -0.6925113200000013 / 15.129 - = -0.04577376693766946 + Short trade + fee: 0.25% base + interest_rate: 0.05% per day + open_rate: 0.123 base + close_rate: 0.128 base + amount: 123.0 crypto + stake_amount: 15.129 base + borrowed: 123.0 crypto + 
time-periods: 10 minutes(rounds up to 1/24 time-period of 1 day) + interest: borrowed * interest_rate * time-periods + = 123.0 * 0.0005 * 1/24 = 0.0025625 crypto + open_value: (amount * open_rate) - (amount * open_rate * fee) + = (123 * 0.123) - (123 * 0.123 * 0.0025) + = 15.091177499999999 + amount_closed: amount + interest = 123 + 0.0025625 = 123.0025625 + close_value: (amount_closed * close_rate) + (amount_closed * close_rate * fee) + = (123.0025625 * 0.128) + (123.0025625 * 0.128 * 0.0025) + = 15.78368882 + total_profit = open_value - close_value + = 15.091177499999999 - 15.78368882 + = -0.6925113200000013 + total_profit_percentage = total_profit / stake_amount + = -0.6925113200000013 / 15.129 + = -0.04577376693766946 """ trade = Trade( - pair='ETC/BTC', + pair="ETC/BTC", stake_amount=15.129, amount=123.0, amount_requested=123.0, @@ -405,51 +405,51 @@ def short_trade(fee): # close_rate=0.128, # close_profit=-0.04577376693766946, # close_profit_abs=-0.6925113200000013, - exchange='binance', + exchange="binance", is_open=True, - strategy='DefaultStrategy', + strategy="DefaultStrategy", timeframe=5, - exit_reason='sell_signal', + exit_reason="sell_signal", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20), # close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2), - is_short=True + is_short=True, ) - o = Order.parse_from_ccxt_object(short_order(), 'ETC/BTC', 'sell') + o = Order.parse_from_ccxt_object(short_order(), "ETC/BTC", "sell") trade.orders.append(o) - o = Order.parse_from_ccxt_object(exit_short_order(), 'ETC/BTC', 'sell') + o = Order.parse_from_ccxt_object(exit_short_order(), "ETC/BTC", "sell") trade.orders.append(o) return trade def leverage_order(): return { - 'id': '1237', - 'symbol': 'DOGE/BTC', - 'status': 'closed', - 'side': 'buy', - 'type': 'limit', - 'price': 0.123, - 'amount': 123.0, - 'filled': 123.0, - 'remaining': 0.0, - 'cost': 15.129, - 'leverage': 5.0 + "id": "1237", + "symbol": "DOGE/BTC", + "status": "closed", + "side": "buy", + "type": "limit", + "price": 0.123, + "amount": 123.0, + "filled": 123.0, + "remaining": 0.0, + "cost": 15.129, + "leverage": 5.0, } def leverage_order_sell(): return { - 'id': '12368', - 'symbol': 'DOGE/BTC', - 'status': 'closed', - 'side': 'sell', - 'type': 'limit', - 'price': 0.128, - 'amount': 123.0, - 'filled': 123.0, - 'remaining': 0.0, - 'cost': 15.744, - 'leverage': 5.0 + "id": "12368", + "symbol": "DOGE/BTC", + "status": "closed", + "side": "sell", + "type": "limit", + "price": 0.128, + "amount": 123.0, + "filled": 123.0, + "remaining": 0.0, + "cost": 15.744, + "leverage": 5.0, } @@ -457,34 +457,34 @@ def leverage_trade(fee): """ 5 hour short limit trade on kraken - Short trade - fee: 0.25% base - interest_rate: 0.05% per day - open_rate: 0.123 base - close_rate: 0.128 base - amount: 615 crypto - stake_amount: 15.129 base - borrowed: 60.516 base - leverage: 5 - hours: 5 - interest: borrowed * interest_rate * ceil(1 + hours/4) - = 60.516 * 0.0005 * ceil(1 + 5/4) = 0.090774 base - open_value: (amount * open_rate) + (amount * open_rate * fee) - = (615.0 * 0.123) + (615.0 * 0.123 * 0.0025) - = 75.83411249999999 + Short trade + fee: 0.25% base + interest_rate: 0.05% per day + open_rate: 0.123 base + close_rate: 0.128 base + amount: 615 crypto + stake_amount: 15.129 base + borrowed: 60.516 base + leverage: 5 + hours: 5 + interest: borrowed * interest_rate * ceil(1 + hours/4) + = 60.516 * 0.0005 * ceil(1 + 5/4) = 0.090774 base + open_value: (amount * open_rate) + (amount * open_rate * fee) + = (615.0 * 0.123) + (615.0 
* 0.123 * 0.0025) + = 75.83411249999999 - close_value: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest - = (615.0 * 0.128) - (615.0 * 0.128 * 0.0025) - 0.090774 - = 78.432426 - total_profit = close_value - open_value - = 78.432426 - 75.83411249999999 - = 2.5983135000000175 - total_profit_percentage = ((close_value/open_value)-1) * leverage - = ((78.432426/75.83411249999999)-1) * 5 - = 0.1713156134055116 + close_value: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest + = (615.0 * 0.128) - (615.0 * 0.128 * 0.0025) - 0.090774 + = 78.432426 + total_profit = close_value - open_value + = 78.432426 - 75.83411249999999 + = 2.5983135000000175 + total_profit_percentage = ((close_value/open_value)-1) * leverage + = ((78.432426/75.83411249999999)-1) * 5 + = 0.1713156134055116 """ trade = Trade( - pair='DOGE/BTC', + pair="DOGE/BTC", stake_amount=15.129, amount=615.0, leverage=5.0, @@ -495,17 +495,17 @@ def leverage_trade(fee): close_rate=0.128, close_profit=0.1713156134055116, close_profit_abs=2.5983135000000175, - exchange='kraken', + exchange="kraken", is_open=False, - strategy='DefaultStrategy', + strategy="DefaultStrategy", timeframe=5, - exit_reason='sell_signal', + exit_reason="sell_signal", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=300), close_date=datetime.now(tz=timezone.utc), - interest_rate=0.0005 + interest_rate=0.0005, ) - o = Order.parse_from_ccxt_object(leverage_order(), 'DOGE/BTC', 'sell') + o = Order.parse_from_ccxt_object(leverage_order(), "DOGE/BTC", "sell") trade.orders.append(o) - o = Order.parse_from_ccxt_object(leverage_order_sell(), 'DOGE/BTC', 'sell') + o = Order.parse_from_ccxt_object(leverage_order_sell(), "DOGE/BTC", "sell") trade.orders.append(o) return trade diff --git a/tests/conftest_trades_usdt.py b/tests/conftest_trades_usdt.py index cf3109090..1fc458279 100644 --- a/tests/conftest_trades_usdt.py +++ b/tests/conftest_trades_usdt.py @@ -20,29 +20,29 @@ def direc(is_short: bool): def mock_order_usdt_1(is_short: bool): return { - 'id': f'prod_entry_1_{direc(is_short)}', - 'symbol': 'LTC/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 10.0, - 'amount': 2.0, - 'filled': 2.0, - 'remaining': 0.0, + "id": f"prod_entry_1_{direc(is_short)}", + "symbol": "LTC/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 10.0, + "amount": 2.0, + "filled": 2.0, + "remaining": 0.0, } def mock_order_usdt_1_exit(is_short: bool): return { - 'id': f'prod_exit_1_{direc(is_short)}', - 'symbol': 'LTC/USDT', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'limit', - 'price': 8.0, - 'amount': 2.0, - 'filled': 0.0, - 'remaining': 2.0, + "id": f"prod_exit_1_{direc(is_short)}", + "symbol": "LTC/USDT", + "status": "open", + "side": exit_side(is_short), + "type": "limit", + "price": 8.0, + "amount": 2.0, + "filled": 0.0, + "remaining": 2.0, } @@ -51,7 +51,7 @@ def mock_trade_usdt_1(fee, is_short: bool): Simulate prod entry with open sell order """ trade = Trade( - pair='LTC/USDT', + pair="LTC/USDT", stake_amount=20.0, amount=2.0, amount_requested=2.0, @@ -64,44 +64,45 @@ def mock_trade_usdt_1(fee, is_short: bool): close_rate=8.0, close_profit=-0.2, close_profit_abs=-4.09, - exchange='binance', - strategy='SampleStrategy', + exchange="binance", + strategy="SampleStrategy", timeframe=5, is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_1(is_short), 'LTC/USDT', entry_side(is_short)) + o = 
Order.parse_from_ccxt_object(mock_order_usdt_1(is_short), "LTC/USDT", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_usdt_1_exit(is_short), - 'LTC/USDT', exit_side(is_short)) + o = Order.parse_from_ccxt_object( + mock_order_usdt_1_exit(is_short), "LTC/USDT", exit_side(is_short) + ) trade.orders.append(o) return trade def mock_order_usdt_2(is_short: bool): return { - 'id': f'1235_{direc(is_short)}', - 'symbol': 'NEO/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 2.0, - 'amount': 100.0, - 'filled': 100.0, - 'remaining': 0.0, + "id": f"1235_{direc(is_short)}", + "symbol": "NEO/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 2.0, + "amount": 100.0, + "filled": 100.0, + "remaining": 0.0, } def mock_order_usdt_2_exit(is_short: bool): return { - 'id': f'12366_{direc(is_short)}', - 'symbol': 'NEO/USDT', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'limit', - 'price': 2.05, - 'amount': 100.0, - 'filled': 0.0, - 'remaining': 100.0, + "id": f"12366_{direc(is_short)}", + "symbol": "NEO/USDT", + "status": "open", + "side": exit_side(is_short), + "type": "limit", + "price": 2.05, + "amount": 100.0, + "filled": 0.0, + "remaining": 100.0, } @@ -110,7 +111,7 @@ def mock_trade_usdt_2(fee, is_short: bool): Closed trade... """ trade = Trade( - pair='NEO/USDT', + pair="NEO/USDT", stake_amount=200.0, amount=100.0, amount_requested=100.0, @@ -120,50 +121,51 @@ def mock_trade_usdt_2(fee, is_short: bool): close_rate=2.05, close_profit=0.05, close_profit_abs=3.9875, - exchange='binance', + exchange="binance", is_open=False, - strategy='StrategyTestV2', + strategy="StrategyTestV2", timeframe=5, - enter_tag='TEST1', - exit_reason='exit_signal', + enter_tag="TEST1", + exit_reason="exit_signal", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20), close_date=datetime.now(tz=timezone.utc) - timedelta(minutes=2), is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_2(is_short), 'NEO/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_2(is_short), "NEO/USDT", entry_side(is_short)) trade.orders.append(o) o = Order.parse_from_ccxt_object( - mock_order_usdt_2_exit(is_short), 'NEO/USDT', exit_side(is_short)) + mock_order_usdt_2_exit(is_short), "NEO/USDT", exit_side(is_short) + ) trade.orders.append(o) return trade def mock_order_usdt_3(is_short: bool): return { - 'id': f'41231a12a_{direc(is_short)}', - 'symbol': 'XRP/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 1.0, - 'amount': 30.0, - 'filled': 30.0, - 'remaining': 0.0, + "id": f"41231a12a_{direc(is_short)}", + "symbol": "XRP/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 1.0, + "amount": 30.0, + "filled": 30.0, + "remaining": 0.0, } def mock_order_usdt_3_exit(is_short: bool): return { - 'id': f'41231a666a_{direc(is_short)}', - 'symbol': 'XRP/USDT', - 'status': 'closed', - 'side': exit_side(is_short), - 'type': 'stop_loss_limit', - 'price': 1.1, - 'average': 1.1, - 'amount': 30.0, - 'filled': 30.0, - 'remaining': 0.0, + "id": f"41231a666a_{direc(is_short)}", + "symbol": "XRP/USDT", + "status": "closed", + "side": exit_side(is_short), + "type": "stop_loss_limit", + "price": 1.1, + "average": 1.1, + "amount": 30.0, + "filled": 30.0, + "remaining": 0.0, } @@ -172,7 +174,7 @@ def mock_trade_usdt_3(fee, is_short: bool): Closed trade """ trade = Trade( - pair='XRP/USDT', + pair="XRP/USDT", 
stake_amount=30.0, amount=30.0, amount_requested=30.0, @@ -182,35 +184,36 @@ def mock_trade_usdt_3(fee, is_short: bool): close_rate=1.1, close_profit=0.1, close_profit_abs=2.8425, - exchange='binance', + exchange="binance", is_open=False, - strategy='StrategyTestV2', + strategy="StrategyTestV2", timeframe=5, - enter_tag='TEST3', - exit_reason='roi', + enter_tag="TEST3", + exit_reason="roi", open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=20), close_date=datetime.now(tz=timezone.utc), is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_3(is_short), 'XRP/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_3(is_short), "XRP/USDT", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_usdt_3_exit(is_short), - 'XRP/USDT', exit_side(is_short)) + o = Order.parse_from_ccxt_object( + mock_order_usdt_3_exit(is_short), "XRP/USDT", exit_side(is_short) + ) trade.orders.append(o) return trade def mock_order_usdt_4(is_short: bool): return { - 'id': f'prod_buy_12345_{direc(is_short)}', - 'symbol': 'NEO/USDT', - 'status': 'open', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 2.0, - 'amount': 10.0, - 'filled': 0.0, - 'remaining': 30.0, + "id": f"prod_buy_12345_{direc(is_short)}", + "symbol": "NEO/USDT", + "status": "open", + "side": entry_side(is_short), + "type": "limit", + "price": 2.0, + "amount": 10.0, + "filled": 0.0, + "remaining": 30.0, } @@ -219,7 +222,7 @@ def mock_trade_usdt_4(fee, is_short: bool): Simulate prod entry """ trade = Trade( - pair='NEO/USDT', + pair="NEO/USDT", stake_amount=20.0, amount=10.0, amount_requested=10.01, @@ -228,41 +231,41 @@ def mock_trade_usdt_4(fee, is_short: bool): open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=14), is_open=True, open_rate=2.0, - exchange='binance', - strategy='StrategyTestV2', + exchange="binance", + strategy="StrategyTestV2", timeframe=5, is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_4(is_short), 'NEO/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_4(is_short), "NEO/USDT", entry_side(is_short)) trade.orders.append(o) return trade def mock_order_usdt_5(is_short: bool): return { - 'id': f'prod_buy_3455_{direc(is_short)}', - 'symbol': 'XRP/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 2.0, - 'amount': 10.0, - 'filled': 10.0, - 'remaining': 0.0, + "id": f"prod_buy_3455_{direc(is_short)}", + "symbol": "XRP/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 2.0, + "amount": 10.0, + "filled": 10.0, + "remaining": 0.0, } def mock_order_usdt_5_stoploss(is_short: bool): return { - 'id': f'prod_stoploss_3455_{direc(is_short)}', - 'symbol': 'XRP/USDT', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'stop_loss_limit', - 'price': 2.0, - 'amount': 10.0, - 'filled': 0.0, - 'remaining': 30.0, + "id": f"prod_stoploss_3455_{direc(is_short)}", + "symbol": "XRP/USDT", + "status": "open", + "side": exit_side(is_short), + "type": "stop_loss_limit", + "price": 2.0, + "amount": 10.0, + "filled": 0.0, + "remaining": 30.0, } @@ -271,7 +274,7 @@ def mock_trade_usdt_5(fee, is_short: bool): Simulate prod entry with stoploss """ trade = Trade( - pair='XRP/USDT', + pair="XRP/USDT", stake_amount=20.0, amount=10.0, amount_requested=10.01, @@ -280,43 +283,43 @@ def mock_trade_usdt_5(fee, is_short: bool): open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=12), is_open=True, open_rate=2.0, - 
exchange='binance', - strategy='SampleStrategy', + exchange="binance", + strategy="SampleStrategy", timeframe=5, is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_5(is_short), 'XRP/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_5(is_short), "XRP/USDT", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_usdt_5_stoploss(is_short), 'XRP/USDT', 'stoploss') + o = Order.parse_from_ccxt_object(mock_order_usdt_5_stoploss(is_short), "XRP/USDT", "stoploss") trade.orders.append(o) return trade def mock_order_usdt_6(is_short: bool): return { - 'id': f'prod_entry_6_{direc(is_short)}', - 'symbol': 'LTC/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 10.0, - 'amount': 2.0, - 'filled': 2.0, - 'remaining': 0.0, + "id": f"prod_entry_6_{direc(is_short)}", + "symbol": "LTC/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 10.0, + "amount": 2.0, + "filled": 2.0, + "remaining": 0.0, } def mock_order_usdt_6_exit(is_short: bool): return { - 'id': f'prod_exit_6_{direc(is_short)}', - 'symbol': 'LTC/USDT', - 'status': 'open', - 'side': exit_side(is_short), - 'type': 'limit', - 'price': 12.0, - 'amount': 2.0, - 'filled': 0.0, - 'remaining': 2.0, + "id": f"prod_exit_6_{direc(is_short)}", + "symbol": "LTC/USDT", + "status": "open", + "side": exit_side(is_short), + "type": "limit", + "price": 12.0, + "amount": 2.0, + "filled": 0.0, + "remaining": 2.0, } @@ -325,7 +328,7 @@ def mock_trade_usdt_6(fee, is_short: bool): Simulate prod entry with open sell order """ trade = Trade( - pair='LTC/USDT', + pair="LTC/USDT", stake_amount=20.0, amount=2.0, amount_requested=2.0, @@ -334,36 +337,37 @@ def mock_trade_usdt_6(fee, is_short: bool): fee_close=fee.return_value, is_open=True, open_rate=10.0, - exchange='binance', - strategy='SampleStrategy', + exchange="binance", + strategy="SampleStrategy", timeframe=5, is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_6(is_short), 'LTC/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_6(is_short), "LTC/USDT", entry_side(is_short)) trade.orders.append(o) - o = Order.parse_from_ccxt_object(mock_order_usdt_6_exit(is_short), - 'LTC/USDT', exit_side(is_short)) + o = Order.parse_from_ccxt_object( + mock_order_usdt_6_exit(is_short), "LTC/USDT", exit_side(is_short) + ) trade.orders.append(o) return trade def mock_order_usdt_7(is_short: bool): return { - 'id': f'1234_{direc(is_short)}', - 'symbol': 'ADA/USDT', - 'status': 'closed', - 'side': entry_side(is_short), - 'type': 'limit', - 'price': 2.0, - 'amount': 10.0, - 'filled': 10.0, - 'remaining': 0.0, + "id": f"1234_{direc(is_short)}", + "symbol": "ADA/USDT", + "status": "closed", + "side": entry_side(is_short), + "type": "limit", + "price": 2.0, + "amount": 10.0, + "filled": 10.0, + "remaining": 0.0, } def mock_trade_usdt_7(fee, is_short: bool): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=20.0, amount=10.0, amount_requested=10.0, @@ -372,11 +376,11 @@ def mock_trade_usdt_7(fee, is_short: bool): is_open=True, open_date=datetime.now(tz=timezone.utc) - timedelta(minutes=17), open_rate=2.0, - exchange='binance', - strategy='StrategyTestV2', + exchange="binance", + strategy="StrategyTestV2", timeframe=5, is_short=is_short, ) - o = Order.parse_from_ccxt_object(mock_order_usdt_7(is_short), 'ADA/USDT', entry_side(is_short)) + o = Order.parse_from_ccxt_object(mock_order_usdt_7(is_short), "ADA/USDT", 
entry_side(is_short)) trade.orders.append(o) return trade diff --git a/tests/data/test_btanalysis.py b/tests/data/test_btanalysis.py index 7d7a97331..b9dee6d59 100644 --- a/tests/data/test_btanalysis.py +++ b/tests/data/test_btanalysis.py @@ -7,16 +7,32 @@ from pandas import DataFrame, DateOffset, Timestamp, to_datetime from freqtrade.configuration import TimeRange from freqtrade.constants import LAST_BT_RESULT_FN -from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism, - extract_trades_of_period, get_latest_backtest_filename, - get_latest_hyperopt_file, load_backtest_data, - load_backtest_metadata, load_trades, load_trades_from_db) +from freqtrade.data.btanalysis import ( + BT_DATA_COLUMNS, + analyze_trade_parallelism, + extract_trades_of_period, + get_latest_backtest_filename, + get_latest_hyperopt_file, + load_backtest_data, + load_backtest_metadata, + load_trades, + load_trades_from_db, +) from freqtrade.data.history import load_data, load_pair_history -from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum, - calculate_expectancy, calculate_market_change, - calculate_max_drawdown, calculate_sharpe, calculate_sortino, - calculate_underwater, combine_dataframes_with_mean, - combined_dataframes_with_rel_mean, create_cum_profit) +from freqtrade.data.metrics import ( + calculate_cagr, + calculate_calmar, + calculate_csum, + calculate_expectancy, + calculate_market_change, + calculate_max_drawdown, + calculate_sharpe, + calculate_sortino, + calculate_underwater, + combine_dataframes_with_mean, + combined_dataframes_with_rel_mean, + create_cum_profit, +) from freqtrade.exceptions import OperationalException from freqtrade.util import dt_utc from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades @@ -25,18 +41,17 @@ from tests.conftest_trades import MOCK_TRADE_COUNT def test_get_latest_backtest_filename(testdatadir, mocker): with pytest.raises(ValueError, match=r"Directory .* does not exist\."): - get_latest_backtest_filename(testdatadir / 'does_not_exist') + get_latest_backtest_filename(testdatadir / "does_not_exist") - with pytest.raises(ValueError, - match=r"Directory .* does not seem to contain .*"): + with pytest.raises(ValueError, match=r"Directory .* does not seem to contain .*"): get_latest_backtest_filename(testdatadir) testdir_bt = testdatadir / "backtest_results" res = get_latest_backtest_filename(testdir_bt) - assert res == 'backtest-result.json' + assert res == "backtest-result.json" res = get_latest_backtest_filename(str(testdir_bt)) - assert res == 'backtest-result.json' + assert res == "backtest-result.json" mocker.patch("freqtrade.data.btanalysis.json_load", return_value={}) @@ -45,8 +60,8 @@ def test_get_latest_backtest_filename(testdatadir, mocker): def test_get_latest_hyperopt_file(testdatadir): - res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle') - assert res == testdatadir / 'does_not_exist/testfile.pickle' + res = get_latest_hyperopt_file(testdatadir / "does_not_exist", "testfile.pickle") + assert res == testdatadir / "does_not_exist/testfile.pickle" res = get_latest_hyperopt_file(testdatadir.parent) assert res == testdatadir.parent / "hyperopt_results.pickle" @@ -57,33 +72,35 @@ def test_get_latest_hyperopt_file(testdatadir): # Test with absolute path with pytest.raises( OperationalException, - match="--hyperopt-filename expects only the filename, not an absolute path."): + match="--hyperopt-filename expects only the filename, not an absolute path.", + ): 
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent)) def test_load_backtest_metadata(mocker, testdatadir): - res = load_backtest_metadata(testdatadir / 'nonexistant.file.json') + res = load_backtest_metadata(testdatadir / "nonexistent.file.json") assert res == {} - mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename') - mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception()) - with pytest.raises(OperationalException, - match=r"Unexpected error.*loading backtest metadata\."): - load_backtest_metadata(testdatadir / 'nonexistant.file.json') + mocker.patch("freqtrade.data.btanalysis.get_backtest_metadata_filename") + mocker.patch("freqtrade.data.btanalysis.json_load", side_effect=Exception()) + with pytest.raises( + OperationalException, match=r"Unexpected error.*loading backtest metadata\." + ): + load_backtest_metadata(testdatadir / "nonexistent.file.json") def test_load_backtest_data_old_format(testdatadir, mocker): - filename = testdatadir / "backtest-result_test222.json" - mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[]) + mocker.patch("freqtrade.data.btanalysis.load_backtest_stats", return_value=[]) - with pytest.raises(OperationalException, - match=r"Backtest-results with only trades data are no longer supported."): + with pytest.raises( + OperationalException, + match=r"Backtest-results with only trades data are no longer supported.", + ): load_backtest_data(filename) def test_load_backtest_data_new_format(testdatadir): - filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) assert isinstance(bt_data, DataFrame) @@ -106,13 +123,11 @@ def test_load_backtest_data_new_format(testdatadir): def test_load_backtest_data_multi(testdatadir): - filename = testdatadir / "backtest_results/backtest-result_multistrat.json" - for strategy in ('StrategyTestV2', 'TestStrategy'): + for strategy in ("StrategyTestV2", "TestStrategy"): bt_data = load_backtest_data(filename, strategy=strategy) assert isinstance(bt_data, DataFrame) - assert set(bt_data.columns) == set( - BT_DATA_COLUMNS) + assert set(bt_data.columns) == set(BT_DATA_COLUMNS) assert len(bt_data) == 179 # Test loading from string (must yield same result) @@ -120,21 +135,20 @@ def test_load_backtest_data_multi(testdatadir): assert bt_data.equals(bt_data2) with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."): - load_backtest_data(filename, strategy='XYZ') + load_backtest_data(filename, strategy="XYZ") with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"): load_backtest_data(filename) @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize("is_short", [False, True]) def test_load_trades_from_db(default_conf, fee, is_short, mocker): - create_mock_trades(fee, is_short) # remove init so it does not init again - init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock()) + init_mock = mocker.patch("freqtrade.data.btanalysis.init_db", MagicMock()) - trades = load_trades_from_db(db_url=default_conf['db_url']) + trades = load_trades_from_db(db_url=default_conf["db_url"]) assert init_mock.call_count == 1 assert len(trades) == MOCK_TRADE_COUNT assert isinstance(trades, DataFrame) @@ -143,38 +157,46 @@ def test_load_trades_from_db(default_conf, fee, is_short, mocker): assert "profit_ratio" in trades.columns for col in BT_DATA_COLUMNS: - if col not in 
['index', 'open_at_end']: + if col not in ["index", "open_at_end"]: assert col in trades.columns - trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY) + trades = load_trades_from_db(db_url=default_conf["db_url"], strategy=CURRENT_TEST_STRATEGY) assert len(trades) == 4 - trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy') + trades = load_trades_from_db(db_url=default_conf["db_url"], strategy="NoneStrategy") assert len(trades) == 0 def test_extract_trades_of_period(testdatadir): pair = "UNITTEST/BTC" # 2018-11-14 06:07:00 - timerange = TimeRange('date', None, 1510639620, 0) + timerange = TimeRange("date", None, 1510639620, 0) - data = load_pair_history(pair=pair, timeframe='1m', - datadir=testdatadir, timerange=timerange) + data = load_pair_history(pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange) trades = DataFrame( - {'pair': [pair, pair, pair, pair], - 'profit_ratio': [0.0, 0.1, -0.2, -0.5], - 'profit_abs': [0.0, 1, -2, -5], - 'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc), - datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc), - datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc), - datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc), - ], utc=True - ), - 'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc), - datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc), - datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc), - datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc), - ], utc=True) - }) + { + "pair": [pair, pair, pair, pair], + "profit_ratio": [0.0, 0.1, -0.2, -0.5], + "profit_abs": [0.0, 1, -2, -5], + "open_date": to_datetime( + [ + datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc), + datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc), + datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc), + datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc), + ], + utc=True, + ), + "close_date": to_datetime( + [ + datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc), + datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc), + datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc), + datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc), + ], + utc=True, + ), + } + ) trades1 = extract_trades_of_period(data, trades) # First and last trade are dropped as they are out of range assert len(trades1) == 2 @@ -190,44 +212,47 @@ def test_analyze_trade_parallelism(testdatadir): res = analyze_trade_parallelism(bt_data, "5m") assert isinstance(res, DataFrame) - assert 'open_trades' in res.columns - assert res['open_trades'].max() == 3 - assert res['open_trades'].min() == 0 + assert "open_trades" in res.columns + assert res["open_trades"].max() == 3 + assert res["open_trades"].min() == 0 def test_load_trades(default_conf, mocker): db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock()) bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock()) - load_trades("DB", - db_url=default_conf.get('db_url'), - exportfilename=default_conf.get('exportfilename'), - no_trades=False, - strategy=CURRENT_TEST_STRATEGY, - ) + load_trades( + "DB", + db_url=default_conf.get("db_url"), + exportfilename=default_conf.get("exportfilename"), + no_trades=False, + strategy=CURRENT_TEST_STRATEGY, + ) assert db_mock.call_count == 1 assert bt_mock.call_count == 0 db_mock.reset_mock() bt_mock.reset_mock() - default_conf['exportfilename'] = Path("testfile.json") - load_trades("file", - 
db_url=default_conf.get('db_url'), - exportfilename=default_conf.get('exportfilename'), - ) + default_conf["exportfilename"] = Path("testfile.json") + load_trades( + "file", + db_url=default_conf.get("db_url"), + exportfilename=default_conf.get("exportfilename"), + ) assert db_mock.call_count == 0 assert bt_mock.call_count == 1 db_mock.reset_mock() bt_mock.reset_mock() - default_conf['exportfilename'] = "testfile.json" - load_trades("file", - db_url=default_conf.get('db_url'), - exportfilename=default_conf.get('exportfilename'), - no_trades=True - ) + default_conf["exportfilename"] = "testfile.json" + load_trades( + "file", + db_url=default_conf.get("db_url"), + exportfilename=default_conf.get("exportfilename"), + no_trades=True, + ) assert db_mock.call_count == 0 assert bt_mock.call_count == 0 @@ -235,7 +260,7 @@ def test_load_trades(default_conf, mocker): def test_calculate_market_change(testdatadir): pairs = ["ETH/BTC", "ADA/BTC"] - data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m') + data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m") result = calculate_market_change(data) assert isinstance(result, float) assert pytest.approx(result) == 0.01100002 @@ -243,7 +268,7 @@ def test_calculate_market_change(testdatadir): def test_combine_dataframes_with_mean(testdatadir): pairs = ["ETH/BTC", "ADA/BTC"] - data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m') + data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m") df = combine_dataframes_with_mean(data) assert isinstance(df, DataFrame) assert "ETH/BTC" in df.columns @@ -253,11 +278,9 @@ def test_combine_dataframes_with_mean(testdatadir): def test_combined_dataframes_with_rel_mean(testdatadir): pairs = ["ETH/BTC", "ADA/BTC"] - data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m') + data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m") df = combined_dataframes_with_rel_mean( - data, - datetime(2018, 1, 12, tzinfo=timezone.utc), - datetime(2018, 1, 28, tzinfo=timezone.utc) + data, datetime(2018, 1, 12, tzinfo=timezone.utc), datetime(2018, 1, 28, tzinfo=timezone.utc) ) assert isinstance(df, DataFrame) assert "ETH/BTC" not in df.columns @@ -265,14 +288,14 @@ def test_combined_dataframes_with_rel_mean(testdatadir): assert "mean" in df.columns assert "rel_mean" in df.columns assert "count" in df.columns - assert df.iloc[0]['count'] == 2 - assert df.iloc[-1]['count'] == 2 - assert len(df) < len(data['ETH/BTC']) + assert df.iloc[0]["count"] == 2 + assert df.iloc[-1]["count"] == 2 + assert len(df) < len(data["ETH/BTC"]) def test_combine_dataframes_with_mean_no_data(testdatadir): pairs = ["ETH/BTC", "ADA/BTC"] - data = load_data(datadir=testdatadir, pairs=pairs, timeframe='6m') + data = load_data(datadir=testdatadir, pairs=pairs, timeframe="6m") with pytest.raises(ValueError, match=r"No data provided\."): combine_dataframes_with_mean(data) @@ -282,60 +305,61 @@ def test_create_cum_profit(testdatadir): bt_data = load_backtest_data(filename) timerange = TimeRange.parse_timerange("20180110-20180112") - df = load_pair_history(pair="TRX/BTC", timeframe='5m', - datadir=testdatadir, timerange=timerange) + df = load_pair_history(pair="TRX/BTC", timeframe="5m", datadir=testdatadir, timerange=timerange) - cum_profits = create_cum_profit(df.set_index('date'), - bt_data[bt_data["pair"] == 'TRX/BTC'], - "cum_profits", timeframe="5m") + cum_profits = create_cum_profit( + df.set_index("date"), bt_data[bt_data["pair"] == "TRX/BTC"], "cum_profits", timeframe="5m" + ) assert "cum_profits" in 
cum_profits.columns - assert cum_profits.iloc[0]['cum_profits'] == 0 - assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05 + assert cum_profits.iloc[0]["cum_profits"] == 0 + assert pytest.approx(cum_profits.iloc[-1]["cum_profits"]) == 9.0225563e-05 def test_create_cum_profit1(testdatadir): filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) # Move close-time to "off" the candle, to make sure the logic still works - bt_data['close_date'] = bt_data.loc[:, 'close_date'] + DateOffset(seconds=20) + bt_data["close_date"] = bt_data.loc[:, "close_date"] + DateOffset(seconds=20) timerange = TimeRange.parse_timerange("20180110-20180112") - df = load_pair_history(pair="TRX/BTC", timeframe='5m', - datadir=testdatadir, timerange=timerange) + df = load_pair_history(pair="TRX/BTC", timeframe="5m", datadir=testdatadir, timerange=timerange) - cum_profits = create_cum_profit(df.set_index('date'), - bt_data[bt_data["pair"] == 'TRX/BTC'], - "cum_profits", timeframe="5m") + cum_profits = create_cum_profit( + df.set_index("date"), bt_data[bt_data["pair"] == "TRX/BTC"], "cum_profits", timeframe="5m" + ) assert "cum_profits" in cum_profits.columns - assert cum_profits.iloc[0]['cum_profits'] == 0 - assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05 + assert cum_profits.iloc[0]["cum_profits"] == 0 + assert pytest.approx(cum_profits.iloc[-1]["cum_profits"]) == 9.0225563e-05 - with pytest.raises(ValueError, match='Trade dataframe empty.'): - create_cum_profit(df.set_index('date'), bt_data[bt_data["pair"] == 'NOTAPAIR'], - "cum_profits", timeframe="5m") + with pytest.raises(ValueError, match="Trade dataframe empty."): + create_cum_profit( + df.set_index("date"), + bt_data[bt_data["pair"] == "NOTAPAIR"], + "cum_profits", + timeframe="5m", + ) def test_calculate_max_drawdown(testdatadir): filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) - _, hdate, lowdate, hval, lval, drawdown = calculate_max_drawdown( - bt_data, value_col="profit_abs") - assert isinstance(drawdown, float) - assert pytest.approx(drawdown) == 0.29753914 - assert isinstance(hdate, Timestamp) - assert isinstance(lowdate, Timestamp) - assert isinstance(hval, float) - assert isinstance(lval, float) - assert hdate == Timestamp('2018-01-16 19:30:00', tz='UTC') - assert lowdate == Timestamp('2018-01-16 22:25:00', tz='UTC') + drawdown = calculate_max_drawdown(bt_data, value_col="profit_abs") + assert isinstance(drawdown.relative_account_drawdown, float) + assert pytest.approx(drawdown.relative_account_drawdown) == 0.29753914 + assert isinstance(drawdown.high_date, Timestamp) + assert isinstance(drawdown.low_date, Timestamp) + assert isinstance(drawdown.high_value, float) + assert isinstance(drawdown.low_value, float) + assert drawdown.high_date == Timestamp("2018-01-16 19:30:00", tz="UTC") + assert drawdown.low_date == Timestamp("2018-01-16 22:25:00", tz="UTC") underwater = calculate_underwater(bt_data) assert isinstance(underwater, DataFrame) - with pytest.raises(ValueError, match='Trade dataframe empty.'): + with pytest.raises(ValueError, match="Trade dataframe empty."): calculate_max_drawdown(DataFrame()) - with pytest.raises(ValueError, match='Trade dataframe empty.'): + with pytest.raises(ValueError, match="Trade dataframe empty."): calculate_underwater(DataFrame()) @@ -354,7 +378,7 @@ def test_calculate_csum(testdatadir): assert csum_min1 == csum_min + 5 assert csum_max1 == csum_max + 5 - with 
pytest.raises(ValueError, match='Trade dataframe empty.'): + with pytest.raises(ValueError, match="Trade dataframe empty."): csum_min, csum_max = calculate_csum(DataFrame()) @@ -372,9 +396,7 @@ def test_calculate_expectancy(testdatadir): assert pytest.approx(expectancy) == 5.820687070932315e-06 assert pytest.approx(expectancy_ratio) == 0.07151374226574791 - data = { - 'profit_abs': [100, 200, 50, -150, 300, -100, 80, -30] - } + data = {"profit_abs": [100, 200, 50, -150, 300, -100, 80, -30]} df = DataFrame(data) expectancy, expectancy_ratio = calculate_expectancy(df) @@ -391,10 +413,10 @@ def test_calculate_sortino(testdatadir): sortino = calculate_sortino( bt_data, - bt_data['open_date'].min(), - bt_data['close_date'].max(), + bt_data["open_date"].min(), + bt_data["close_date"].max(), 0.01, - ) + ) assert isinstance(sortino, float) assert pytest.approx(sortino) == 35.17722 @@ -408,10 +430,10 @@ def test_calculate_sharpe(testdatadir): sharpe = calculate_sharpe( bt_data, - bt_data['open_date'].min(), - bt_data['close_date'].max(), + bt_data["open_date"].min(), + bt_data["close_date"].max(), 0.01, - ) + ) assert isinstance(sharpe, float) assert pytest.approx(sharpe) == 44.5078669 @@ -425,68 +447,99 @@ def test_calculate_calmar(testdatadir): calmar = calculate_calmar( bt_data, - bt_data['open_date'].min(), - bt_data['close_date'].max(), + bt_data["open_date"].min(), + bt_data["close_date"].max(), 0.01, - ) + ) assert isinstance(calmar, float) assert pytest.approx(calmar) == 559.040508 -@pytest.mark.parametrize('start,end,days, expected', [ - (64900, 176000, 3 * 365, 0.3945), - (64900, 176000, 365, 1.7119), - (1000, 1000, 365, 0.0), - (1000, 1500, 365, 0.5), - (1000, 1500, 100, 3.3927), # sub year - (0.01000000, 0.01762792, 120, 4.6087), # sub year BTC values -]) +@pytest.mark.parametrize( + "start,end,days, expected", + [ + (64900, 176000, 3 * 365, 0.3945), + (64900, 176000, 365, 1.7119), + (1000, 1000, 365, 0.0), + (1000, 1500, 365, 0.5), + (1000, 1500, 100, 3.3927), # sub year + (0.01000000, 0.01762792, 120, 4.6087), # sub year BTC values + ], +) def test_calculate_cagr(start, end, days, expected): - assert round(calculate_cagr(days, start, end), 4) == expected def test_calculate_max_drawdown2(): - values = [0.011580, 0.010048, 0.011340, 0.012161, 0.010416, 0.010009, 0.020024, - -0.024662, -0.022350, 0.020496, -0.029859, -0.030511, 0.010041, 0.010872, - -0.025782, 0.010400, 0.012374, 0.012467, 0.114741, 0.010303, 0.010088, - -0.033961, 0.010680, 0.010886, -0.029274, 0.011178, 0.010693, 0.010711] + values = [ + 0.011580, + 0.010048, + 0.011340, + 0.012161, + 0.010416, + 0.010009, + 0.020024, + -0.024662, + -0.022350, + 0.020496, + -0.029859, + -0.030511, + 0.010041, + 0.010872, + -0.025782, + 0.010400, + 0.012374, + 0.012467, + 0.114741, + 0.010303, + 0.010088, + -0.033961, + 0.010680, + 0.010886, + -0.029274, + 0.011178, + 0.010693, + 0.010711, + ] dates = [dt_utc(2020, 1, 1) + timedelta(days=i) for i in range(len(values))] - df = DataFrame(zip(values, dates), columns=['profit', 'open_date']) + df = DataFrame(zip(values, dates), columns=["profit", "open_date"]) # sort by profit and reset index - df = df.sort_values('profit').reset_index(drop=True) + df = df.sort_values("profit").reset_index(drop=True) df1 = df.copy() - drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown( - df, date_col='open_date', value_col='profit') + drawdown = calculate_max_drawdown( + df, date_col="open_date", starting_balance=0.2, value_col="profit" + ) # Ensure df has not been altered. 
assert df.equals(df1) - assert isinstance(drawdown, float) - assert isinstance(drawdown_rel, float) + assert isinstance(drawdown.drawdown_abs, float) + assert isinstance(drawdown.relative_account_drawdown, float) # High must be before low - assert hdate < ldate + assert drawdown.high_date < drawdown.low_date # High value must be higher than low value - assert hval > lval - assert drawdown == 0.091755 + assert drawdown.high_value > drawdown.low_value + assert drawdown.drawdown_abs == 0.091755 + assert pytest.approx(drawdown.relative_account_drawdown) == 0.32129575 - df = DataFrame(zip(values[:5], dates[:5]), columns=['profit', 'open_date']) - with pytest.raises(ValueError, match='No losing trade, therefore no drawdown.'): - calculate_max_drawdown(df, date_col='open_date', value_col='profit') + df = DataFrame(zip(values[:5], dates[:5]), columns=["profit", "open_date"]) + with pytest.raises(ValueError, match="No losing trade, therefore no drawdown."): + calculate_max_drawdown(df, date_col="open_date", value_col="profit") - df1 = DataFrame(zip(values[:5], dates[:5]), columns=['profit', 'open_date']) - df1.loc[:, 'profit'] = df1['profit'] * -1 + df1 = DataFrame(zip(values[:5], dates[:5]), columns=["profit", "open_date"]) + df1.loc[:, "profit"] = df1["profit"] * -1 # No winning trade ... - drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown( - df1, date_col='open_date', value_col='profit') - assert drawdown == 0.043965 + drawdown = calculate_max_drawdown(df1, date_col="open_date", value_col="profit") + assert drawdown.drawdown_abs == 0.043965 -@pytest.mark.parametrize('profits,relative,highd,lowdays,result,result_rel', [ - ([0.0, -500.0, 500.0, 10000.0, -1000.0], False, 3, 4, 1000.0, 0.090909), - ([0.0, -500.0, 500.0, 10000.0, -1000.0], True, 0, 1, 500.0, 0.5), - -]) +@pytest.mark.parametrize( + "profits,relative,highd,lowdays,result,result_rel", + [ + ([0.0, -500.0, 500.0, 10000.0, -1000.0], False, 3, 4, 1000.0, 0.090909), + ([0.0, -500.0, 500.0, 10000.0, -1000.0], True, 0, 1, 500.0, 0.5), + ], +) def test_calculate_max_drawdown_abs(profits, relative, highd, lowdays, result, result_rel): """ Test case from issue https://github.com/freqtrade/freqtrade/issues/6655 @@ -495,23 +548,24 @@ def test_calculate_max_drawdown_abs(profits, relative, highd, lowdays, result, r """ init_date = datetime(2020, 1, 1, tzinfo=timezone.utc) dates = [init_date + timedelta(days=i) for i in range(len(profits))] - df = DataFrame(zip(profits, dates), columns=['profit_abs', 'open_date']) + df = DataFrame(zip(profits, dates), columns=["profit_abs", "open_date"]) # sort by profit and reset index - df = df.sort_values('profit_abs').reset_index(drop=True) + df = df.sort_values("profit_abs").reset_index(drop=True) df1 = df.copy() - drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown( - df, date_col='open_date', starting_balance=1000, relative=relative) + drawdown = calculate_max_drawdown( + df, date_col="open_date", starting_balance=1000, relative=relative + ) # Ensure df has not been altered. 
assert df.equals(df1) - assert isinstance(drawdown, float) - assert isinstance(drawdown_rel, float) - assert hdate == init_date + timedelta(days=highd) - assert ldate == init_date + timedelta(days=lowdays) + assert isinstance(drawdown.drawdown_abs, float) + assert isinstance(drawdown.relative_account_drawdown, float) + assert drawdown.high_date == init_date + timedelta(days=highd) + assert drawdown.low_date == init_date + timedelta(days=lowdays) # High must be before low - assert hdate < ldate + assert drawdown.high_date < drawdown.low_date # High value must be higher than low value - assert hval > lval - assert drawdown == result - assert pytest.approx(drawdown_rel) == result_rel + assert drawdown.high_value > drawdown.low_value + assert drawdown.drawdown_abs == result + assert pytest.approx(drawdown.relative_account_drawdown) == result_rel diff --git a/tests/data/test_converter.py b/tests/data/test_converter.py index d6f88405b..9c6b7d875 100644 --- a/tests/data/test_converter.py +++ b/tests/data/test_converter.py @@ -8,13 +8,24 @@ import pytest from pandas.testing import assert_frame_equal from freqtrade.configuration.timerange import TimeRange -from freqtrade.data.converter import (convert_ohlcv_format, convert_trades_format, - convert_trades_to_ohlcv, ohlcv_fill_up_missing_data, - ohlcv_to_dataframe, reduce_dataframe_footprint, - trades_df_remove_duplicates, trades_dict_to_list, - trades_to_ohlcv, trim_dataframe) -from freqtrade.data.history import (get_timerange, load_data, load_pair_history, - validate_backtest_data) +from freqtrade.data.converter import ( + convert_ohlcv_format, + convert_trades_format, + convert_trades_to_ohlcv, + ohlcv_fill_up_missing_data, + ohlcv_to_dataframe, + reduce_dataframe_footprint, + trades_df_remove_duplicates, + trades_dict_to_list, + trades_to_ohlcv, + trim_dataframe, +) +from freqtrade.data.history import ( + get_timerange, + load_data, + load_pair_history, + validate_backtest_data, +) from freqtrade.data.history.datahandlers import IDataHandler from freqtrade.enums import CandleType from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds @@ -23,102 +34,105 @@ from tests.data.test_history import _clean_test_file def test_dataframe_correct_columns(dataframe_1m): - assert dataframe_1m.columns.tolist() == ['date', 'open', 'high', 'low', 'close', 'volume'] + assert dataframe_1m.columns.tolist() == ["date", "open", "high", "low", "close", "volume"] def test_ohlcv_to_dataframe(ohlcv_history_list, caplog): - columns = ['date', 'open', 'high', 'low', 'close', 'volume'] + columns = ["date", "open", "high", "low", "close", "volume"] caplog.set_level(logging.DEBUG) # Test file with BV data - dataframe = ohlcv_to_dataframe(ohlcv_history_list, '5m', pair="UNITTEST/BTC", - fill_missing=True) + dataframe = ohlcv_to_dataframe(ohlcv_history_list, "5m", pair="UNITTEST/BTC", fill_missing=True) assert dataframe.columns.tolist() == columns - assert log_has('Converting candle (OHLCV) data to dataframe for pair UNITTEST/BTC.', caplog) + assert log_has("Converting candle (OHLCV) data to dataframe for pair UNITTEST/BTC.", caplog) def test_trades_to_ohlcv(trades_history_df, caplog): - caplog.set_level(logging.DEBUG) with pytest.raises(ValueError, match="Trade-list empty."): - trades_to_ohlcv(pd.DataFrame(columns=trades_history_df.columns), '1m') + trades_to_ohlcv(pd.DataFrame(columns=trades_history_df.columns), "1m") - df = trades_to_ohlcv(trades_history_df, '1m') + df = trades_to_ohlcv(trades_history_df, "1m") assert not df.empty assert len(df) == 1 - 
assert 'open' in df.columns - assert 'high' in df.columns - assert 'low' in df.columns - assert 'close' in df.columns - assert df.iloc[0, :]['high'] == 0.019627 - assert df.iloc[0, :]['low'] == 0.019626 - assert df.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:00+0000') + assert "open" in df.columns + assert "high" in df.columns + assert "low" in df.columns + assert "close" in df.columns + assert df.iloc[0, :]["high"] == 0.019627 + assert df.iloc[0, :]["low"] == 0.019626 + assert df.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:59:00+0000") - df_1h = trades_to_ohlcv(trades_history_df, '1h') + df_1h = trades_to_ohlcv(trades_history_df, "1h") assert len(df_1h) == 1 - assert df_1h.iloc[0, :]['high'] == 0.019627 - assert df_1h.iloc[0, :]['low'] == 0.019626 - assert df_1h.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:00:00+0000') + assert df_1h.iloc[0, :]["high"] == 0.019627 + assert df_1h.iloc[0, :]["low"] == 0.019626 + assert df_1h.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:00:00+0000") - df_1s = trades_to_ohlcv(trades_history_df, '1s') + df_1s = trades_to_ohlcv(trades_history_df, "1s") assert len(df_1s) == 2 - assert df_1s.iloc[0, :]['high'] == 0.019627 - assert df_1s.iloc[0, :]['low'] == 0.019627 - assert df_1s.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:49+0000') - assert df_1s.iloc[-1, :]['date'] == pd.Timestamp('2019-08-14 15:59:59+0000') + assert df_1s.iloc[0, :]["high"] == 0.019627 + assert df_1s.iloc[0, :]["low"] == 0.019627 + assert df_1s.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:59:49+0000") + assert df_1s.iloc[-1, :]["date"] == pd.Timestamp("2019-08-14 15:59:59+0000") -@pytest.mark.parametrize('timeframe,rows,days,candles,start,end,weekday', [ - ('1s', 20_000, 5, 19522, '2020-01-01 00:00:05', '2020-01-05 23:59:27', None), - ('1m', 20_000, 5, 6745, '2020-01-01 00:00:00', '2020-01-05 23:59:00', None), - ('5m', 20_000, 5, 1440, '2020-01-01 00:00:00', '2020-01-05 23:55:00', None), - ('15m', 20_000, 5, 480, '2020-01-01 00:00:00', '2020-01-05 23:45:00', None), - ('1h', 20_000, 5, 120, '2020-01-01 00:00:00', '2020-01-05 23:00:00', None), - ('2h', 20_000, 5, 60, '2020-01-01 00:00:00', '2020-01-05 22:00:00', None), - ('4h', 20_000, 5, 30, '2020-01-01 00:00:00', '2020-01-05 20:00:00', None), - ('8h', 20_000, 5, 15, '2020-01-01 00:00:00', '2020-01-05 16:00:00', None), - ('12h', 20_000, 5, 10, '2020-01-01 00:00:00', '2020-01-05 12:00:00', None), - ('1d', 20_000, 5, 5, '2020-01-01 00:00:00', '2020-01-05 00:00:00', 'Sunday'), - ('7d', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'), - ('1w', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'), - ('1M', 20_000, 74, 3, '2020-01-01 00:00:00', '2020-03-01 00:00:00', None), - ('3M', 20_000, 100, 2, '2020-01-01 00:00:00', '2020-04-01 00:00:00', None), - ('1y', 20_000, 1000, 3, '2020-01-01 00:00:00', '2022-01-01 00:00:00', None), -]) +@pytest.mark.parametrize( + "timeframe,rows,days,candles,start,end,weekday", + [ + ("1s", 20_000, 5, 19522, "2020-01-01 00:00:05", "2020-01-05 23:59:27", None), + ("1m", 20_000, 5, 6745, "2020-01-01 00:00:00", "2020-01-05 23:59:00", None), + ("5m", 20_000, 5, 1440, "2020-01-01 00:00:00", "2020-01-05 23:55:00", None), + ("15m", 20_000, 5, 480, "2020-01-01 00:00:00", "2020-01-05 23:45:00", None), + ("1h", 20_000, 5, 120, "2020-01-01 00:00:00", "2020-01-05 23:00:00", None), + ("2h", 20_000, 5, 60, "2020-01-01 00:00:00", "2020-01-05 22:00:00", None), + ("4h", 20_000, 5, 30, "2020-01-01 00:00:00", "2020-01-05 20:00:00", None), + ("8h", 20_000, 5, 
15, "2020-01-01 00:00:00", "2020-01-05 16:00:00", None), + ("12h", 20_000, 5, 10, "2020-01-01 00:00:00", "2020-01-05 12:00:00", None), + ("1d", 20_000, 5, 5, "2020-01-01 00:00:00", "2020-01-05 00:00:00", "Sunday"), + ("7d", 20_000, 37, 6, "2020-01-06 00:00:00", "2020-02-10 00:00:00", "Monday"), + ("1w", 20_000, 37, 6, "2020-01-06 00:00:00", "2020-02-10 00:00:00", "Monday"), + ("1M", 20_000, 74, 3, "2020-01-01 00:00:00", "2020-03-01 00:00:00", None), + ("3M", 20_000, 100, 2, "2020-01-01 00:00:00", "2020-04-01 00:00:00", None), + ("1y", 20_000, 1000, 3, "2020-01-01 00:00:00", "2022-01-01 00:00:00", None), + ], +) def test_trades_to_ohlcv_multi(timeframe, rows, days, candles, start, end, weekday): trades_history = generate_trades_history(n_rows=rows, days=days) df = trades_to_ohlcv(trades_history, timeframe) assert not df.empty assert len(df) == candles - assert df.iloc[0, :]['date'] == pd.Timestamp(f'{start}+0000') - assert df.iloc[-1, :]['date'] == pd.Timestamp(f'{end}+0000') + assert df.iloc[0, :]["date"] == pd.Timestamp(f"{start}+0000") + assert df.iloc[-1, :]["date"] == pd.Timestamp(f"{end}+0000") if weekday: # Weekday is only relevant for daily and weekly candles. - assert df.iloc[-1, :]['date'].day_name() == weekday + assert df.iloc[-1, :]["date"].day_name() == weekday def test_ohlcv_fill_up_missing_data(testdatadir, caplog): - data = load_pair_history(datadir=testdatadir, - timeframe='1m', - pair='UNITTEST/BTC', - fill_up_missing=False) + data = load_pair_history( + datadir=testdatadir, timeframe="1m", pair="UNITTEST/BTC", fill_up_missing=False + ) caplog.set_level(logging.DEBUG) - data2 = ohlcv_fill_up_missing_data(data, '1m', 'UNITTEST/BTC') + data2 = ohlcv_fill_up_missing_data(data, "1m", "UNITTEST/BTC") assert len(data2) > len(data) # Column names should not change assert (data.columns == data2.columns).all() - assert log_has_re(f"Missing data fillup for UNITTEST/BTC, 1m: before: " - f"{len(data)} - after: {len(data2)}.*", caplog) + assert log_has_re( + f"Missing data fillup for UNITTEST/BTC, 1m: before: " + f"{len(data)} - after: {len(data2)}.*", + caplog, + ) # Test fillup actually fixes invalid backtest data - min_date, max_date = get_timerange({'UNITTEST/BTC': data}) - assert validate_backtest_data(data, 'UNITTEST/BTC', min_date, max_date, 1) - assert not validate_backtest_data(data2, 'UNITTEST/BTC', min_date, max_date, 1) + min_date, max_date = get_timerange({"UNITTEST/BTC": data}) + assert validate_backtest_data(data, "UNITTEST/BTC", min_date, max_date, 1) + assert not validate_backtest_data(data2, "UNITTEST/BTC", min_date, max_date, 1) def test_ohlcv_fill_up_missing_data2(caplog): - timeframe = '5m' + timeframe = "5m" ticks = [ [ 1511686200000, # 8:50:00 @@ -142,7 +156,7 @@ def test_ohlcv_fill_up_missing_data2(caplog): 8.893e-05, 8.875e-05, 8.877e-05, - 2251 + 2251, ], [ 1511687400000, # 9:10:00 @@ -150,51 +164,54 @@ def test_ohlcv_fill_up_missing_data2(caplog): 8.883e-05, 8.895e-05, 8.817e-05, - 123551 - ] + 123551, + ], ] # Generate test-data without filling missing - data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC", - fill_missing=False) + data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False) assert len(data) == 3 caplog.set_level(logging.DEBUG) data2 = ohlcv_fill_up_missing_data(data, timeframe, "UNITTEST/BTC") assert len(data2) == 4 # 3rd candle has been filled row = data2.loc[2, :] - assert row['volume'] == 0 + assert row["volume"] == 0 # close should match close of previous candle - assert row['close'] == data.loc[1, 
'close'] - assert row['open'] == row['close'] - assert row['high'] == row['close'] - assert row['low'] == row['close'] + assert row["close"] == data.loc[1, "close"] + assert row["open"] == row["close"] + assert row["high"] == row["close"] + assert row["low"] == row["close"] # Column names should not change assert (data.columns == data2.columns).all() - assert log_has_re(f"Missing data fillup for UNITTEST/BTC, {timeframe}: before: " - f"{len(data)} - after: {len(data2)}.*", caplog) + assert log_has_re( + f"Missing data fillup for UNITTEST/BTC, {timeframe}: before: " + f"{len(data)} - after: {len(data2)}.*", + caplog, + ) -@pytest.mark.parametrize('timeframe', [ - '1s', '1m', '5m', '15m', '1h', '2h', '4h', '8h', '12h', '1d', '7d', '1w', '1M', '3M', '1y' -]) +@pytest.mark.parametrize( + "timeframe", + ["1s", "1m", "5m", "15m", "1h", "2h", "4h", "8h", "12h", "1d", "7d", "1w", "1M", "3M", "1y"], +) def test_ohlcv_to_dataframe_multi(timeframe): data = generate_test_data(timeframe, 180) assert len(data) == 180 - df = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT') + df = ohlcv_to_dataframe(data, timeframe, "UNITTEST/USDT") assert len(df) == len(data) - 1 - df1 = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT', drop_incomplete=False) + df1 = ohlcv_to_dataframe(data, timeframe, "UNITTEST/USDT", drop_incomplete=False) assert len(df1) == len(data) assert data.equals(df1) data1 = data.copy() - if timeframe in ('1M', '3M', '1y'): - data1.loc[:, 'date'] = data1.loc[:, 'date'] + pd.to_timedelta('1w') + if timeframe in ("1M", "3M", "1y"): + data1.loc[:, "date"] = data1.loc[:, "date"] + pd.to_timedelta("1w") else: # Shift by half a timeframe - data1.loc[:, 'date'] = data1.loc[:, 'date'] + (pd.to_timedelta(timeframe) / 2) - df2 = ohlcv_to_dataframe(data1, timeframe, 'UNITTEST/USDT') + data1.loc[:, "date"] = data1.loc[:, "date"] + (pd.to_timedelta(timeframe) / 2) + df2 = ohlcv_to_dataframe(data1, timeframe, "UNITTEST/USDT") assert len(df2) == len(data) - 1 tfs = timeframe_to_seconds(timeframe) @@ -202,21 +219,20 @@ def test_ohlcv_to_dataframe_multi(timeframe): if 1 <= tfm < 10000: # minute based resampling does not work on timeframes >= 1 week ohlcv_dict = { - 'open': 'first', - 'high': 'max', - 'low': 'min', - 'close': 'last', - 'volume': 'sum' + "open": "first", + "high": "max", + "low": "min", + "close": "last", + "volume": "sum", } - dfs = data1.resample(f"{tfs}s", on='date').agg(ohlcv_dict).reset_index(drop=False) - dfm = data1.resample(f"{tfm}min", on='date').agg(ohlcv_dict).reset_index(drop=False) + dfs = data1.resample(f"{tfs}s", on="date").agg(ohlcv_dict).reset_index(drop=False) + dfm = data1.resample(f"{tfm}min", on="date").agg(ohlcv_dict).reset_index(drop=False) assert dfs.equals(dfm) assert dfs.equals(df1) def test_ohlcv_to_dataframe_1M(): - # Monthly ticks from 2019-09-01 to 2023-07-01 ticks = [ [1567296000000, 8042.08, 10475.54, 7700.67, 8041.96, 608742.1109999999], @@ -265,25 +281,27 @@ def test_ohlcv_to_dataframe_1M(): [1680307200000, 28454.8, 31059.0, 26919.3, 29223.0, 14654208.219], [1682899200000, 29223.0, 29840.0, 25751.0, 27201.1, 13328157.284], [1685577600000, 27201.1, 31500.0, 24777.0, 30460.2, 14099299.273], - [1688169600000, 30460.2, 31850.0, 28830.0, 29338.8, 8760361.377] + [1688169600000, 30460.2, 31850.0, 28830.0, 29338.8, 8760361.377], ] - data = ohlcv_to_dataframe(ticks, '1M', pair="UNITTEST/USDT", - fill_missing=False, drop_incomplete=False) + data = ohlcv_to_dataframe( + ticks, "1M", pair="UNITTEST/USDT", fill_missing=False, drop_incomplete=False + ) assert len(data) 
== len(ticks) - assert data.iloc[0]['date'].strftime('%Y-%m-%d') == '2019-09-01' - assert data.iloc[-1]['date'].strftime('%Y-%m-%d') == '2023-07-01' + assert data.iloc[0]["date"].strftime("%Y-%m-%d") == "2019-09-01" + assert data.iloc[-1]["date"].strftime("%Y-%m-%d") == "2023-07-01" # Test with filling missing data - data = ohlcv_to_dataframe(ticks, '1M', pair="UNITTEST/USDT", - fill_missing=True, drop_incomplete=False) + data = ohlcv_to_dataframe( + ticks, "1M", pair="UNITTEST/USDT", fill_missing=True, drop_incomplete=False + ) assert len(data) == len(ticks) - assert data.iloc[0]['date'].strftime('%Y-%m-%d') == '2019-09-01' - assert data.iloc[-1]['date'].strftime('%Y-%m-%d') == '2023-07-01' + assert data.iloc[0]["date"].strftime("%Y-%m-%d") == "2019-09-01" + assert data.iloc[-1]["date"].strftime("%Y-%m-%d") == "2023-07-01" def test_ohlcv_drop_incomplete(caplog): - timeframe = '1d' + timeframe = "1d" ticks = [ [ 1559750400000, # 2019-06-04 @@ -307,7 +325,7 @@ def test_ohlcv_drop_incomplete(caplog): 8.893e-05, 8.875e-05, 8.877e-05, - 2251 + 2251, ], [ 1560009600000, # 2019-06-07 @@ -315,35 +333,33 @@ def test_ohlcv_drop_incomplete(caplog): 8.883e-05, 8.895e-05, 8.817e-05, - 123551 - ] + 123551, + ], ] caplog.set_level(logging.DEBUG) - data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC", - fill_missing=False, drop_incomplete=False) + data = ohlcv_to_dataframe( + ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False, drop_incomplete=False + ) assert len(data) == 4 assert not log_has("Dropping last candle", caplog) # Drop last candle - data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC", - fill_missing=False, drop_incomplete=True) + data = ohlcv_to_dataframe( + ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False, drop_incomplete=True + ) assert len(data) == 3 assert log_has("Dropping last candle", caplog) def test_trim_dataframe(testdatadir) -> None: - data = load_data( - datadir=testdatadir, - timeframe='1m', - pairs=['UNITTEST/BTC'] - )['UNITTEST/BTC'] - min_date = int(data.iloc[0]['date'].timestamp()) - max_date = int(data.iloc[-1]['date'].timestamp()) + data = load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"])["UNITTEST/BTC"] + min_date = int(data.iloc[0]["date"].timestamp()) + max_date = int(data.iloc[-1]["date"].timestamp()) data_modify = data.copy() # Remove first 30 minutes (1800 s) - tr = TimeRange('date', None, min_date + 1800, 0) + tr = TimeRange("date", None, min_date + 1800, 0) data_modify = trim_dataframe(data_modify, tr) assert not data_modify.equals(data) assert len(data_modify) < len(data) @@ -352,7 +368,7 @@ def test_trim_dataframe(testdatadir) -> None: assert all(data_modify.iloc[0] == data.iloc[30]) data_modify = data.copy() - tr = TimeRange('date', None, min_date + 1800, 0) + tr = TimeRange("date", None, min_date + 1800, 0) # Remove first 20 candles - ignores min date data_modify = trim_dataframe(data_modify, tr, startup_candles=20) assert not data_modify.equals(data) @@ -363,7 +379,7 @@ def test_trim_dataframe(testdatadir) -> None: data_modify = data.copy() # Remove last 30 minutes (1800 s) - tr = TimeRange(None, 'date', 0, max_date - 1800) + tr = TimeRange(None, "date", 0, max_date - 1800) data_modify = trim_dataframe(data_modify, tr) assert not data_modify.equals(data) assert len(data_modify) < len(data) @@ -373,7 +389,7 @@ def test_trim_dataframe(testdatadir) -> None: data_modify = data.copy() # Remove first 25 and last 30 minutes (1800 s) - tr = TimeRange('date', 'date', min_date + 1500, max_date - 1800) + tr = 
TimeRange("date", "date", min_date + 1500, max_date - 1800) data_modify = trim_dataframe(data_modify, tr) assert not data_modify.equals(data) assert len(data_modify) < len(data) @@ -383,8 +399,9 @@ def test_trim_dataframe(testdatadir) -> None: def test_trades_df_remove_duplicates(trades_history_df): - trades_history1 = pd.concat([trades_history_df, trades_history_df, trades_history_df] - ).reset_index(drop=True) + trades_history1 = pd.concat( + [trades_history_df, trades_history_df, trades_history_df] + ).reset_index(drop=True) assert len(trades_history1) == len(trades_history_df) * 3 res = trades_df_remove_duplicates(trades_history1) assert len(res) == len(trades_history_df) @@ -396,55 +413,55 @@ def test_trades_dict_to_list(fetch_trades_result): assert isinstance(res, list) assert isinstance(res[0], list) for i, t in enumerate(res): - assert t[0] == fetch_trades_result[i]['timestamp'] - assert t[1] == fetch_trades_result[i]['id'] - assert t[2] == fetch_trades_result[i]['type'] - assert t[3] == fetch_trades_result[i]['side'] - assert t[4] == fetch_trades_result[i]['price'] - assert t[5] == fetch_trades_result[i]['amount'] - assert t[6] == fetch_trades_result[i]['cost'] + assert t[0] == fetch_trades_result[i]["timestamp"] + assert t[1] == fetch_trades_result[i]["id"] + assert t[2] == fetch_trades_result[i]["type"] + assert t[3] == fetch_trades_result[i]["side"] + assert t[4] == fetch_trades_result[i]["price"] + assert t[5] == fetch_trades_result[i]["amount"] + assert t[6] == fetch_trades_result[i]["cost"] def test_convert_trades_format(default_conf, testdatadir, tmp_path): - files = [{'old': tmp_path / "XRP_ETH-trades.json.gz", - 'new': tmp_path / "XRP_ETH-trades.json"}, - {'old': tmp_path / "XRP_OLD-trades.json.gz", - 'new': tmp_path / "XRP_OLD-trades.json"}, - ] + files = [ + {"old": tmp_path / "XRP_ETH-trades.json.gz", "new": tmp_path / "XRP_ETH-trades.json"}, + {"old": tmp_path / "XRP_OLD-trades.json.gz", "new": tmp_path / "XRP_OLD-trades.json"}, + ] for file in files: - copyfile(testdatadir / file['old'].name, file['old']) - assert not file['new'].exists() + copyfile(testdatadir / file["old"].name, file["old"]) + assert not file["new"].exists() - default_conf['datadir'] = tmp_path + default_conf["datadir"] = tmp_path - convert_trades_format(default_conf, convert_from='jsongz', - convert_to='json', erase=False) + convert_trades_format(default_conf, convert_from="jsongz", convert_to="json", erase=False) for file in files: - assert file['new'].exists() - assert file['old'].exists() + assert file["new"].exists() + assert file["old"].exists() # Remove original file - file['old'].unlink() + file["old"].unlink() # Convert back - convert_trades_format(default_conf, convert_from='json', - convert_to='jsongz', erase=True) + convert_trades_format(default_conf, convert_from="json", convert_to="jsongz", erase=True) for file in files: - assert file['old'].exists() - assert not file['new'].exists() + assert file["old"].exists() + assert not file["new"].exists() - _clean_test_file(file['old']) - if file['new'].exists(): - file['new'].unlink() + _clean_test_file(file["old"]) + if file["new"].exists(): + file["new"].unlink() -@pytest.mark.parametrize('file_base,candletype', [ - (['XRP_ETH-5m', 'XRP_ETH-1m'], CandleType.SPOT), - (['UNITTEST_USDT_USDT-1h-mark', 'XRP_USDT_USDT-1h-mark'], CandleType.MARK), - (['XRP_USDT_USDT-1h-futures'], CandleType.FUTURES), -]) +@pytest.mark.parametrize( + "file_base,candletype", + [ + (["XRP_ETH-5m", "XRP_ETH-1m"], CandleType.SPOT), + (["UNITTEST_USDT_USDT-1h-mark", 
"XRP_USDT_USDT-1h-mark"], CandleType.MARK), + (["XRP_USDT_USDT-1h-futures"], CandleType.FUTURES), + ], +) def test_convert_ohlcv_format(default_conf, testdatadir, tmp_path, file_base, candletype): - prependix = '' if candletype == CandleType.SPOT else 'futures/' + prependix = "" if candletype == CandleType.SPOT else "futures/" files_orig = [] files_temp = [] files_new = [] @@ -459,77 +476,77 @@ def test_convert_ohlcv_format(default_conf, testdatadir, tmp_path, file_base, ca files_temp.append(file_temp) files_new.append(file_new) - default_conf['datadir'] = tmp_path - default_conf['candle_types'] = [candletype] + default_conf["datadir"] = tmp_path + default_conf["candle_types"] = [candletype] if candletype == CandleType.SPOT: - default_conf['pairs'] = ['XRP/ETH', 'XRP/USDT', 'UNITTEST/USDT'] + default_conf["pairs"] = ["XRP/ETH", "XRP/USDT", "UNITTEST/USDT"] else: - default_conf['pairs'] = ['XRP/ETH:ETH', 'XRP/USDT:USDT', 'UNITTEST/USDT:USDT'] - default_conf['timeframes'] = ['1m', '5m', '1h'] + default_conf["pairs"] = ["XRP/ETH:ETH", "XRP/USDT:USDT", "UNITTEST/USDT:USDT"] + default_conf["timeframes"] = ["1m", "5m", "1h"] assert not file_new.exists() convert_ohlcv_format( default_conf, - convert_from='feather', - convert_to='jsongz', + convert_from="feather", + convert_to="jsongz", erase=False, ) - for file in (files_temp + files_new): + for file in files_temp + files_new: assert file.exists() # Remove original files - for file in (files_temp): + for file in files_temp: file.unlink() # Convert back convert_ohlcv_format( default_conf, - convert_from='jsongz', - convert_to='feather', + convert_from="jsongz", + convert_to="feather", erase=True, ) - for file in (files_temp): + for file in files_temp: assert file.exists() - for file in (files_new): + for file in files_new: assert not file.exists() def test_reduce_dataframe_footprint(): - data = generate_test_data('15m', 40) + data = generate_test_data("15m", 40) - data['open_copy'] = data['open'] - data['close_copy'] = data['close'] - data['close_copy'] = data['close'] + data["open_copy"] = data["open"] + data["close_copy"] = data["close"] + data["close_copy"] = data["close"] - assert data['open'].dtype == np.float64 - assert data['open_copy'].dtype == np.float64 - assert data['close_copy'].dtype == np.float64 + assert data["open"].dtype == np.float64 + assert data["open_copy"].dtype == np.float64 + assert data["close_copy"].dtype == np.float64 df2 = reduce_dataframe_footprint(data) # Does not modify original dataframe - assert data['open'].dtype == np.float64 - assert data['open_copy'].dtype == np.float64 - assert data['close_copy'].dtype == np.float64 + assert data["open"].dtype == np.float64 + assert data["open_copy"].dtype == np.float64 + assert data["close_copy"].dtype == np.float64 # skips ohlcv columns - assert df2['open'].dtype == np.float64 - assert df2['high'].dtype == np.float64 - assert df2['low'].dtype == np.float64 - assert df2['close'].dtype == np.float64 - assert df2['volume'].dtype == np.float64 + assert df2["open"].dtype == np.float64 + assert df2["high"].dtype == np.float64 + assert df2["low"].dtype == np.float64 + assert df2["close"].dtype == np.float64 + assert df2["volume"].dtype == np.float64 # Changes dtype of returned dataframe - assert df2['open_copy'].dtype == np.float32 - assert df2['close_copy'].dtype == np.float32 + assert df2["open_copy"].dtype == np.float32 + assert df2["close_copy"].dtype == np.float32 def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog): - pair = 'XRP/ETH' - file1 = tmp_path / 
'XRP_ETH-1m.feather' - file5 = tmp_path / 'XRP_ETH-5m.feather' - filetrades = tmp_path / 'XRP_ETH-trades.json.gz' + pair = "XRP/ETH" + file1 = tmp_path / "XRP_ETH-1m.feather" + file5 = tmp_path / "XRP_ETH-5m.feather" + filetrades = tmp_path / "XRP_ETH-trades.json.gz" copyfile(testdatadir / file1.name, file1) copyfile(testdatadir / file5.name, file5) copyfile(testdatadir / filetrades.name, filetrades) @@ -538,13 +555,18 @@ def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog): dfbak_1m = load_pair_history(datadir=tmp_path, timeframe="1m", pair=pair) dfbak_5m = load_pair_history(datadir=tmp_path, timeframe="5m", pair=pair) - tr = TimeRange.parse_timerange('20191011-20191012') + tr = TimeRange.parse_timerange("20191011-20191012") - convert_trades_to_ohlcv([pair], timeframes=['1m', '5m'], - data_format_trades='jsongz', - datadir=tmp_path, timerange=tr, erase=True, - data_format_ohlcv='feather', - candle_type=CandleType.SPOT) + convert_trades_to_ohlcv( + [pair], + timeframes=["1m", "5m"], + data_format_trades="jsongz", + datadir=tmp_path, + timerange=tr, + erase=True, + data_format_ohlcv="feather", + candle_type=CandleType.SPOT, + ) assert log_has("Deleting existing data for pair XRP/ETH, interval 1m.", caplog) # Load new data @@ -553,12 +575,17 @@ def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog): assert_frame_equal(dfbak_1m, df_1m, check_exact=True) assert_frame_equal(dfbak_5m, df_5m, check_exact=True) - msg = 'Could not convert NoDatapair to OHLCV.' + msg = "Could not convert NoDatapair to OHLCV." assert not log_has(msg, caplog) - convert_trades_to_ohlcv(['NoDatapair'], timeframes=['1m', '5m'], - data_format_trades='jsongz', - datadir=tmp_path, timerange=tr, erase=True, - data_format_ohlcv='feather', - candle_type=CandleType.SPOT) + convert_trades_to_ohlcv( + ["NoDatapair"], + timeframes=["1m", "5m"], + data_format_trades="jsongz", + datadir=tmp_path, + timerange=tr, + erase=True, + data_format_ohlcv="feather", + candle_type=CandleType.SPOT, + ) assert log_has(msg, caplog) diff --git a/tests/data/test_datahandler.py b/tests/data/test_datahandler.py index 97c9e29ac..1f66d1b1e 100644 --- a/tests/data/test_datahandler.py +++ b/tests/data/test_datahandler.py @@ -13,8 +13,11 @@ from freqtrade.configuration import TimeRange from freqtrade.constants import AVAILABLE_DATAHANDLERS from freqtrade.data.history.datahandlers.featherdatahandler import FeatherDataHandler from freqtrade.data.history.datahandlers.hdf5datahandler import HDF5DataHandler -from freqtrade.data.history.datahandlers.idatahandler import (IDataHandler, get_datahandler, - get_datahandlerclass) +from freqtrade.data.history.datahandlers.idatahandler import ( + IDataHandler, + get_datahandler, + get_datahandlerclass, +) from freqtrade.data.history.datahandlers.jsondatahandler import JsonDataHandler, JsonGzDataHandler from freqtrade.data.history.datahandlers.parquetdatahandler import ParquetDataHandler from freqtrade.enums import CandleType, TradingMode @@ -22,39 +25,53 @@ from tests.conftest import log_has, log_has_re def test_datahandler_ohlcv_get_pairs(testdatadir): - pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, '5m', candle_type=CandleType.SPOT) + pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, "5m", candle_type=CandleType.SPOT) # Convert to set to avoid failures due to sorting - assert set(pairs) == {'UNITTEST/BTC', 'XLM/BTC', 'ETH/BTC', 'TRX/BTC', 'LTC/BTC', - 'XMR/BTC', 'ZEC/BTC', 'ADA/BTC', 'ETC/BTC', 'NXT/BTC', - 'DASH/BTC', 'XRP/ETH'} + assert set(pairs) == { + "UNITTEST/BTC", + 
"XLM/BTC", + "ETH/BTC", + "TRX/BTC", + "LTC/BTC", + "XMR/BTC", + "ZEC/BTC", + "ADA/BTC", + "ETC/BTC", + "NXT/BTC", + "DASH/BTC", + "XRP/ETH", + } - pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, '8m', candle_type=CandleType.SPOT) - assert set(pairs) == {'UNITTEST/BTC'} + pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, "8m", candle_type=CandleType.SPOT) + assert set(pairs) == {"UNITTEST/BTC"} - pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, '5m', candle_type=CandleType.SPOT) - assert set(pairs) == {'UNITTEST/BTC'} + pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, "5m", candle_type=CandleType.SPOT) + assert set(pairs) == {"UNITTEST/BTC"} - pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.MARK) - assert set(pairs) == {'UNITTEST/USDT:USDT', 'XRP/USDT:USDT'} + pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.MARK) + assert set(pairs) == {"UNITTEST/USDT:USDT", "XRP/USDT:USDT"} - pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.FUTURES) - assert set(pairs) == {'XRP/USDT:USDT'} + pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.FUTURES) + assert set(pairs) == {"XRP/USDT:USDT"} - pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.MARK) - assert set(pairs) == {'UNITTEST/USDT:USDT'} + pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.MARK) + assert set(pairs) == {"UNITTEST/USDT:USDT"} -@pytest.mark.parametrize('filename,pair,timeframe,candletype', [ - ('XMR_BTC-5m.json', 'XMR_BTC', '5m', ''), - ('XMR_USDT-1h.h5', 'XMR_USDT', '1h', ''), - ('BTC-PERP-1h.h5', 'BTC-PERP', '1h', ''), - ('BTC_USDT-2h.jsongz', 'BTC_USDT', '2h', ''), - ('BTC_USDT-2h-mark.jsongz', 'BTC_USDT', '2h', 'mark'), - ('XMR_USDT-1h-mark.h5', 'XMR_USDT', '1h', 'mark'), - ('XMR_USDT-1h-random.h5', 'XMR_USDT', '1h', 'random'), - ('BTC-PERP-1h-index.h5', 'BTC-PERP', '1h', 'index'), - ('XMR_USDT_USDT-1h-mark.h5', 'XMR_USDT_USDT', '1h', 'mark'), -]) +@pytest.mark.parametrize( + "filename,pair,timeframe,candletype", + [ + ("XMR_BTC-5m.json", "XMR_BTC", "5m", ""), + ("XMR_USDT-1h.h5", "XMR_USDT", "1h", ""), + ("BTC-PERP-1h.h5", "BTC-PERP", "1h", ""), + ("BTC_USDT-2h.jsongz", "BTC_USDT", "2h", ""), + ("BTC_USDT-2h-mark.jsongz", "BTC_USDT", "2h", "mark"), + ("XMR_USDT-1h-mark.h5", "XMR_USDT", "1h", "mark"), + ("XMR_USDT-1h-random.h5", "XMR_USDT", "1h", "random"), + ("BTC-PERP-1h-index.h5", "BTC-PERP", "1h", "index"), + ("XMR_USDT_USDT-1h-mark.h5", "XMR_USDT_USDT", "1h", "mark"), + ], +) def test_datahandler_ohlcv_regex(filename, pair, timeframe, candletype): regex = JsonDataHandler._OHLCV_REGEX @@ -65,18 +82,20 @@ def test_datahandler_ohlcv_regex(filename, pair, timeframe, candletype): assert match[3] == candletype -@pytest.mark.parametrize('input,expected', [ - ('XMR_USDT', 'XMR/USDT'), - ('BTC_USDT', 'BTC/USDT'), - ('USDT_BUSD', 'USDT/BUSD'), - ('BTC_USDT_USDT', 'BTC/USDT:USDT'), # Futures - ('XRP_USDT_USDT', 'XRP/USDT:USDT'), # futures - ('BTC-PERP', 'BTC-PERP'), - ('BTC-PERP_USDT', 'BTC-PERP:USDT'), - ('UNITTEST_USDT', 'UNITTEST/USDT'), -]) +@pytest.mark.parametrize( + "input,expected", + [ + ("XMR_USDT", "XMR/USDT"), + ("BTC_USDT", "BTC/USDT"), + ("USDT_BUSD", "USDT/BUSD"), + ("BTC_USDT_USDT", "BTC/USDT:USDT"), # Futures + ("XRP_USDT_USDT", "XRP/USDT:USDT"), # futures + ("BTC-PERP", "BTC-PERP"), + ("BTC-PERP_USDT", "BTC-PERP:USDT"), + ("UNITTEST_USDT", "UNITTEST/USDT"), + ], +) def 
test_rebuild_pair_from_filename(input, expected): - assert IDataHandler.rebuild_pair_from_filename(input) == expected @@ -84,63 +103,63 @@ def test_datahandler_ohlcv_get_available_data(testdatadir): paircombs = FeatherDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT) # Convert to set to avoid failures due to sorting assert set(paircombs) == { - ('UNITTEST/BTC', '5m', CandleType.SPOT), - ('ETH/BTC', '5m', CandleType.SPOT), - ('XLM/BTC', '5m', CandleType.SPOT), - ('TRX/BTC', '5m', CandleType.SPOT), - ('LTC/BTC', '5m', CandleType.SPOT), - ('XMR/BTC', '5m', CandleType.SPOT), - ('ZEC/BTC', '5m', CandleType.SPOT), - ('UNITTEST/BTC', '1m', CandleType.SPOT), - ('ADA/BTC', '5m', CandleType.SPOT), - ('ETC/BTC', '5m', CandleType.SPOT), - ('NXT/BTC', '5m', CandleType.SPOT), - ('DASH/BTC', '5m', CandleType.SPOT), - ('XRP/ETH', '1m', CandleType.SPOT), - ('XRP/ETH', '5m', CandleType.SPOT), - ('UNITTEST/BTC', '30m', CandleType.SPOT), - ('UNITTEST/BTC', '8m', CandleType.SPOT), + ("UNITTEST/BTC", "5m", CandleType.SPOT), + ("ETH/BTC", "5m", CandleType.SPOT), + ("XLM/BTC", "5m", CandleType.SPOT), + ("TRX/BTC", "5m", CandleType.SPOT), + ("LTC/BTC", "5m", CandleType.SPOT), + ("XMR/BTC", "5m", CandleType.SPOT), + ("ZEC/BTC", "5m", CandleType.SPOT), + ("UNITTEST/BTC", "1m", CandleType.SPOT), + ("ADA/BTC", "5m", CandleType.SPOT), + ("ETC/BTC", "5m", CandleType.SPOT), + ("NXT/BTC", "5m", CandleType.SPOT), + ("DASH/BTC", "5m", CandleType.SPOT), + ("XRP/ETH", "1m", CandleType.SPOT), + ("XRP/ETH", "5m", CandleType.SPOT), + ("UNITTEST/BTC", "30m", CandleType.SPOT), + ("UNITTEST/BTC", "8m", CandleType.SPOT), } paircombs = FeatherDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.FUTURES) # Convert to set to avoid failures due to sorting assert set(paircombs) == { - ('UNITTEST/USDT:USDT', '1h', 'mark'), - ('XRP/USDT:USDT', '5m', 'futures'), - ('XRP/USDT:USDT', '1h', 'futures'), - ('XRP/USDT:USDT', '1h', 'mark'), - ('XRP/USDT:USDT', '8h', 'mark'), - ('XRP/USDT:USDT', '8h', 'funding_rate'), + ("UNITTEST/USDT:USDT", "1h", "mark"), + ("XRP/USDT:USDT", "5m", "futures"), + ("XRP/USDT:USDT", "1h", "futures"), + ("XRP/USDT:USDT", "1h", "mark"), + ("XRP/USDT:USDT", "8h", "mark"), + ("XRP/USDT:USDT", "8h", "funding_rate"), } paircombs = JsonGzDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT) - assert set(paircombs) == {('UNITTEST/BTC', '8m', CandleType.SPOT)} + assert set(paircombs) == {("UNITTEST/BTC", "8m", CandleType.SPOT)} paircombs = HDF5DataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT) - assert set(paircombs) == {('UNITTEST/BTC', '5m', CandleType.SPOT)} + assert set(paircombs) == {("UNITTEST/BTC", "5m", CandleType.SPOT)} def test_jsondatahandler_ohlcv_purge(mocker, testdatadir): mocker.patch.object(Path, "exists", MagicMock(return_value=False)) unlinkmock = mocker.patch.object(Path, "unlink", MagicMock()) dh = JsonGzDataHandler(testdatadir) - assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '') - assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark') + assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "") + assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark") assert unlinkmock.call_count == 0 mocker.patch.object(Path, "exists", MagicMock(return_value=True)) - assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '') - assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark') + assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "") + assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark") assert 
unlinkmock.call_count == 2 def test_jsondatahandler_ohlcv_load(testdatadir, caplog): dh = JsonDataHandler(testdatadir) - df = dh.ohlcv_load('UNITTEST/BTC', '1m', 'spot') + df = dh.ohlcv_load("UNITTEST/BTC", "1m", "spot") assert len(df) > 0 -# # Failure case (empty array) - df1 = dh.ohlcv_load('NOPAIR/XXX', '4m', 'spot') + # # Failure case (empty array) + df1 = dh.ohlcv_load("NOPAIR/XXX", "4m", "spot") assert len(df1) == 0 assert log_has("Could not load data for NOPAIR/XXX.", caplog) assert df.columns.equals(df1.columns) @@ -148,22 +167,22 @@ def test_jsondatahandler_ohlcv_load(testdatadir, caplog): def test_datahandler_ohlcv_data_min_max(testdatadir): dh = JsonDataHandler(testdatadir) - min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot') + min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "5m", "spot") assert len(min_max) == 3 # Empty pair - min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot') + min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "8m", "spot") assert len(min_max) == 3 assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc) assert min_max[0] == min_max[1] # Empty pair2 - min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '41m', 'spot') + min_max = dh.ohlcv_data_min_max("NOPAIR/XXX", "41m", "spot") assert len(min_max) == 3 assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc) assert min_max[0] == min_max[1] # Existing pair ... - min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '1m', 'spot') + min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "1m", "spot") assert len(min_max) == 3 assert min_max[0] == datetime(2017, 11, 4, 23, 2, tzinfo=timezone.utc) assert min_max[1] == datetime(2017, 11, 14, 22, 59, tzinfo=timezone.utc) @@ -172,181 +191,192 @@ def test_datahandler_ohlcv_data_min_max(testdatadir): def test_datahandler__check_empty_df(testdatadir, caplog): dh = JsonDataHandler(testdatadir) expected_text = r"Price jump in UNITTEST/USDT, 1h, spot between" - df = DataFrame([ + df = DataFrame( [ - 1511686200000, # 8:50:00 - 8.794, # open - 8.948, # high - 8.794, # low - 8.88, # close - 2255, # volume (in quote currency) + [ + 1511686200000, # 8:50:00 + 8.794, # open + 8.948, # high + 8.794, # low + 8.88, # close + 2255, # volume (in quote currency) + ], + [ + 1511686500000, # 8:55:00 + 8.88, + 8.942, + 8.88, + 8.893, + 9911, + ], + [ + 1511687100000, # 9:05:00 + 8.891, + 8.893, + 8.875, + 8.877, + 2251, + ], + [ + 1511687400000, # 9:10:00 + 8.877, + 8.883, + 8.895, + 8.817, + 123551, + ], ], - [ - 1511686500000, # 8:55:00 - 8.88, - 8.942, - 8.88, - 8.893, - 9911, - ], - [ - 1511687100000, # 9:05:00 - 8.891, - 8.893, - 8.875, - 8.877, - 2251 - ], - [ - 1511687400000, # 9:10:00 - 8.877, - 8.883, - 8.895, - 8.817, - 123551 - ] - ], columns=['date', 'open', 'high', 'low', 'close', 'volume']) + columns=["date", "open", "high", "low", "close", "volume"], + ) - dh._check_empty_df(df, 'UNITTEST/USDT', '1h', CandleType.SPOT, True, True) + dh._check_empty_df(df, "UNITTEST/USDT", "1h", CandleType.SPOT, True, True) assert not log_has_re(expected_text, caplog) - df = DataFrame([ + df = DataFrame( [ - 1511686200000, # 8:50:00 - 8.794, # open - 8.948, # high - 8.794, # low - 8.88, # close - 2255, # volume (in quote currency) + [ + 1511686200000, # 8:50:00 + 8.794, # open + 8.948, # high + 8.794, # low + 8.88, # close + 2255, # volume (in quote currency) + ], + [ + 1511686500000, # 8:55:00 + 8.88, + 8.942, + 8.88, + 8.893, + 9911, + ], + [ + 1511687100000, # 9:05:00 + 889.1, # Price jump by several decimals + 889.3, + 887.5, + 887.7, + 2251, + ], + [ + 1511687400000, # 
9:10:00 + 8.877, + 8.883, + 8.895, + 8.817, + 123551, + ], ], - [ - 1511686500000, # 8:55:00 - 8.88, - 8.942, - 8.88, - 8.893, - 9911, - ], - [ - 1511687100000, # 9:05:00 - 889.1, # Price jump by several decimals - 889.3, - 887.5, - 887.7, - 2251 - ], - [ - 1511687400000, # 9:10:00 - 8.877, - 8.883, - 8.895, - 8.817, - 123551 - ] - ], columns=['date', 'open', 'high', 'low', 'close', 'volume']) + columns=["date", "open", "high", "low", "close", "volume"], + ) - dh._check_empty_df(df, 'UNITTEST/USDT', '1h', CandleType.SPOT, True, True) + dh._check_empty_df(df, "UNITTEST/USDT", "1h", CandleType.SPOT, True, True) assert log_has_re(expected_text, caplog) # @pytest.mark.parametrize('datahandler', []) @pytest.mark.skip("All datahandlers currently support trades data.") -def test_datahandler_trades_not_supported(datahandler, testdatadir, ): +def test_datahandler_trades_not_supported( + datahandler, + testdatadir, +): # Currently disabled. Re-enable should a new provider not support trades data. dh = get_datahandler(testdatadir, datahandler) with pytest.raises(NotImplementedError): - dh.trades_load('UNITTEST/ETH') + dh.trades_load("UNITTEST/ETH") with pytest.raises(NotImplementedError): - dh.trades_store('UNITTEST/ETH', MagicMock()) + dh.trades_store("UNITTEST/ETH", MagicMock()) def test_jsondatahandler_trades_load(testdatadir, caplog): dh = JsonGzDataHandler(testdatadir) logmsg = "Old trades format detected - converting" - dh.trades_load('XRP/ETH', TradingMode.SPOT) + dh.trades_load("XRP/ETH", TradingMode.SPOT) assert not log_has(logmsg, caplog) # Test conversation is happening - dh.trades_load('XRP/OLD', TradingMode.SPOT) + dh.trades_load("XRP/OLD", TradingMode.SPOT) assert log_has(logmsg, caplog) -@pytest.mark.parametrize('datahandler', AVAILABLE_DATAHANDLERS) -def test_datahandler_ohlcv_append(datahandler, testdatadir, ): +@pytest.mark.parametrize("datahandler", AVAILABLE_DATAHANDLERS) +def test_datahandler_ohlcv_append( + datahandler, + testdatadir, +): dh = get_datahandler(testdatadir, datahandler) with pytest.raises(NotImplementedError): - dh.ohlcv_append('UNITTEST/ETH', '5m', DataFrame(), CandleType.SPOT) + dh.ohlcv_append("UNITTEST/ETH", "5m", DataFrame(), CandleType.SPOT) with pytest.raises(NotImplementedError): - dh.ohlcv_append('UNITTEST/ETH', '5m', DataFrame(), CandleType.MARK) + dh.ohlcv_append("UNITTEST/ETH", "5m", DataFrame(), CandleType.MARK) -@pytest.mark.parametrize('datahandler', AVAILABLE_DATAHANDLERS) +@pytest.mark.parametrize("datahandler", AVAILABLE_DATAHANDLERS) def test_datahandler_trades_append(datahandler, testdatadir): dh = get_datahandler(testdatadir, datahandler) with pytest.raises(NotImplementedError): - dh.trades_append('UNITTEST/ETH', DataFrame()) + dh.trades_append("UNITTEST/ETH", DataFrame()) -@pytest.mark.parametrize('datahandler,expected', [ - ('jsongz', {'XRP/ETH', 'XRP/OLD'}), - ('hdf5', {'XRP/ETH'}), - ('feather', {'XRP/ETH'}), - ('parquet', {'XRP/ETH'}), -]) +@pytest.mark.parametrize( + "datahandler,expected", + [ + ("jsongz", {"XRP/ETH", "XRP/OLD"}), + ("hdf5", {"XRP/ETH"}), + ("feather", {"XRP/ETH"}), + ("parquet", {"XRP/ETH"}), + ], +) def test_datahandler_trades_get_pairs(testdatadir, datahandler, expected): - pairs = get_datahandlerclass(datahandler).trades_get_pairs(testdatadir) # Convert to set to avoid failures due to sorting assert set(pairs) == expected def test_hdf5datahandler_trades_load(testdatadir): - dh = get_datahandler(testdatadir, 'hdf5') - trades = dh.trades_load('XRP/ETH', TradingMode.SPOT) + dh = get_datahandler(testdatadir, "hdf5") + 
trades = dh.trades_load("XRP/ETH", TradingMode.SPOT) assert isinstance(trades, DataFrame) - trades1 = dh.trades_load('UNITTEST/NONEXIST', TradingMode.SPOT) + trades1 = dh.trades_load("UNITTEST/NONEXIST", TradingMode.SPOT) assert isinstance(trades1, DataFrame) assert trades1.empty # data goes from 2019-10-11 - 2019-10-13 - timerange = TimeRange.parse_timerange('20191011-20191012') + timerange = TimeRange.parse_timerange("20191011-20191012") - trades2 = dh._trades_load('XRP/ETH', TradingMode.SPOT, timerange) + trades2 = dh._trades_load("XRP/ETH", TradingMode.SPOT, timerange) assert len(trades) > len(trades2) # Check that ID is None (If it's nan, it's wrong) - assert trades2.iloc[0]['type'] is None + assert trades2.iloc[0]["type"] is None # unfiltered load has trades before starttime - assert len(trades.loc[trades['timestamp'] < timerange.startts * 1000]) >= 0 + assert len(trades.loc[trades["timestamp"] < timerange.startts * 1000]) >= 0 # filtered list does not have trades before starttime - assert len(trades2.loc[trades2['timestamp'] < timerange.startts * 1000]) == 0 + assert len(trades2.loc[trades2["timestamp"] < timerange.startts * 1000]) == 0 # unfiltered load has trades after endtime - assert len(trades.loc[trades['timestamp'] > timerange.stopts * 1000]) >= 0 + assert len(trades.loc[trades["timestamp"] > timerange.stopts * 1000]) >= 0 # filtered list does not have trades after endtime - assert len(trades2.loc[trades2['timestamp'] > timerange.stopts * 1000]) == 0 + assert len(trades2.loc[trades2["timestamp"] > timerange.stopts * 1000]) == 0 # assert len([t for t in trades2 if t[0] > timerange.stopts * 1000]) == 0 -@pytest.mark.parametrize('pair,timeframe,candle_type,candle_append,startdt,enddt', [ - # Data goes from 2018-01-10 - 2018-01-30 - ('UNITTEST/BTC', '5m', 'spot', '', '2018-01-15', '2018-01-19'), - # Mark data goes from to 2021-11-15 2021-11-19 - ('UNITTEST/USDT:USDT', '1h', 'mark', '-mark', '2021-11-16', '2021-11-18'), -]) +@pytest.mark.parametrize( + "pair,timeframe,candle_type,candle_append,startdt,enddt", + [ + # Data goes from 2018-01-10 - 2018-01-30 + ("UNITTEST/BTC", "5m", "spot", "", "2018-01-15", "2018-01-19"), + # Mark data goes from to 2021-11-15 2021-11-19 + ("UNITTEST/USDT:USDT", "1h", "mark", "-mark", "2021-11-16", "2021-11-18"), + ], +) def test_hdf5datahandler_ohlcv_load_and_resave( - testdatadir, - tmp_path, - pair, - timeframe, - candle_type, - candle_append, - startdt, enddt + testdatadir, tmp_path, pair, timeframe, candle_type, candle_append, startdt, enddt ): tmpdir2 = tmp_path - if candle_type not in ('', 'spot'): - tmpdir2 = tmp_path / 'futures' + if candle_type not in ("", "spot"): + tmpdir2 = tmp_path / "futures" tmpdir2.mkdir() - dh = get_datahandler(testdatadir, 'hdf5') + dh = get_datahandler(testdatadir, "hdf5") ohlcv = dh._ohlcv_load(pair, timeframe, None, candle_type=candle_type) assert isinstance(ohlcv, DataFrame) assert len(ohlcv) > 0 @@ -354,50 +384,46 @@ def test_hdf5datahandler_ohlcv_load_and_resave( file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.h5" assert not file.is_file() - dh1 = get_datahandler(tmp_path, 'hdf5') - dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type) + dh1 = get_datahandler(tmp_path, "hdf5") + dh1.ohlcv_store("UNITTEST/NEW", timeframe, ohlcv, candle_type=candle_type) assert file.is_file() - assert not ohlcv[ohlcv['date'] < startdt].empty + assert not ohlcv[ohlcv["date"] < startdt].empty timerange = TimeRange.parse_timerange(f"{startdt.replace('-', '')}-{enddt.replace('-', '')}") # Call 
private function to ensure timerange is filtered in hdf5 ohlcv = dh._ohlcv_load(pair, timeframe, timerange, candle_type=candle_type) - ohlcv1 = dh1._ohlcv_load('UNITTEST/NEW', timeframe, timerange, candle_type=candle_type) + ohlcv1 = dh1._ohlcv_load("UNITTEST/NEW", timeframe, timerange, candle_type=candle_type) assert len(ohlcv) == len(ohlcv1) assert ohlcv.equals(ohlcv1) - assert ohlcv[ohlcv['date'] < startdt].empty - assert ohlcv[ohlcv['date'] > enddt].empty + assert ohlcv[ohlcv["date"] < startdt].empty + assert ohlcv[ohlcv["date"] > enddt].empty # Try loading inexisting file - ohlcv = dh.ohlcv_load('UNITTEST/NONEXIST', timeframe, candle_type=candle_type) + ohlcv = dh.ohlcv_load("UNITTEST/NONEXIST", timeframe, candle_type=candle_type) assert ohlcv.empty -@pytest.mark.parametrize('pair,timeframe,candle_type,candle_append,startdt,enddt', [ - # Data goes from 2018-01-10 - 2018-01-30 - ('UNITTEST/BTC', '5m', 'spot', '', '2018-01-15', '2018-01-19'), - # Mark data goes from to 2021-11-15 2021-11-19 - ('UNITTEST/USDT:USDT', '1h', 'mark', '-mark', '2021-11-16', '2021-11-18'), -]) -@pytest.mark.parametrize('datahandler', ['hdf5', 'feather', 'parquet']) +@pytest.mark.parametrize( + "pair,timeframe,candle_type,candle_append,startdt,enddt", + [ + # Data goes from 2018-01-10 - 2018-01-30 + ("UNITTEST/BTC", "5m", "spot", "", "2018-01-15", "2018-01-19"), + # Mark data goes from to 2021-11-15 2021-11-19 + ("UNITTEST/USDT:USDT", "1h", "mark", "-mark", "2021-11-16", "2021-11-18"), + ], +) +@pytest.mark.parametrize("datahandler", ["hdf5", "feather", "parquet"]) def test_generic_datahandler_ohlcv_load_and_resave( - datahandler, - testdatadir, - tmp_path, - pair, - timeframe, - candle_type, - candle_append, - startdt, enddt + datahandler, testdatadir, tmp_path, pair, timeframe, candle_type, candle_append, startdt, enddt ): tmpdir2 = tmp_path - if candle_type not in ('', 'spot'): - tmpdir2 = tmp_path / 'futures' + if candle_type not in ("", "spot"): + tmpdir2 = tmp_path / "futures" tmpdir2.mkdir() # Load data from one common file - dhbase = get_datahandler(testdatadir, 'feather') + dhbase = get_datahandler(testdatadir, "feather") ohlcv = dhbase._ohlcv_load(pair, timeframe, None, candle_type=candle_type) assert isinstance(ohlcv, DataFrame) assert len(ohlcv) > 0 @@ -409,122 +435,123 @@ def test_generic_datahandler_ohlcv_load_and_resave( assert not file.is_file() dh1 = get_datahandler(tmp_path, datahandler) - dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type) + dh1.ohlcv_store("UNITTEST/NEW", timeframe, ohlcv, candle_type=candle_type) assert file.is_file() - assert not ohlcv[ohlcv['date'] < startdt].empty + assert not ohlcv[ohlcv["date"] < startdt].empty timerange = TimeRange.parse_timerange(f"{startdt.replace('-', '')}-{enddt.replace('-', '')}") ohlcv = dhbase.ohlcv_load(pair, timeframe, timerange=timerange, candle_type=candle_type) - if datahandler == 'hdf5': - ohlcv1 = dh1._ohlcv_load('UNITTEST/NEW', timeframe, timerange, candle_type=candle_type) - if candle_type == 'mark': - ohlcv1['volume'] = 0.0 + if datahandler == "hdf5": + ohlcv1 = dh1._ohlcv_load("UNITTEST/NEW", timeframe, timerange, candle_type=candle_type) + if candle_type == "mark": + ohlcv1["volume"] = 0.0 else: - ohlcv1 = dh1.ohlcv_load('UNITTEST/NEW', timeframe, - timerange=timerange, candle_type=candle_type) + ohlcv1 = dh1.ohlcv_load( + "UNITTEST/NEW", timeframe, timerange=timerange, candle_type=candle_type + ) assert len(ohlcv) == len(ohlcv1) assert ohlcv.equals(ohlcv1) - assert ohlcv[ohlcv['date'] < startdt].empty - 
assert ohlcv[ohlcv['date'] > enddt].empty + assert ohlcv[ohlcv["date"] < startdt].empty + assert ohlcv[ohlcv["date"] > enddt].empty # Try loading inexisting file - ohlcv = dh.ohlcv_load('UNITTEST/NONEXIST', timeframe, candle_type=candle_type) + ohlcv = dh.ohlcv_load("UNITTEST/NONEXIST", timeframe, candle_type=candle_type) assert ohlcv.empty def test_hdf5datahandler_ohlcv_purge(mocker, testdatadir): mocker.patch.object(Path, "exists", MagicMock(return_value=False)) unlinkmock = mocker.patch.object(Path, "unlink", MagicMock()) - dh = get_datahandler(testdatadir, 'hdf5') - assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '') - assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark') + dh = get_datahandler(testdatadir, "hdf5") + assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "") + assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark") assert unlinkmock.call_count == 0 mocker.patch.object(Path, "exists", MagicMock(return_value=True)) - assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '') - assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark') + assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "") + assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark") assert unlinkmock.call_count == 2 -@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet']) +@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"]) def test_datahandler_trades_load(testdatadir, datahandler): dh = get_datahandler(testdatadir, datahandler) - trades = dh.trades_load('XRP/ETH', TradingMode.SPOT) + trades = dh.trades_load("XRP/ETH", TradingMode.SPOT) assert isinstance(trades, DataFrame) - assert trades.iloc[0]['timestamp'] == 1570752011620 - assert trades.iloc[0]['date'] == Timestamp('2019-10-11 00:00:11.620000+0000') - assert trades.iloc[-1]['cost'] == 0.1986231 + assert trades.iloc[0]["timestamp"] == 1570752011620 + assert trades.iloc[0]["date"] == Timestamp("2019-10-11 00:00:11.620000+0000") + assert trades.iloc[-1]["cost"] == 0.1986231 - trades1 = dh.trades_load('UNITTEST/NONEXIST', TradingMode.SPOT) + trades1 = dh.trades_load("UNITTEST/NONEXIST", TradingMode.SPOT) assert isinstance(trades, DataFrame) assert trades1.empty -@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet']) +@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"]) def test_datahandler_trades_store(testdatadir, tmp_path, datahandler): dh = get_datahandler(testdatadir, datahandler) - trades = dh.trades_load('XRP/ETH', TradingMode.SPOT) + trades = dh.trades_load("XRP/ETH", TradingMode.SPOT) dh1 = get_datahandler(tmp_path, datahandler) - dh1.trades_store('XRP/NEW', trades, TradingMode.SPOT) + dh1.trades_store("XRP/NEW", trades, TradingMode.SPOT) - file = tmp_path / f'XRP_NEW-trades.{dh1._get_file_extension()}' + file = tmp_path / f"XRP_NEW-trades.{dh1._get_file_extension()}" assert file.is_file() # Load trades back - trades_new = dh1.trades_load('XRP/NEW', TradingMode.SPOT) + trades_new = dh1.trades_load("XRP/NEW", TradingMode.SPOT) assert_frame_equal(trades, trades_new, check_exact=True) assert len(trades_new) == len(trades) -@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet']) +@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"]) def test_datahandler_trades_purge(mocker, testdatadir, datahandler): mocker.patch.object(Path, "exists", MagicMock(return_value=False)) unlinkmock = mocker.patch.object(Path, "unlink", MagicMock()) dh = 
get_datahandler(testdatadir, datahandler) - assert not dh.trades_purge('UNITTEST/NONEXIST', TradingMode.SPOT) + assert not dh.trades_purge("UNITTEST/NONEXIST", TradingMode.SPOT) assert unlinkmock.call_count == 0 mocker.patch.object(Path, "exists", MagicMock(return_value=True)) - assert dh.trades_purge('UNITTEST/NONEXIST', TradingMode.SPOT) + assert dh.trades_purge("UNITTEST/NONEXIST", TradingMode.SPOT) assert unlinkmock.call_count == 1 def test_gethandlerclass(): - cl = get_datahandlerclass('json') + cl = get_datahandlerclass("json") assert cl == JsonDataHandler assert issubclass(cl, IDataHandler) - cl = get_datahandlerclass('jsongz') + cl = get_datahandlerclass("jsongz") assert cl == JsonGzDataHandler assert issubclass(cl, IDataHandler) assert issubclass(cl, JsonDataHandler) - cl = get_datahandlerclass('hdf5') + cl = get_datahandlerclass("hdf5") assert cl == HDF5DataHandler assert issubclass(cl, IDataHandler) - cl = get_datahandlerclass('feather') + cl = get_datahandlerclass("feather") assert cl == FeatherDataHandler assert issubclass(cl, IDataHandler) - cl = get_datahandlerclass('parquet') + cl = get_datahandlerclass("parquet") assert cl == ParquetDataHandler assert issubclass(cl, IDataHandler) with pytest.raises(ValueError, match=r"No datahandler for .*"): - get_datahandlerclass('DeadBeef') + get_datahandlerclass("DeadBeef") def test_get_datahandler(testdatadir): - dh = get_datahandler(testdatadir, 'json') + dh = get_datahandler(testdatadir, "json") assert isinstance(dh, JsonDataHandler) - dh = get_datahandler(testdatadir, 'jsongz') + dh = get_datahandler(testdatadir, "jsongz") assert isinstance(dh, JsonGzDataHandler) - dh1 = get_datahandler(testdatadir, 'jsongz', dh) + dh1 = get_datahandler(testdatadir, "jsongz", dh) assert id(dh1) == id(dh) - dh = get_datahandler(testdatadir, 'hdf5') + dh = get_datahandler(testdatadir, "hdf5") assert isinstance(dh, HDF5DataHandler) diff --git a/tests/data/test_dataprovider.py b/tests/data/test_dataprovider.py index f9c56b62b..11c69f918 100644 --- a/tests/data/test_dataprovider.py +++ b/tests/data/test_dataprovider.py @@ -11,10 +11,13 @@ from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import EXMS, generate_test_data, get_patched_exchange -@pytest.mark.parametrize('candle_type', [ - 'mark', - '', -]) +@pytest.mark.parametrize( + "candle_type", + [ + "mark", + "", + ], +) def test_dp_ohlcv(mocker, default_conf, ohlcv_history, candle_type): default_conf["runmode"] = RunMode.DRY_RUN timeframe = default_conf["timeframe"] @@ -33,11 +36,9 @@ def test_dp_ohlcv(mocker, default_conf, ohlcv_history, candle_type): assert dp.ohlcv("NONSENSE/AAA", timeframe, candle_type=candletype).empty # Test with and without parameter - assert dp.ohlcv( - "UNITTEST/BTC", - timeframe, - candle_type=candletype - ).equals(dp.ohlcv("UNITTEST/BTC", candle_type=candle_type)) + assert dp.ohlcv("UNITTEST/BTC", timeframe, candle_type=candletype).equals( + dp.ohlcv("UNITTEST/BTC", candle_type=candle_type) + ) default_conf["runmode"] = RunMode.LIVE dp = DataProvider(default_conf, exchange) @@ -66,10 +67,12 @@ def test_historic_ohlcv_dataformat(mocker, default_conf, ohlcv_history): featherloadmock = MagicMock(return_value=ohlcv_history) mocker.patch( "freqtrade.data.history.datahandlers.hdf5datahandler.HDF5DataHandler._ohlcv_load", - hdf5loadmock) + hdf5loadmock, + ) mocker.patch( "freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load", - featherloadmock) + featherloadmock, + ) default_conf["runmode"] = RunMode.BACKTEST 
exchange = get_patched_exchange(mocker, default_conf) @@ -90,11 +93,14 @@ def test_historic_ohlcv_dataformat(mocker, default_conf, ohlcv_history): featherloadmock.assert_not_called() -@pytest.mark.parametrize('candle_type', [ - 'mark', - 'futures', - '', -]) +@pytest.mark.parametrize( + "candle_type", + [ + "mark", + "futures", + "", + ], +) def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type): default_conf["runmode"] = RunMode.DRY_RUN timeframe = default_conf["timeframe"] @@ -105,26 +111,33 @@ def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type): dp = DataProvider(default_conf, exchange) assert dp.runmode == RunMode.DRY_RUN - assert ohlcv_history.equals(dp.get_pair_dataframe( - "UNITTEST/BTC", timeframe, candle_type=candle_type)) - assert ohlcv_history.equals(dp.get_pair_dataframe( - "UNITTEST/BTC", timeframe, candle_type=candletype)) - assert isinstance(dp.get_pair_dataframe( - "UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame) - assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe, - candle_type=candle_type) is not ohlcv_history + assert ohlcv_history.equals( + dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type) + ) + assert ohlcv_history.equals( + dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candletype) + ) + assert isinstance( + dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame + ) + assert ( + dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type) + is not ohlcv_history + ) assert not dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type).empty assert dp.get_pair_dataframe("NONSENSE/AAA", timeframe, candle_type=candle_type).empty # Test with and without parameter - assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type)\ - .equals(dp.get_pair_dataframe("UNITTEST/BTC", candle_type=candle_type)) + assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type).equals( + dp.get_pair_dataframe("UNITTEST/BTC", candle_type=candle_type) + ) default_conf["runmode"] = RunMode.LIVE dp = DataProvider(default_conf, exchange) assert dp.runmode == RunMode.LIVE - assert isinstance(dp.get_pair_dataframe( - "UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame) + assert isinstance( + dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame + ) assert dp.get_pair_dataframe("NONSENSE/AAA", timeframe, candle_type=candle_type).empty historymock = MagicMock(return_value=ohlcv_history) @@ -136,7 +149,7 @@ def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type): assert isinstance(df, DataFrame) assert len(df) == 3 # ohlcv_history mock has just 3 rows - dp._set_dataframe_max_date(ohlcv_history.iloc[-1]['date']) + dp._set_dataframe_max_date(ohlcv_history.iloc[-1]["date"]) df = dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type) assert isinstance(df, DataFrame) assert len(df) == 2 # ohlcv_history is limited to 2 rows now @@ -150,7 +163,10 @@ def test_available_pairs(mocker, default_conf, ohlcv_history): dp = DataProvider(default_conf, exchange) assert len(dp.available_pairs) == 2 - assert dp.available_pairs == [("XRP/BTC", timeframe), ("UNITTEST/BTC", timeframe), ] + assert dp.available_pairs == [ + ("XRP/BTC", timeframe), + ("UNITTEST/BTC", timeframe), + ] def test_producer_pairs(default_conf): @@ -172,9 +188,9 @@ def test_producer_pairs(default_conf): def test_get_producer_df(default_conf): dataprovider = 
DataProvider(default_conf, None) - ohlcv_history = generate_test_data('5m', 150) - pair = 'BTC/USDT' - timeframe = default_conf['timeframe'] + ohlcv_history = generate_test_data("5m", 150) + pair = "BTC/USDT" + timeframe = default_conf["timeframe"] candle_type = CandleType.SPOT empty_la = datetime.fromtimestamp(0, tz=timezone.utc) @@ -192,20 +208,20 @@ def test_get_producer_df(default_conf): assert la > empty_la # no data on this producer, should return empty dataframe - dataframe, la = dataprovider.get_producer_df(pair, producer_name='bad') + dataframe, la = dataprovider.get_producer_df(pair, producer_name="bad") assert dataframe.empty assert la == empty_la # non existent timeframe, empty dataframe - _dataframe, la = dataprovider.get_producer_df(pair, timeframe='1h') + _dataframe, la = dataprovider.get_producer_df(pair, timeframe="1h") assert dataframe.empty assert la == empty_la def test_emit_df(mocker, default_conf, ohlcv_history): - mocker.patch('freqtrade.rpc.rpc_manager.RPCManager.__init__', MagicMock()) - rpc_mock = mocker.patch('freqtrade.rpc.rpc_manager.RPCManager', MagicMock()) - send_mock = mocker.patch('freqtrade.rpc.rpc_manager.RPCManager.send_msg', MagicMock()) + mocker.patch("freqtrade.rpc.rpc_manager.RPCManager.__init__", MagicMock()) + rpc_mock = mocker.patch("freqtrade.rpc.rpc_manager.RPCManager", MagicMock()) + send_mock = mocker.patch("freqtrade.rpc.rpc_manager.RPCManager.send_msg", MagicMock()) dataprovider = DataProvider(default_conf, exchange=None, rpc=rpc_mock) dataprovider_no_rpc = DataProvider(default_conf, exchange=None) @@ -262,14 +278,14 @@ def test_orderbook(mocker, default_conf, order_book_l2): exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock) dp = DataProvider(default_conf, exchange) - res = dp.orderbook('ETH/BTC', 5) + res = dp.orderbook("ETH/BTC", 5) assert order_book_l2.call_count == 1 - assert order_book_l2.call_args_list[0][0][0] == 'ETH/BTC' + assert order_book_l2.call_args_list[0][0][0] == "ETH/BTC" assert order_book_l2.call_args_list[0][0][1] >= 5 assert isinstance(res, dict) - assert 'bids' in res - assert 'asks' in res + assert "bids" in res + assert "asks" in res def test_market(mocker, default_conf, markets): @@ -278,41 +294,39 @@ def test_market(mocker, default_conf, markets): exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock) dp = DataProvider(default_conf, exchange) - res = dp.market('ETH/BTC') + res = dp.market("ETH/BTC") assert isinstance(res, dict) - assert 'symbol' in res - assert res['symbol'] == 'ETH/BTC' + assert "symbol" in res + assert res["symbol"] == "ETH/BTC" - res = dp.market('UNITTEST/BTC') + res = dp.market("UNITTEST/BTC") assert res is None def test_ticker(mocker, default_conf, tickers): - ticker_mock = MagicMock(return_value=tickers()['ETH/BTC']) + ticker_mock = MagicMock(return_value=tickers()["ETH/BTC"]) mocker.patch(f"{EXMS}.fetch_ticker", ticker_mock) exchange = get_patched_exchange(mocker, default_conf) dp = DataProvider(default_conf, exchange) - res = dp.ticker('ETH/BTC') + res = dp.ticker("ETH/BTC") assert isinstance(res, dict) - assert 'symbol' in res - assert res['symbol'] == 'ETH/BTC' + assert "symbol" in res + assert res["symbol"] == "ETH/BTC" - ticker_mock = MagicMock(side_effect=ExchangeError('Pair not found')) + ticker_mock = MagicMock(side_effect=ExchangeError("Pair not found")) mocker.patch(f"{EXMS}.fetch_ticker", ticker_mock) exchange = get_patched_exchange(mocker, default_conf) dp = DataProvider(default_conf, exchange) - res = dp.ticker('UNITTEST/BTC') + res = 
dp.ticker("UNITTEST/BTC") assert res == {} def test_current_whitelist(mocker, default_conf, tickers): # patch default conf to volumepairlist - default_conf['pairlists'][0] = {'method': 'VolumePairList', "number_assets": 5} + default_conf["pairlists"][0] = {"method": "VolumePairList", "number_assets": 5} - mocker.patch.multiple(EXMS, - exchange_has=MagicMock(return_value=True), - get_tickers=tickers) + mocker.patch.multiple(EXMS, exchange_has=MagicMock(return_value=True), get_tickers=tickers) exchange = get_patched_exchange(mocker, default_conf) pairlist = PairListManager(exchange, default_conf) @@ -331,7 +345,6 @@ def test_current_whitelist(mocker, default_conf, tickers): def test_get_analyzed_dataframe(mocker, default_conf, ohlcv_history): - default_conf["runmode"] = RunMode.DRY_RUN timeframe = default_conf["timeframe"] @@ -384,28 +397,27 @@ def test_no_exchange_mode(default_conf): dp.refresh([()]) with pytest.raises(OperationalException, match=message): - dp.ohlcv('XRP/USDT', '5m', '') + dp.ohlcv("XRP/USDT", "5m", "") with pytest.raises(OperationalException, match=message): - dp.market('XRP/USDT') + dp.market("XRP/USDT") with pytest.raises(OperationalException, match=message): - dp.ticker('XRP/USDT') + dp.ticker("XRP/USDT") with pytest.raises(OperationalException, match=message): - dp.orderbook('XRP/USDT', 20) + dp.orderbook("XRP/USDT", 20) with pytest.raises(OperationalException, match=message): dp.available_pairs() def test_dp_send_msg(default_conf): - default_conf["runmode"] = RunMode.DRY_RUN - default_conf["timeframe"] = '1h' + default_conf["timeframe"] = "1h" dp = DataProvider(default_conf, None) - msg = 'Test message' + msg = "Test message" dp.send_msg(msg) assert msg in dp._msg_queue @@ -424,81 +436,81 @@ def test_dp_send_msg(default_conf): def test_dp__add_external_df(default_conf_usdt): - timeframe = '1h' + timeframe = "1h" default_conf_usdt["timeframe"] = timeframe dp = DataProvider(default_conf_usdt, None) - df = generate_test_data(timeframe, 24, '2022-01-01 00:00:00+00:00') + df = generate_test_data(timeframe, 24, "2022-01-01 00:00:00+00:00") last_analyzed = datetime.now(timezone.utc) - res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # Why 1000 ?? assert res[1] == 1000 # Hard add dataframe - dp._replace_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + dp._replace_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT) # BTC is not stored yet - res = dp._add_external_df('BTC/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df("BTC/USDT", df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False - df_res, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df_res, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) assert len(df_res) == 24 # Add the same dataframe again - dataframe size shall not change. - res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert isinstance(res[1], int) assert res[1] == 0 - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) assert len(df) == 24 # Add a new day. 
- df2 = generate_test_data(timeframe, 24, '2022-01-02 00:00:00+00:00') + df2 = generate_test_data(timeframe, 24, "2022-01-02 00:00:00+00:00") - res = dp._add_external_df('ETH/USDT', df2, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df("ETH/USDT", df2, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert isinstance(res[1], int) assert res[1] == 0 - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) assert len(df) == 48 # Add a dataframe with a 12 hour offset - so 12 candles are overlapping, and 12 valid. - df3 = generate_test_data(timeframe, 24, '2022-01-02 12:00:00+00:00') + df3 = generate_test_data(timeframe, 24, "2022-01-02 12:00:00+00:00") - res = dp._add_external_df('ETH/USDT', df3, last_analyzed, timeframe, CandleType.SPOT) + res = dp._add_external_df("ETH/USDT", df3, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is True assert isinstance(res[1], int) assert res[1] == 0 - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) # New length = 48 + 12 (since we have a 12 hour offset). assert len(df) == 60 - assert df.iloc[-1]['date'] == df3.iloc[-1]['date'] - assert df.iloc[-1]['date'] == Timestamp('2022-01-03 11:00:00+00:00') + assert df.iloc[-1]["date"] == df3.iloc[-1]["date"] + assert df.iloc[-1]["date"] == Timestamp("2022-01-03 11:00:00+00:00") # Generate 1 new candle - df4 = generate_test_data(timeframe, 1, '2022-01-03 12:00:00+00:00') - res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + df4 = generate_test_data(timeframe, 1, "2022-01-03 12:00:00+00:00") + res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT) # assert res[0] is True # assert res[1] == 0 - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) # New length = 61 + 1 assert len(df) == 61 - assert df.iloc[-2]['date'] == Timestamp('2022-01-03 11:00:00+00:00') - assert df.iloc[-1]['date'] == Timestamp('2022-01-03 12:00:00+00:00') + assert df.iloc[-2]["date"] == Timestamp("2022-01-03 11:00:00+00:00") + assert df.iloc[-1]["date"] == Timestamp("2022-01-03 12:00:00+00:00") # Gap in the data ... 
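The value 36 asserted below is simply the number of 1h candles spanned by the gap between the last stored candle and the newly offered one; a minimal standalone check of that arithmetic (plain datetime, not part of the patch):

from datetime import datetime, timezone

last_stored = datetime(2022, 1, 3, 12, tzinfo=timezone.utc)  # last candle appended above
gap_candle = datetime(2022, 1, 5, 0, tzinfo=timezone.utc)    # candle offered after the gap
missing_candles = int((gap_candle - last_stored).total_seconds() // 3600)
assert missing_candles == 36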
- df4 = generate_test_data(timeframe, 1, '2022-01-05 00:00:00+00:00') - res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + df4 = generate_test_data(timeframe, 1, "2022-01-05 00:00:00+00:00") + res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 assert isinstance(res[1], int) assert res[1] == 36 - df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT) + df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT) # New length = 61 + 1 assert len(df) == 61 # Empty dataframe - df4 = generate_test_data(timeframe, 0, '2022-01-05 00:00:00+00:00') - res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT) + df4 = generate_test_data(timeframe, 0, "2022-01-05 00:00:00+00:00") + res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT) assert res[0] is False # 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00 assert isinstance(res[1], int) @@ -506,59 +518,59 @@ def test_dp__add_external_df(default_conf_usdt): def test_dp_get_required_startup(default_conf_usdt): - timeframe = '1h' + timeframe = "1h" default_conf_usdt["timeframe"] = timeframe dp = DataProvider(default_conf_usdt, None) # No FreqAI config - assert dp.get_required_startup('5m') == 0 - assert dp.get_required_startup('1h') == 0 - assert dp.get_required_startup('1d') == 0 + assert dp.get_required_startup("5m") == 0 + assert dp.get_required_startup("1h") == 0 + assert dp.get_required_startup("1d") == 0 - dp._config['startup_candle_count'] = 20 - assert dp.get_required_startup('5m') == 20 - assert dp.get_required_startup('1h') == 20 - assert dp.get_required_startup('1h') == 20 + dp._config["startup_candle_count"] = 20 + assert dp.get_required_startup("5m") == 20 + assert dp.get_required_startup("1h") == 20 + assert dp.get_required_startup("1h") == 20 # With freqAI config - dp._config['freqai'] = { - 'enabled': True, - 'train_period_days': 20, - 'feature_parameters': { - 'indicator_periods_candles': [ + dp._config["freqai"] = { + "enabled": True, + "train_period_days": 20, + "feature_parameters": { + "indicator_periods_candles": [ 5, 20, ] - } + }, } - assert dp.get_required_startup('5m') == 5780 - assert dp.get_required_startup('1h') == 500 - assert dp.get_required_startup('1d') == 40 + assert dp.get_required_startup("5m") == 5780 + assert dp.get_required_startup("1h") == 500 + assert dp.get_required_startup("1d") == 40 # FreqAI kindof ignores startup_candle_count if it's below indicator_periods_candles - dp._config['startup_candle_count'] = 0 - assert dp.get_required_startup('5m') == 5780 - assert dp.get_required_startup('1h') == 500 - assert dp.get_required_startup('1d') == 40 + dp._config["startup_candle_count"] = 0 + assert dp.get_required_startup("5m") == 5780 + assert dp.get_required_startup("1h") == 500 + assert dp.get_required_startup("1d") == 40 - dp._config['freqai']['feature_parameters']['indicator_periods_candles'][1] = 50 - assert dp.get_required_startup('5m') == 5810 - assert dp.get_required_startup('1h') == 530 - assert dp.get_required_startup('1d') == 70 + dp._config["freqai"]["feature_parameters"]["indicator_periods_candles"][1] = 50 + assert dp.get_required_startup("5m") == 5810 + assert dp.get_required_startup("1h") == 530 + assert dp.get_required_startup("1d") == 70 # scenario from issue https://github.com/freqtrade/freqtrade/issues/9432 - dp._config['freqai'] = { 
- 'enabled': True, - 'train_period_days': 180, - 'feature_parameters': { - 'indicator_periods_candles': [ + dp._config["freqai"] = { + "enabled": True, + "train_period_days": 180, + "feature_parameters": { + "indicator_periods_candles": [ 10, 20, ] - } + }, } - dp._config['startup_candle_count'] = 40 - assert dp.get_required_startup('5m') == 51880 - assert dp.get_required_startup('1h') == 4360 - assert dp.get_required_startup('1d') == 220 + dp._config["startup_candle_count"] = 40 + assert dp.get_required_startup("5m") == 51880 + assert dp.get_required_startup("1h") == 4360 + assert dp.get_required_startup("1d") == 220 diff --git a/tests/data/test_download_data.py b/tests/data/test_download_data.py index 1518b28f3..4922a213f 100644 --- a/tests/data/test_download_data.py +++ b/tests/data/test_download_data.py @@ -10,83 +10,90 @@ from tests.conftest import EXMS, log_has, patch_exchange def test_download_data_main_no_markets(mocker, caplog): - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) - patch_exchange(mocker, id='binance') - mocker.patch(f'{EXMS}.get_markets', return_value={}) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) + patch_exchange(mocker, id="binance") + mocker.patch(f"{EXMS}.get_markets", return_value={}) config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE) - config.update({ - "days": 20, - "pairs": ["ETH/BTC", "XRP/BTC"], - "timeframes": ["5m", "1h"] - }) + config.update({"days": 20, "pairs": ["ETH/BTC", "XRP/BTC"], "timeframes": ["5m", "1h"]}) download_data_main(config) - assert dl_mock.call_args[1]['timerange'].starttype == "date" + assert dl_mock.call_args[1]["timerange"].starttype == "date" assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog) def test_download_data_main_all_pairs(mocker, markets): - - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data', - MagicMock(return_value=["ETH/BTC", "XRP/BTC"])) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data", + MagicMock(return_value=["ETH/BTC", "XRP/BTC"]), + ) patch_exchange(mocker) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE) - config.update({ - "pairs": [".*/USDT"], - "timeframes": ["5m", "1h"] - }) + config.update({"pairs": [".*/USDT"], "timeframes": ["5m", "1h"]}) download_data_main(config) - expected = set(['BTC/USDT', 'ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT']) - assert set(dl_mock.call_args_list[0][1]['pairs']) == expected + expected = set(["BTC/USDT", "ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"]) + assert set(dl_mock.call_args_list[0][1]["pairs"]) == expected assert dl_mock.call_count == 1 dl_mock.reset_mock() - config.update({ - "pairs": [".*/USDT"], - "timeframes": ["5m", "1h"], - "include_inactive": True - }) + config.update({"pairs": [".*/USDT"], "timeframes": ["5m", "1h"], "include_inactive": True}) download_data_main(config) - expected = set(['BTC/USDT', 'ETH/USDT', 'LTC/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT']) - assert set(dl_mock.call_args_list[0][1]['pairs']) == expected + expected = set(["BTC/USDT", "ETH/USDT", "LTC/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"]) + assert 
set(dl_mock.call_args_list[0][1]["pairs"]) == expected def test_download_data_main_trades(mocker): - dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_trades_data', - MagicMock(return_value=[])) - convert_mock = mocker.patch('freqtrade.data.history.history_utils.convert_trades_to_ohlcv', - MagicMock(return_value=[])) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils.refresh_backtest_trades_data", + MagicMock(return_value=[]), + ) + convert_mock = mocker.patch( + "freqtrade.data.history.history_utils.convert_trades_to_ohlcv", MagicMock(return_value=[]) + ) patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_markets', return_value={}) + mocker.patch(f"{EXMS}.get_markets", return_value={}) config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE) - config.update({ - "days": 20, - "pairs": ["ETH/BTC", "XRP/BTC"], - "timeframes": ["5m", "1h"], - "download_trades": True, - }) + config.update( + { + "days": 20, + "pairs": ["ETH/BTC", "XRP/BTC"], + "timeframes": ["5m", "1h"], + "download_trades": True, + } + ) download_data_main(config) - assert dl_mock.call_args[1]['timerange'].starttype == "date" + assert dl_mock.call_args[1]["timerange"].starttype == "date" + assert dl_mock.call_count == 1 + assert convert_mock.call_count == 0 + dl_mock.reset_mock() + + config.update( + { + "convert_trades": True, + } + ) + download_data_main(config) + + assert dl_mock.call_args[1]["timerange"].starttype == "date" assert dl_mock.call_count == 1 assert convert_mock.call_count == 1 - config.update({ - "download_trades": True, - "trading_mode": "futures", - }) def test_download_data_main_data_invalid(mocker): patch_exchange(mocker, id="kraken") - mocker.patch(f'{EXMS}.get_markets', return_value={}) + mocker.patch(f"{EXMS}.get_markets", return_value={}) config = setup_utils_configuration({"exchange": "kraken"}, RunMode.UTIL_EXCHANGE) - config.update({ - "days": 20, - "pairs": ["ETH/BTC", "XRP/BTC"], - "timeframes": ["5m", "1h"], - }) + config.update( + { + "days": 20, + "pairs": ["ETH/BTC", "XRP/BTC"], + "timeframes": ["5m", "1h"], + } + ) with pytest.raises(OperationalException, match=r"Historic klines not available for .*"): download_data_main(config) diff --git a/tests/data/test_entryexitanalysis.py b/tests/data/test_entryexitanalysis.py index 810e2c53b..49ef74c0a 100644 --- a/tests/data/test_entryexitanalysis.py +++ b/tests/data/test_entryexitanalysis.py @@ -20,198 +20,228 @@ def entryexitanalysis_cleanup() -> None: def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, user_dir, capsys): caplog.set_level(logging.INFO) - (user_dir / 'backtest_results').mkdir(parents=True, exist_ok=True) + (user_dir / "backtest_results").mkdir(parents=True, exist_ok=True) - default_conf.update({ - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - }) - patch_exchange(mocker) - result1 = pd.DataFrame({'pair': ['ETH/BTC', 'LTC/BTC', 'ETH/BTC', 'LTC/BTC'], - 'profit_ratio': [0.025, 0.05, -0.1, -0.05], - 'profit_abs': [0.5, 2.0, -4.0, -2.0], - 'open_date': pd.to_datetime(['2018-01-29 18:40:00', - '2018-01-30 03:30:00', - '2018-01-30 08:10:00', - '2018-01-31 13:30:00', ], utc=True - ), - 'close_date': pd.to_datetime(['2018-01-29 20:45:00', - '2018-01-30 05:35:00', - '2018-01-30 09:10:00', - '2018-01-31 15:00:00', ], utc=True), - 'trade_duration': [235, 40, 60, 90], - 'is_open': [False, False, False, False], - 'stake_amount': [0.01, 0.01, 0.01, 0.01], - 'open_rate': 
[0.104445, 0.10302485, 0.10302485, 0.10302485], - 'close_rate': [0.104969, 0.103541, 0.102041, 0.102541], - "is_short": [False, False, False, False], - 'enter_tag': ["enter_tag_long_a", - "enter_tag_long_b", - "enter_tag_long_a", - "enter_tag_long_b"], - 'exit_reason': [ExitType.ROI, - ExitType.EXIT_SIGNAL, - ExitType.STOP_LOSS, - ExitType.TRAILING_STOP_LOSS] - }) - - backtestmock = MagicMock(side_effect=[ + default_conf.update( { - 'results': result1, - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, } - ]) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['ETH/BTC', 'LTC/BTC', 'DASH/BTC'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) + ) + patch_exchange(mocker) + result1 = pd.DataFrame( + { + "pair": ["ETH/BTC", "LTC/BTC", "ETH/BTC", "LTC/BTC"], + "profit_ratio": [0.025, 0.05, -0.1, -0.05], + "profit_abs": [0.5, 2.0, -4.0, -2.0], + "open_date": pd.to_datetime( + [ + "2018-01-29 18:40:00", + "2018-01-30 03:30:00", + "2018-01-30 08:10:00", + "2018-01-31 13:30:00", + ], + utc=True, + ), + "close_date": pd.to_datetime( + [ + "2018-01-29 20:45:00", + "2018-01-30 05:35:00", + "2018-01-30 09:10:00", + "2018-01-31 15:00:00", + ], + utc=True, + ), + "trade_duration": [235, 40, 60, 90], + "is_open": [False, False, False, False], + "stake_amount": [0.01, 0.01, 0.01, 0.01], + "open_rate": [0.104445, 0.10302485, 0.10302485, 0.10302485], + "close_rate": [0.104969, 0.103541, 0.102041, 0.102541], + "is_short": [False, False, False, False], + "enter_tag": [ + "enter_tag_long_a", + "enter_tag_long_b", + "enter_tag_long_a", + "enter_tag_long_b", + ], + "exit_reason": [ + ExitType.ROI, + ExitType.EXIT_SIGNAL, + ExitType.STOP_LOSS, + ExitType.TRAILING_STOP_LOSS, + ], + } + ) + + backtestmock = MagicMock( + side_effect=[ + { + "results": result1, + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + } + ] + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["ETH/BTC", "LTC/BTC", "DASH/BTC"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--user-data-dir', str(user_dir), - '--timeframe', '5m', - '--timerange', '1515560100-1517287800', - '--export', 'signals', - '--cache', 'none', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--user-data-dir", + str(user_dir), + "--timeframe", + "5m", + "--timerange", + "1515560100-1517287800", + "--export", + "signals", + "--cache", + "none", ] args = get_args(args) start_backtesting(args) captured = capsys.readouterr() - assert 'BACKTESTING REPORT' in captured.out - assert 'EXIT REASON STATS' in captured.out - assert 'LEFT OPEN TRADES REPORT' in captured.out + assert "BACKTESTING REPORT" in captured.out + assert "EXIT REASON STATS" in captured.out + assert "LEFT OPEN TRADES REPORT" in 
captured.out base_args = [ - 'backtesting-analysis', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--user-data-dir', str(user_dir), + "backtesting-analysis", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--user-data-dir", + str(user_dir), ] # test group 0 and indicator list - args = get_args(base_args + - ['--analysis-groups', "0", - '--indicator-list', "close", "rsi", "profit_abs"] - ) + args = get_args( + base_args + ["--analysis-groups", "0", "--indicator-list", "close", "rsi", "profit_abs"] + ) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'LTC/BTC' in captured.out - assert 'ETH/BTC' in captured.out - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' in captured.out - assert 'exit_signal' in captured.out - assert 'roi' in captured.out - assert 'stop_loss' in captured.out - assert 'trailing_stop_loss' in captured.out - assert '0.5' in captured.out - assert '-4' in captured.out - assert '-2' in captured.out - assert '-3.5' in captured.out - assert '50' in captured.out - assert '0' in captured.out - assert '0.01616' in captured.out - assert '34.049' in captured.out - assert '0.104411' in captured.out - assert '52.8292' in captured.out + assert "LTC/BTC" in captured.out + assert "ETH/BTC" in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" in captured.out + assert "exit_signal" in captured.out + assert "roi" in captured.out + assert "stop_loss" in captured.out + assert "trailing_stop_loss" in captured.out + assert "0.5" in captured.out + assert "-4" in captured.out + assert "-2" in captured.out + assert "-3.5" in captured.out + assert "50" in captured.out + assert "0" in captured.out + assert "0.01616" in captured.out + assert "34.049" in captured.out + assert "0.104411" in captured.out + assert "52.8292" in captured.out # test group 1 - args = get_args(base_args + ['--analysis-groups', "1"]) + args = get_args(base_args + ["--analysis-groups", "1"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' in captured.out - assert 'total_profit_pct' in captured.out - assert '-3.5' in captured.out - assert '-1.75' in captured.out - assert '-7.5' in captured.out - assert '-3.75' in captured.out - assert '0' in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" in captured.out + assert "total_profit_pct" in captured.out + assert "-3.5" in captured.out + assert "-1.75" in captured.out + assert "-7.5" in captured.out + assert "-3.75" in captured.out + assert "0" in captured.out # test group 2 - args = get_args(base_args + ['--analysis-groups', "2"]) + args = get_args(base_args + ["--analysis-groups", "2"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' in captured.out - assert 'exit_signal' in captured.out - assert 'roi' in captured.out - assert 'stop_loss' in captured.out - assert 'trailing_stop_loss' in captured.out - assert 'total_profit_pct' in captured.out - assert '-10' in captured.out - assert '-5' in captured.out - assert '2.5' in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" in captured.out + assert "exit_signal" in captured.out + assert "roi" in captured.out + assert "stop_loss" in captured.out + assert "trailing_stop_loss" in captured.out + assert "total_profit_pct" in captured.out + assert "-10" in captured.out + 
assert "-5" in captured.out + assert "2.5" in captured.out # test group 3 - args = get_args(base_args + ['--analysis-groups', "3"]) + args = get_args(base_args + ["--analysis-groups", "3"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'LTC/BTC' in captured.out - assert 'ETH/BTC' in captured.out - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' in captured.out - assert 'total_profit_pct' in captured.out - assert '-7.5' in captured.out - assert '-3.75' in captured.out - assert '-1.75' in captured.out - assert '0' in captured.out - assert '2' in captured.out + assert "LTC/BTC" in captured.out + assert "ETH/BTC" in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" in captured.out + assert "total_profit_pct" in captured.out + assert "-7.5" in captured.out + assert "-3.75" in captured.out + assert "-1.75" in captured.out + assert "0" in captured.out + assert "2" in captured.out # test group 4 - args = get_args(base_args + ['--analysis-groups', "4"]) + args = get_args(base_args + ["--analysis-groups", "4"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'LTC/BTC' in captured.out - assert 'ETH/BTC' in captured.out - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' in captured.out - assert 'exit_signal' in captured.out - assert 'roi' in captured.out - assert 'stop_loss' in captured.out - assert 'trailing_stop_loss' in captured.out - assert 'total_profit_pct' in captured.out - assert '-10' in captured.out - assert '-5' in captured.out - assert '-4' in captured.out - assert '0.5' in captured.out - assert '1' in captured.out - assert '2.5' in captured.out + assert "LTC/BTC" in captured.out + assert "ETH/BTC" in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" in captured.out + assert "exit_signal" in captured.out + assert "roi" in captured.out + assert "stop_loss" in captured.out + assert "trailing_stop_loss" in captured.out + assert "total_profit_pct" in captured.out + assert "-10" in captured.out + assert "-5" in captured.out + assert "-4" in captured.out + assert "0.5" in captured.out + assert "1" in captured.out + assert "2.5" in captured.out # test group 5 - args = get_args(base_args + ['--analysis-groups', "5"]) + args = get_args(base_args + ["--analysis-groups", "5"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'exit_signal' in captured.out - assert 'roi' in captured.out - assert 'stop_loss' in captured.out - assert 'trailing_stop_loss' in captured.out + assert "exit_signal" in captured.out + assert "roi" in captured.out + assert "stop_loss" in captured.out + assert "trailing_stop_loss" in captured.out # test date filtering - args = get_args(base_args + - ['--analysis-groups', "0", "1", "2", - '--timerange', "20180129-20180130"] - ) + args = get_args( + base_args + ["--analysis-groups", "0", "1", "2", "--timerange", "20180129-20180130"] + ) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'enter_tag_long_a' in captured.out - assert 'enter_tag_long_b' not in captured.out + assert "enter_tag_long_a" in captured.out + assert "enter_tag_long_b" not in captured.out # Due to the backtest mock, there's no rejected signals generated. 
- args = get_args(base_args + ['--rejected-signals']) + args = get_args(base_args + ["--rejected-signals"]) start_analysis_entries_exits(args) captured = capsys.readouterr() - assert 'no rejected signals' in captured.out + assert "no rejected signals" in captured.out diff --git a/tests/data/test_history.py b/tests/data/test_history.py index f95b05835..29ac89337 100644 --- a/tests/data/test_history.py +++ b/tests/data/test_history.py @@ -17,19 +17,31 @@ from freqtrade.constants import DATETIME_PRINT_FORMAT from freqtrade.data.converter import ohlcv_to_dataframe from freqtrade.data.history import get_datahandler from freqtrade.data.history.datahandlers.jsondatahandler import JsonDataHandler, JsonGzDataHandler -from freqtrade.data.history.history_utils import (_download_pair_history, _download_trades_history, - _load_cached_data_for_updating, get_timerange, - load_data, load_pair_history, - refresh_backtest_ohlcv_data, - refresh_backtest_trades_data, refresh_data, - validate_backtest_data) +from freqtrade.data.history.history_utils import ( + _download_pair_history, + _download_trades_history, + _load_cached_data_for_updating, + get_timerange, + load_data, + load_pair_history, + refresh_backtest_ohlcv_data, + refresh_backtest_trades_data, + refresh_data, + validate_backtest_data, +) from freqtrade.enums import CandleType, TradingMode from freqtrade.exchange import timeframe_to_minutes from freqtrade.misc import file_dump_json from freqtrade.resolvers import StrategyResolver from freqtrade.util import dt_ts, dt_utc -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, get_patched_exchange, log_has, log_has_re, - patch_exchange) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + get_patched_exchange, + log_has, + log_has_re, + patch_exchange, +) def _clean_test_file(file: Path) -> None: @@ -38,7 +50,7 @@ def _clean_test_file(file: Path) -> None: :param file: complete path to the file :return: None """ - file_swp = Path(str(file) + '.swp') + file_swp = Path(str(file) + ".swp") # 1. Delete file from the test if file.is_file(): file.unlink() @@ -49,181 +61,197 @@ def _clean_test_file(file: Path) -> None: def test_load_data_30min_timeframe(caplog, testdatadir) -> None: - ld = load_pair_history(pair='UNITTEST/BTC', timeframe='30m', datadir=testdatadir) + ld = load_pair_history(pair="UNITTEST/BTC", timeframe="30m", datadir=testdatadir) assert isinstance(ld, DataFrame) assert not log_has( - 'Download history data for pair: "UNITTEST/BTC", timeframe: 30m ' - 'and store in None.', caplog + 'Download history data for pair: "UNITTEST/BTC", timeframe: 30m ' "and store in None.", + caplog, ) def test_load_data_7min_timeframe(caplog, testdatadir) -> None: - ld = load_pair_history(pair='UNITTEST/BTC', timeframe='7m', datadir=testdatadir) + ld = load_pair_history(pair="UNITTEST/BTC", timeframe="7m", datadir=testdatadir) assert isinstance(ld, DataFrame) assert ld.empty assert log_has( - 'No history for UNITTEST/BTC, spot, 7m found. ' - 'Use `freqtrade download-data` to download the data', caplog + "No history for UNITTEST/BTC, spot, 7m found. 
" + "Use `freqtrade download-data` to download the data", + caplog, ) def test_load_data_1min_timeframe(ohlcv_history, mocker, caplog, testdatadir) -> None: - mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history) - file = testdatadir / 'UNITTEST_BTC-1m.feather' - load_data(datadir=testdatadir, timeframe='1m', pairs=['UNITTEST/BTC']) + mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history) + file = testdatadir / "UNITTEST_BTC-1m.feather" + load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"]) assert file.is_file() assert not log_has( - 'Download history data for pair: "UNITTEST/BTC", interval: 1m ' - 'and store in None.', caplog + 'Download history data for pair: "UNITTEST/BTC", interval: 1m ' "and store in None.", caplog ) def test_load_data_mark(ohlcv_history, mocker, caplog, testdatadir) -> None: - mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history) - file = testdatadir / 'futures/UNITTEST_USDT_USDT-1h-mark.feather' - load_data(datadir=testdatadir, timeframe='1h', pairs=['UNITTEST/BTC'], candle_type='mark') + mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history) + file = testdatadir / "futures/UNITTEST_USDT_USDT-1h-mark.feather" + load_data(datadir=testdatadir, timeframe="1h", pairs=["UNITTEST/BTC"], candle_type="mark") assert file.is_file() assert not log_has( - 'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m ' - 'and store in None.', caplog + 'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m ' "and store in None.", + caplog, ) def test_load_data_startup_candles(mocker, testdatadir) -> None: ltfmock = mocker.patch( - 'freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load', - MagicMock(return_value=DataFrame())) - timerange = TimeRange('date', None, 1510639620, 0) - load_pair_history(pair='UNITTEST/BTC', timeframe='1m', - datadir=testdatadir, timerange=timerange, - startup_candles=20,) + "freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load", + MagicMock(return_value=DataFrame()), + ) + timerange = TimeRange("date", None, 1510639620, 0) + load_pair_history( + pair="UNITTEST/BTC", + timeframe="1m", + datadir=testdatadir, + timerange=timerange, + startup_candles=20, + ) assert ltfmock.call_count == 1 - assert ltfmock.call_args_list[0][1]['timerange'] != timerange + assert ltfmock.call_args_list[0][1]["timerange"] != timerange # startts is 20 minutes earlier - assert ltfmock.call_args_list[0][1]['timerange'].startts == timerange.startts - 20 * 60 + assert ltfmock.call_args_list[0][1]["timerange"].startts == timerange.startts - 20 * 60 -@pytest.mark.parametrize('candle_type', ['mark', '']) -def test_load_data_with_new_pair_1min(ohlcv_history_list, mocker, caplog, - default_conf, tmp_path, candle_type) -> None: +@pytest.mark.parametrize("candle_type", ["mark", ""]) +def test_load_data_with_new_pair_1min( + ohlcv_history_list, mocker, caplog, default_conf, tmp_path, candle_type +) -> None: """ Test load_pair_history() with 1 min timeframe """ - mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list) + mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history_list) exchange = get_patched_exchange(mocker, default_conf) - file = tmp_path / 'MEME_BTC-1m.feather' + file = tmp_path / "MEME_BTC-1m.feather" # do not download a new pair if refresh_pairs isn't set - load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type) + 
load_pair_history(datadir=tmp_path, timeframe="1m", pair="MEME/BTC", candle_type=candle_type) assert not file.is_file() assert log_has( f"No history for MEME/BTC, {candle_type}, 1m found. " - "Use `freqtrade download-data` to download the data", caplog + "Use `freqtrade download-data` to download the data", + caplog, ) # download a new pair if refresh_pairs is set - refresh_data(datadir=tmp_path, timeframe='1m', pairs=['MEME/BTC'], - exchange=exchange, candle_type=CandleType.SPOT - ) - load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type) + refresh_data( + datadir=tmp_path, + timeframe="1m", + pairs=["MEME/BTC"], + exchange=exchange, + candle_type=CandleType.SPOT, + ) + load_pair_history(datadir=tmp_path, timeframe="1m", pair="MEME/BTC", candle_type=candle_type) assert file.is_file() assert log_has_re( - r'\(0/1\) - Download history data for "MEME/BTC", 1m, ' - r'spot and store in .*', caplog + r'\(0/1\) - Download history data for "MEME/BTC", 1m, ' r"spot and store in .*", caplog ) def test_testdata_path(testdatadir) -> None: - assert str(Path('tests') / 'testdata') in str(testdatadir) + assert str(Path("tests") / "testdata") in str(testdatadir) -@pytest.mark.parametrize("pair,timeframe,expected_result,candle_type", [ - ("ETH/BTC", "5m", "freqtrade/hello/world/ETH_BTC-5m.json", ""), - ("ETH/USDT", "1M", "freqtrade/hello/world/ETH_USDT-1Mo.json", ""), - ("Fabric Token/ETH", "5m", "freqtrade/hello/world/Fabric_Token_ETH-5m.json", ""), - ("ETHH20", "5m", "freqtrade/hello/world/ETHH20-5m.json", ""), - (".XBTBON2H", "5m", "freqtrade/hello/world/_XBTBON2H-5m.json", ""), - ("ETHUSD.d", "5m", "freqtrade/hello/world/ETHUSD_d-5m.json", ""), - ("ACC_OLD/BTC", "5m", "freqtrade/hello/world/ACC_OLD_BTC-5m.json", ""), - ("ETH/BTC", "5m", "freqtrade/hello/world/futures/ETH_BTC-5m-mark.json", "mark"), - ("ACC_OLD/BTC", "5m", "freqtrade/hello/world/futures/ACC_OLD_BTC-5m-index.json", "index"), -]) +@pytest.mark.parametrize( + "pair,timeframe,expected_result,candle_type", + [ + ("ETH/BTC", "5m", "freqtrade/hello/world/ETH_BTC-5m.json", ""), + ("ETH/USDT", "1M", "freqtrade/hello/world/ETH_USDT-1Mo.json", ""), + ("Fabric Token/ETH", "5m", "freqtrade/hello/world/Fabric_Token_ETH-5m.json", ""), + ("ETHH20", "5m", "freqtrade/hello/world/ETHH20-5m.json", ""), + (".XBTBON2H", "5m", "freqtrade/hello/world/_XBTBON2H-5m.json", ""), + ("ETHUSD.d", "5m", "freqtrade/hello/world/ETHUSD_d-5m.json", ""), + ("ACC_OLD/BTC", "5m", "freqtrade/hello/world/ACC_OLD_BTC-5m.json", ""), + ("ETH/BTC", "5m", "freqtrade/hello/world/futures/ETH_BTC-5m-mark.json", "mark"), + ("ACC_OLD/BTC", "5m", "freqtrade/hello/world/futures/ACC_OLD_BTC-5m-index.json", "index"), + ], +) def test_json_pair_data_filename(pair, timeframe, expected_result, candle_type): fn = JsonDataHandler._pair_data_filename( - Path('freqtrade/hello/world'), - pair, - timeframe, - CandleType.from_string(candle_type) + Path("freqtrade/hello/world"), pair, timeframe, CandleType.from_string(candle_type) ) assert isinstance(fn, Path) assert fn == Path(expected_result) fn = JsonGzDataHandler._pair_data_filename( - Path('freqtrade/hello/world'), + Path("freqtrade/hello/world"), pair, timeframe, - candle_type=CandleType.from_string(candle_type) + candle_type=CandleType.from_string(candle_type), ) assert isinstance(fn, Path) - assert fn == Path(expected_result + '.gz') + assert fn == Path(expected_result + ".gz") -@pytest.mark.parametrize("pair,trading_mode,expected_result", [ - ("ETH/BTC", '', 'freqtrade/hello/world/ETH_BTC-trades.json'), 
- ("ETH/USDT:USDT", 'futures', 'freqtrade/hello/world/futures/ETH_USDT_USDT-trades.json'), - ("Fabric Token/ETH", '', 'freqtrade/hello/world/Fabric_Token_ETH-trades.json'), - ("ETHH20", '', 'freqtrade/hello/world/ETHH20-trades.json'), - (".XBTBON2H", '', 'freqtrade/hello/world/_XBTBON2H-trades.json'), - ("ETHUSD.d", '', 'freqtrade/hello/world/ETHUSD_d-trades.json'), - ("ACC_OLD_BTC", '', 'freqtrade/hello/world/ACC_OLD_BTC-trades.json'), -]) +@pytest.mark.parametrize( + "pair,trading_mode,expected_result", + [ + ("ETH/BTC", "", "freqtrade/hello/world/ETH_BTC-trades.json"), + ("ETH/USDT:USDT", "futures", "freqtrade/hello/world/futures/ETH_USDT_USDT-trades.json"), + ("Fabric Token/ETH", "", "freqtrade/hello/world/Fabric_Token_ETH-trades.json"), + ("ETHH20", "", "freqtrade/hello/world/ETHH20-trades.json"), + (".XBTBON2H", "", "freqtrade/hello/world/_XBTBON2H-trades.json"), + ("ETHUSD.d", "", "freqtrade/hello/world/ETHUSD_d-trades.json"), + ("ACC_OLD_BTC", "", "freqtrade/hello/world/ACC_OLD_BTC-trades.json"), + ], +) def test_json_pair_trades_filename(pair, trading_mode, expected_result): - fn = JsonDataHandler._pair_trades_filename(Path('freqtrade/hello/world'), pair, trading_mode) + fn = JsonDataHandler._pair_trades_filename(Path("freqtrade/hello/world"), pair, trading_mode) assert isinstance(fn, Path) assert fn == Path(expected_result) - fn = JsonGzDataHandler._pair_trades_filename(Path('freqtrade/hello/world'), pair, trading_mode) + fn = JsonGzDataHandler._pair_trades_filename(Path("freqtrade/hello/world"), pair, trading_mode) assert isinstance(fn, Path) - assert fn == Path(expected_result + '.gz') + assert fn == Path(expected_result + ".gz") def test_load_cached_data_for_updating(mocker, testdatadir) -> None: - - data_handler = get_datahandler(testdatadir, 'json') + data_handler = get_datahandler(testdatadir, "json") test_data = None - test_filename = testdatadir.joinpath('UNITTEST_BTC-1m.json') + test_filename = testdatadir.joinpath("UNITTEST_BTC-1m.json") with test_filename.open("rt") as file: test_data = json.load(file) - test_data_df = ohlcv_to_dataframe(test_data, '1m', 'UNITTEST/BTC', - fill_missing=False, drop_incomplete=False) + test_data_df = ohlcv_to_dataframe( + test_data, "1m", "UNITTEST/BTC", fill_missing=False, drop_incomplete=False + ) # now = last cached item + 1 hour now_ts = test_data[-1][0] / 1000 + 60 * 60 # timeframe starts earlier than the cached data # should fully update data - timerange = TimeRange('date', None, test_data[0][0] / 1000 - 1, 0) + timerange = TimeRange("date", None, test_data[0][0] / 1000 - 1, 0) data, start_ts, end_ts = _load_cached_data_for_updating( - 'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT) + "UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT + ) assert data.empty assert start_ts == test_data[0][0] - 1000 assert end_ts is None # timeframe starts earlier than the cached data - prepending - timerange = TimeRange('date', None, test_data[0][0] / 1000 - 1, 0) + timerange = TimeRange("date", None, test_data[0][0] / 1000 - 1, 0) data, start_ts, end_ts = _load_cached_data_for_updating( - 'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT, True) + "UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT, True + ) assert_frame_equal(data, test_data_df.iloc[:-1]) assert start_ts == test_data[0][0] - 1000 assert end_ts == test_data[0][0] # timeframe starts in the center of the cached data # should return the cached data w/o the last item - timerange = TimeRange('date', None, test_data[0][0] / 1000 + 1, 0) + 
timerange = TimeRange("date", None, test_data[0][0] / 1000 + 1, 0) data, start_ts, end_ts = _load_cached_data_for_updating( - 'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT) + "UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT + ) assert_frame_equal(data, test_data_df.iloc[:-1]) assert test_data[-2][0] <= start_ts < test_data[-1][0] @@ -231,27 +259,30 @@ def test_load_cached_data_for_updating(mocker, testdatadir) -> None: # timeframe starts after the cached data # should return the cached data w/o the last item - timerange = TimeRange('date', None, test_data[-1][0] / 1000 + 100, 0) + timerange = TimeRange("date", None, test_data[-1][0] / 1000 + 100, 0) data, start_ts, end_ts = _load_cached_data_for_updating( - 'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT) + "UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT + ) assert_frame_equal(data, test_data_df.iloc[:-1]) assert test_data[-2][0] <= start_ts < test_data[-1][0] assert end_ts is None # no datafile exist # should return timestamp start time - timerange = TimeRange('date', None, now_ts - 10000, 0) + timerange = TimeRange("date", None, now_ts - 10000, 0) data, start_ts, end_ts = _load_cached_data_for_updating( - 'NONEXIST/BTC', '1m', timerange, data_handler, CandleType.SPOT) + "NONEXIST/BTC", "1m", timerange, data_handler, CandleType.SPOT + ) assert data.empty assert start_ts == (now_ts - 10000) * 1000 assert end_ts is None # no datafile exist # should return timestamp start and end time time - timerange = TimeRange('date', 'date', now_ts - 1000000, now_ts - 100000) + timerange = TimeRange("date", "date", now_ts - 1000000, now_ts - 100000) data, start_ts, end_ts = _load_cached_data_for_updating( - 'NONEXIST/BTC', '1m', timerange, data_handler, CandleType.SPOT) + "NONEXIST/BTC", "1m", timerange, data_handler, CandleType.SPOT + ) assert data.empty assert start_ts == (now_ts - 1000000) * 1000 assert end_ts == (now_ts - 100000) * 1000 @@ -259,43 +290,43 @@ def test_load_cached_data_for_updating(mocker, testdatadir) -> None: # no datafile exist, no timeframe is set # should return an empty array and None data, start_ts, end_ts = _load_cached_data_for_updating( - 'NONEXIST/BTC', '1m', None, data_handler, CandleType.SPOT) + "NONEXIST/BTC", "1m", None, data_handler, CandleType.SPOT + ) assert data.empty assert start_ts is None assert end_ts is None -@pytest.mark.parametrize('candle_type,subdir,file_tail', [ - ('mark', 'futures/', '-mark'), - ('spot', '', ''), -]) +@pytest.mark.parametrize( + "candle_type,subdir,file_tail", + [ + ("mark", "futures/", "-mark"), + ("spot", "", ""), + ], +) def test_download_pair_history( - ohlcv_history_list, - mocker, - default_conf, - tmp_path, - candle_type, - subdir, - file_tail + ohlcv_history_list, mocker, default_conf, tmp_path, candle_type, subdir, file_tail ) -> None: - mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list) + mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history_list) exchange = get_patched_exchange(mocker, default_conf) - file1_1 = tmp_path / f'{subdir}MEME_BTC-1m{file_tail}.feather' - file1_5 = tmp_path / f'{subdir}MEME_BTC-5m{file_tail}.feather' - file2_1 = tmp_path / f'{subdir}CFI_BTC-1m{file_tail}.feather' - file2_5 = tmp_path / f'{subdir}CFI_BTC-5m{file_tail}.feather' + file1_1 = tmp_path / f"{subdir}MEME_BTC-1m{file_tail}.feather" + file1_5 = tmp_path / f"{subdir}MEME_BTC-5m{file_tail}.feather" + file2_1 = tmp_path / f"{subdir}CFI_BTC-1m{file_tail}.feather" + file2_5 = tmp_path / 
f"{subdir}CFI_BTC-5m{file_tail}.feather" assert not file1_1.is_file() assert not file2_1.is_file() - assert _download_pair_history(datadir=tmp_path, exchange=exchange, - pair='MEME/BTC', - timeframe='1m', - candle_type=candle_type) - assert _download_pair_history(datadir=tmp_path, exchange=exchange, - pair='CFI/BTC', - timeframe='1m', - candle_type=candle_type) + assert _download_pair_history( + datadir=tmp_path, + exchange=exchange, + pair="MEME/BTC", + timeframe="1m", + candle_type=candle_type, + ) + assert _download_pair_history( + datadir=tmp_path, exchange=exchange, pair="CFI/BTC", timeframe="1m", candle_type=candle_type + ) assert not exchange._pairs_last_refresh_time assert file1_1.is_file() assert file2_1.is_file() @@ -307,14 +338,16 @@ def test_download_pair_history( assert not file1_5.is_file() assert not file2_5.is_file() - assert _download_pair_history(datadir=tmp_path, exchange=exchange, - pair='MEME/BTC', - timeframe='5m', - candle_type=candle_type) - assert _download_pair_history(datadir=tmp_path, exchange=exchange, - pair='CFI/BTC', - timeframe='5m', - candle_type=candle_type) + assert _download_pair_history( + datadir=tmp_path, + exchange=exchange, + pair="MEME/BTC", + timeframe="5m", + candle_type=candle_type, + ) + assert _download_pair_history( + datadir=tmp_path, exchange=exchange, pair="CFI/BTC", timeframe="5m", candle_type=candle_type + ) assert not exchange._pairs_last_refresh_time assert file1_5.is_file() assert file2_5.is_file() @@ -323,30 +356,45 @@ def test_download_pair_history( def test_download_pair_history2(mocker, default_conf, testdatadir) -> None: tick = [ [1509836520000, 0.00162008, 0.00162008, 0.00162008, 0.00162008, 108.14853839], - [1509836580000, 0.00161, 0.00161, 0.00161, 0.00161, 82.390199] + [1509836580000, 0.00161, 0.00161, 0.00161, 0.00161, 82.390199], ] json_dump_mock = mocker.patch( - 'freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler.ohlcv_store', - return_value=None) - mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=tick) + "freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler.ohlcv_store", + return_value=None, + ) + mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=tick) exchange = get_patched_exchange(mocker, default_conf) - _download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/BTC", - timeframe='1m', candle_type='spot') - _download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/BTC", - timeframe='3m', candle_type='spot') - _download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/USDT", - timeframe='1h', candle_type='mark') + _download_pair_history( + datadir=testdatadir, + exchange=exchange, + pair="UNITTEST/BTC", + timeframe="1m", + candle_type="spot", + ) + _download_pair_history( + datadir=testdatadir, + exchange=exchange, + pair="UNITTEST/BTC", + timeframe="3m", + candle_type="spot", + ) + _download_pair_history( + datadir=testdatadir, + exchange=exchange, + pair="UNITTEST/USDT", + timeframe="1h", + candle_type="mark", + ) assert json_dump_mock.call_count == 3 def test_download_backtesting_data_exception(mocker, caplog, default_conf, tmp_path) -> None: - mocker.patch(f'{EXMS}.get_historic_ohlcv', - side_effect=Exception('File Error')) + mocker.patch(f"{EXMS}.get_historic_ohlcv", side_effect=Exception("File Error")) exchange = get_patched_exchange(mocker, default_conf) - assert not _download_pair_history(datadir=tmp_path, exchange=exchange, - pair='MEME/BTC', - timeframe='1m', candle_type='spot') + assert not 
_download_pair_history( + datadir=tmp_path, exchange=exchange, pair="MEME/BTC", timeframe="1m", candle_type="spot" + ) assert log_has('Failed to download history data for pair: "MEME/BTC", timeframe: 1m.', caplog) @@ -354,41 +402,46 @@ def test_load_partial_missing(testdatadir, caplog) -> None: # Make sure we start fresh - test missing data at start start = dt_utc(2018, 1, 1) end = dt_utc(2018, 1, 11) - data = load_data(testdatadir, '5m', ['UNITTEST/BTC'], startup_candles=20, - timerange=TimeRange('date', 'date', start.timestamp(), end.timestamp())) - assert log_has( - 'Using indicator startup period: 20 ...', caplog + data = load_data( + testdatadir, + "5m", + ["UNITTEST/BTC"], + startup_candles=20, + timerange=TimeRange("date", "date", start.timestamp(), end.timestamp()), ) + assert log_has("Using indicator startup period: 20 ...", caplog) # timedifference in 5 minutes td = ((end - start).total_seconds() // 60 // 5) + 1 - assert td != len(data['UNITTEST/BTC']) - start_real = data['UNITTEST/BTC'].iloc[0, 0] - assert log_has(f'UNITTEST/BTC, spot, 5m, ' - f'data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}', - caplog) + assert td != len(data["UNITTEST/BTC"]) + start_real = data["UNITTEST/BTC"].iloc[0, 0] + assert log_has( + f"UNITTEST/BTC, spot, 5m, data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}", + caplog, + ) # Make sure we start fresh - test missing data at end caplog.clear() start = dt_utc(2018, 1, 10) end = dt_utc(2018, 2, 20) - data = load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=TimeRange('date', 'date', start.timestamp(), end.timestamp())) + data = load_data( + datadir=testdatadir, + timeframe="5m", + pairs=["UNITTEST/BTC"], + timerange=TimeRange("date", "date", start.timestamp(), end.timestamp()), + ) # timedifference in 5 minutes td = ((end - start).total_seconds() // 60 // 5) + 1 - assert td != len(data['UNITTEST/BTC']) + assert td != len(data["UNITTEST/BTC"]) # Shift endtime with +5 - end_real = data['UNITTEST/BTC'].iloc[-1, 0].to_pydatetime() - assert log_has(f'UNITTEST/BTC, spot, 5m, ' - f'data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}', - caplog) + end_real = data["UNITTEST/BTC"].iloc[-1, 0].to_pydatetime() + assert log_has( + f"UNITTEST/BTC, spot, 5m, data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}", + caplog, + ) def test_init(default_conf) -> None: - assert {} == load_data( - datadir=Path(), - pairs=[], - timeframe=default_conf['timeframe'] - ) + assert {} == load_data(datadir=Path(), pairs=[], timeframe=default_conf["timeframe"]) def test_init_with_refresh(default_conf, mocker) -> None: @@ -396,20 +449,16 @@ def test_init_with_refresh(default_conf, mocker) -> None: refresh_data( datadir=Path(), pairs=[], - timeframe=default_conf['timeframe'], + timeframe=default_conf["timeframe"], exchange=exchange, - candle_type=CandleType.SPOT - ) - assert {} == load_data( - datadir=Path(), - pairs=[], - timeframe=default_conf['timeframe'] + candle_type=CandleType.SPOT, ) + assert {} == load_data(datadir=Path(), pairs=[], timeframe=default_conf["timeframe"]) def test_file_dump_json_tofile(testdatadir) -> None: - file = testdatadir / f'test_{uuid.uuid4()}.json' - data = {'bar': 'foo'} + file = testdatadir / f"test_{uuid.uuid4()}.json" + data = {"bar": "foo"} # check the file we will create does not exist assert not file.is_file() @@ -424,8 +473,8 @@ def test_file_dump_json_tofile(testdatadir) -> None: with file.open() as data_file: json_from_file = json.load(data_file) - assert 'bar' in json_from_file - assert 
json_from_file['bar'] == 'foo' + assert "bar" in json_from_file + assert json_from_file["bar"] == "foo" # Remove the file _clean_test_file(file) @@ -434,113 +483,116 @@ def test_file_dump_json_tofile(testdatadir) -> None: def test_get_timerange(default_conf, mocker, testdatadir) -> None: patch_exchange(mocker) - default_conf.update({'strategy': CURRENT_TEST_STRATEGY}) + default_conf.update({"strategy": CURRENT_TEST_STRATEGY}) strategy = StrategyResolver.load_strategy(default_conf) data = strategy.advise_all_indicators( - load_data( - datadir=testdatadir, - timeframe='1m', - pairs=['UNITTEST/BTC'] - ) + load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"]) ) min_date, max_date = get_timerange(data) - assert min_date.isoformat() == '2017-11-04T23:02:00+00:00' - assert max_date.isoformat() == '2017-11-14T22:59:00+00:00' + assert min_date.isoformat() == "2017-11-04T23:02:00+00:00" + assert max_date.isoformat() == "2017-11-14T22:59:00+00:00" def test_validate_backtest_data_warn(default_conf, mocker, caplog, testdatadir) -> None: patch_exchange(mocker) - default_conf.update({'strategy': CURRENT_TEST_STRATEGY}) + default_conf.update({"strategy": CURRENT_TEST_STRATEGY}) strategy = StrategyResolver.load_strategy(default_conf) data = strategy.advise_all_indicators( load_data( - datadir=testdatadir, - timeframe='1m', - pairs=['UNITTEST/BTC'], - fill_up_missing=False + datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"], fill_up_missing=False ) ) min_date, max_date = get_timerange(data) caplog.clear() - assert validate_backtest_data(data['UNITTEST/BTC'], 'UNITTEST/BTC', - min_date, max_date, timeframe_to_minutes('1m')) + assert validate_backtest_data( + data["UNITTEST/BTC"], "UNITTEST/BTC", min_date, max_date, timeframe_to_minutes("1m") + ) assert len(caplog.record_tuples) == 1 assert log_has( "UNITTEST/BTC has missing frames: expected 14397, got 13681, that's 716 missing values", - caplog) + caplog, + ) def test_validate_backtest_data(default_conf, mocker, caplog, testdatadir) -> None: patch_exchange(mocker) - default_conf.update({'strategy': CURRENT_TEST_STRATEGY}) + default_conf.update({"strategy": CURRENT_TEST_STRATEGY}) strategy = StrategyResolver.load_strategy(default_conf) timerange = TimeRange() data = strategy.advise_all_indicators( - load_data( - datadir=testdatadir, - timeframe='5m', - pairs=['UNITTEST/BTC'], - timerange=timerange - ) + load_data(datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange) ) min_date, max_date = get_timerange(data) caplog.clear() - assert not validate_backtest_data(data['UNITTEST/BTC'], 'UNITTEST/BTC', - min_date, max_date, timeframe_to_minutes('5m')) + assert not validate_backtest_data( + data["UNITTEST/BTC"], "UNITTEST/BTC", min_date, max_date, timeframe_to_minutes("5m") + ) assert len(caplog.record_tuples) == 0 -@pytest.mark.parametrize('trademode,callcount', [ - ('spot', 4), - ('margin', 4), - ('futures', 8), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls -]) +@pytest.mark.parametrize( + "trademode,callcount", + [ + ("spot", 4), + ("margin", 4), + ("futures", 8), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls + ], +) def test_refresh_backtest_ohlcv_data( - mocker, default_conf, markets, caplog, testdatadir, trademode, callcount): + mocker, default_conf, markets, caplog, testdatadir, trademode, callcount +): caplog.set_level(logging.DEBUG) - dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history') - mocker.patch(f'{EXMS}.markets', 
PropertyMock(return_value=markets)) + dl_mock = mocker.patch("freqtrade.data.history.history_utils._download_pair_history") + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) mocker.patch.object(Path, "exists", MagicMock(return_value=True)) mocker.patch.object(Path, "unlink", MagicMock()) - default_conf['trading_mode'] = trademode + default_conf["trading_mode"] = trademode - ex = get_patched_exchange(mocker, default_conf, id='bybit') + ex = get_patched_exchange(mocker, default_conf, id="bybit") timerange = TimeRange.parse_timerange("20190101-20190102") - refresh_backtest_ohlcv_data(exchange=ex, pairs=["ETH/BTC", "XRP/BTC"], - timeframes=["1m", "5m"], datadir=testdatadir, - timerange=timerange, erase=True, - trading_mode=trademode - ) + refresh_backtest_ohlcv_data( + exchange=ex, + pairs=["ETH/BTC", "XRP/BTC"], + timeframes=["1m", "5m"], + datadir=testdatadir, + timerange=timerange, + erase=True, + trading_mode=trademode, + ) assert dl_mock.call_count == callcount - assert dl_mock.call_args[1]['timerange'].starttype == 'date' + assert dl_mock.call_args[1]["timerange"].starttype == "date" assert log_has_re(r"Downloading pair ETH/BTC, .* interval 1m\.", caplog) - if trademode == 'futures': + if trademode == "futures": assert log_has_re(r"Downloading pair ETH/BTC, funding_rate, interval 8h\.", caplog) assert log_has_re(r"Downloading pair ETH/BTC, mark, interval 4h\.", caplog) def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir): - dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history', - MagicMock()) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils._download_pair_history", MagicMock() + ) ex = get_patched_exchange(mocker, default_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={})) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value={})) timerange = TimeRange.parse_timerange("20190101-20190102") - unav_pairs = refresh_backtest_ohlcv_data(exchange=ex, pairs=["BTT/BTC", "LTC/USDT"], - timeframes=["1m", "5m"], - datadir=testdatadir, - timerange=timerange, erase=False, - trading_mode='spot' - ) + unav_pairs = refresh_backtest_ohlcv_data( + exchange=ex, + pairs=["BTT/BTC", "LTC/USDT"], + timeframes=["1m", "5m"], + datadir=testdatadir, + timerange=timerange, + erase=False, + trading_mode="spot", + ) assert dl_mock.call_count == 0 assert "BTT/BTC" in unav_pairs @@ -549,90 +601,104 @@ def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir): def test_refresh_backtest_trades_data(mocker, default_conf, markets, caplog, testdatadir): - dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_trades_history', - MagicMock()) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + dl_mock = mocker.patch( + "freqtrade.data.history.history_utils._download_trades_history", MagicMock() + ) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) mocker.patch.object(Path, "exists", MagicMock(return_value=True)) mocker.patch.object(Path, "unlink", MagicMock()) ex = get_patched_exchange(mocker, default_conf) timerange = TimeRange.parse_timerange("20190101-20190102") - unavailable_pairs = refresh_backtest_trades_data(exchange=ex, - pairs=["ETH/BTC", "XRP/BTC", "XRP/ETH"], - datadir=testdatadir, - timerange=timerange, erase=True, - trading_mode=TradingMode.SPOT, - ) + unavailable_pairs = refresh_backtest_trades_data( + exchange=ex, + pairs=["ETH/BTC", "XRP/BTC", "XRP/ETH"], + datadir=testdatadir, + timerange=timerange, + erase=True, + 
trading_mode=TradingMode.SPOT, + ) assert dl_mock.call_count == 2 - assert dl_mock.call_args[1]['timerange'].starttype == 'date' + assert dl_mock.call_args[1]["timerange"].starttype == "date" assert log_has("Downloading trades for pair ETH/BTC.", caplog) assert unavailable_pairs == ["XRP/ETH"] assert log_has("Skipping pair XRP/ETH...", caplog) -def test_download_trades_history(trades_history, mocker, default_conf, testdatadir, caplog, - tmp_path, time_machine) -> None: +def test_download_trades_history( + trades_history, mocker, default_conf, testdatadir, caplog, tmp_path, time_machine +) -> None: start_dt = dt_utc(2023, 1, 1) time_machine.move_to(start_dt, tick=False) ght_mock = MagicMock(side_effect=lambda pair, *args, **kwargs: (pair, trades_history)) - mocker.patch(f'{EXMS}.get_historic_trades', ght_mock) + mocker.patch(f"{EXMS}.get_historic_trades", ght_mock) exchange = get_patched_exchange(mocker, default_conf) - file1 = tmp_path / 'ETH_BTC-trades.json.gz' - data_handler = get_datahandler(tmp_path, data_format='jsongz') + file1 = tmp_path / "ETH_BTC-trades.json.gz" + data_handler = get_datahandler(tmp_path, data_format="jsongz") assert not file1.is_file() - assert _download_trades_history(data_handler=data_handler, exchange=exchange, - pair='ETH/BTC', trading_mode=TradingMode.SPOT) + assert _download_trades_history( + data_handler=data_handler, exchange=exchange, pair="ETH/BTC", trading_mode=TradingMode.SPOT + ) assert log_has("Current Amount of trades: 0", caplog) assert log_has("New Amount of trades: 6", caplog) assert ght_mock.call_count == 1 # Default "since" - 30 days before current day. - assert ght_mock.call_args_list[0][1]['since'] == dt_ts(start_dt - timedelta(days=30)) + assert ght_mock.call_args_list[0][1]["since"] == dt_ts(start_dt - timedelta(days=30)) assert file1.is_file() caplog.clear() ght_mock.reset_mock() since_time = int(trades_history[-3][0] // 1000) since_time2 = int(trades_history[-1][0] // 1000) - timerange = TimeRange('date', None, since_time, 0) + timerange = TimeRange("date", None, since_time, 0) assert _download_trades_history( - data_handler=data_handler, exchange=exchange, pair='ETH/BTC', - timerange=timerange, trading_mode=TradingMode.SPOT) + data_handler=data_handler, + exchange=exchange, + pair="ETH/BTC", + timerange=timerange, + trading_mode=TradingMode.SPOT, + ) assert ght_mock.call_count == 1 # Check this in seconds - since we had to convert to seconds above too. 
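# A minimal, self-contained sketch of the second/millisecond handling asserted above
# (hypothetical trade row; exchanges report trade timestamps in milliseconds, while the
# TimeRange start used by the test is kept in seconds, hence the `// 1000` on both sides):
trade_row = [1672531200000, "id-1", None, "buy", 16500.0, 0.01]  # 2023-01-01 00:00:00 UTC, in ms
since_seconds = int(trade_row[0] // 1000)
assert since_seconds == 1672531200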
- assert int(ght_mock.call_args_list[0][1]['since'] // 1000) == since_time2 - 5 - assert ght_mock.call_args_list[0][1]['from_id'] is not None + assert int(ght_mock.call_args_list[0][1]["since"] // 1000) == since_time2 - 5 + assert ght_mock.call_args_list[0][1]["from_id"] is not None file1.unlink() - mocker.patch(f'{EXMS}.get_historic_trades', MagicMock(side_effect=ValueError)) + mocker.patch(f"{EXMS}.get_historic_trades", MagicMock(side_effect=ValueError)) caplog.clear() - assert not _download_trades_history(data_handler=data_handler, exchange=exchange, - pair='ETH/BTC', trading_mode=TradingMode.SPOT) + assert not _download_trades_history( + data_handler=data_handler, exchange=exchange, pair="ETH/BTC", trading_mode=TradingMode.SPOT + ) assert log_has_re('Failed to download historic trades for pair: "ETH/BTC".*', caplog) - file2 = tmp_path / 'XRP_ETH-trades.json.gz' + file2 = tmp_path / "XRP_ETH-trades.json.gz" copyfile(testdatadir / file2.name, file2) ght_mock.reset_mock() - mocker.patch(f'{EXMS}.get_historic_trades', ght_mock) + mocker.patch(f"{EXMS}.get_historic_trades", ght_mock) # Since before first start date since_time = int(trades_history[0][0] // 1000) - 500 - timerange = TimeRange('date', None, since_time, 0) + timerange = TimeRange("date", None, since_time, 0) assert _download_trades_history( - data_handler=data_handler, exchange=exchange, pair='XRP/ETH', - timerange=timerange, trading_mode=TradingMode.SPOT) + data_handler=data_handler, + exchange=exchange, + pair="XRP/ETH", + timerange=timerange, + trading_mode=TradingMode.SPOT, + ) assert ght_mock.call_count == 1 - assert int(ght_mock.call_args_list[0][1]['since'] // 1000) == since_time - assert ght_mock.call_args_list[0][1]['from_id'] is None - assert log_has_re(r'Start .* earlier than available data. Redownloading trades for.*', caplog) + assert int(ght_mock.call_args_list[0][1]["since"] // 1000) == since_time + assert ght_mock.call_args_list[0][1]["from_id"] is None + assert log_has_re(r"Start .* earlier than available data. Redownloading trades for.*", caplog) _clean_test_file(file2) diff --git a/tests/data/test_trade_converter_kraken.py b/tests/data/test_trade_converter_kraken.py index cc5721030..480ea93ca 100644 --- a/tests/data/test_trade_converter_kraken.py +++ b/tests/data/test_trade_converter_kraken.py @@ -13,17 +13,22 @@ from tests.conftest import EXMS, log_has, log_has_re, patch_exchange def test_import_kraken_trades_from_csv(testdatadir, tmp_path, caplog, default_conf_usdt, mocker): with pytest.raises(OperationalException, match="This function is only for the kraken exchange"): - import_kraken_trades_from_csv(default_conf_usdt, 'feather') + import_kraken_trades_from_csv(default_conf_usdt, "feather") - default_conf_usdt['exchange']['name'] = 'kraken' + default_conf_usdt["exchange"]["name"] = "kraken" - patch_exchange(mocker, id='kraken') - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={ - 'BCH/EUR': {'symbol': 'BCH/EUR', 'id': 'BCHEUR', 'altname': 'BCHEUR'}, - })) - dstfile = tmp_path / 'BCH_EUR-trades.feather' + patch_exchange(mocker, id="kraken") + mocker.patch( + f"{EXMS}.markets", + PropertyMock( + return_value={ + "BCH/EUR": {"symbol": "BCH/EUR", "id": "BCHEUR", "altname": "BCHEUR"}, + } + ), + ) + dstfile = tmp_path / "BCH_EUR-trades.feather" assert not dstfile.is_file() - default_conf_usdt['datadir'] = tmp_path + default_conf_usdt["datadir"] = tmp_path # There's 2 files in this tree, containing a total of 2 days. 
# tests/testdata/kraken/ # └── trades_csv @@ -31,29 +36,31 @@ def test_import_kraken_trades_from_csv(testdatadir, tmp_path, caplog, default_co # └── incremental_q2 # └── BCHEUR.csv <-- 2023-01-02 - copytree(testdatadir / 'kraken/trades_csv', tmp_path / 'trades_csv') + copytree(testdatadir / "kraken/trades_csv", tmp_path / "trades_csv") - import_kraken_trades_from_csv(default_conf_usdt, 'feather') + import_kraken_trades_from_csv(default_conf_usdt, "feather") assert log_has("Found csv files for BCHEUR.", caplog) assert log_has("Converting pairs: BCH/EUR.", caplog) assert log_has_re(r"BCH/EUR: 340 trades.* 2023-01-01.* 2023-01-02.*", caplog) assert dstfile.is_file() - dh = get_datahandler(tmp_path, 'feather') - trades = dh.trades_load('BCH_EUR', TradingMode.SPOT) + dh = get_datahandler(tmp_path, "feather") + trades = dh.trades_load("BCH_EUR", TradingMode.SPOT) assert len(trades) == 340 - assert trades['date'].min().to_pydatetime() == datetime(2023, 1, 1, 0, 3, 56, - tzinfo=timezone.utc) - assert trades['date'].max().to_pydatetime() == datetime(2023, 1, 2, 23, 17, 3, - tzinfo=timezone.utc) + assert trades["date"].min().to_pydatetime() == datetime( + 2023, 1, 1, 0, 3, 56, tzinfo=timezone.utc + ) + assert trades["date"].max().to_pydatetime() == datetime( + 2023, 1, 2, 23, 17, 3, tzinfo=timezone.utc + ) # ID is not filled - assert len(trades.loc[trades['id'] != '']) == 0 + assert len(trades.loc[trades["id"] != ""]) == 0 caplog.clear() - default_conf_usdt['pairs'] = ['XRP/EUR'] + default_conf_usdt["pairs"] = ["XRP/EUR"] # Filtered to non-existing pair - import_kraken_trades_from_csv(default_conf_usdt, 'feather') + import_kraken_trades_from_csv(default_conf_usdt, "feather") assert log_has("Found csv files for BCHEUR.", caplog) assert log_has("No data found for pairs XRP/EUR.", caplog) diff --git a/tests/edge/test_edge.py b/tests/edge/test_edge.py index 53840b190..50c03f0f3 100644 --- a/tests/edge/test_edge.py +++ b/tests/edge/test_edge.py @@ -16,8 +16,12 @@ from freqtrade.enums import ExitType from freqtrade.exceptions import OperationalException from freqtrade.util.datetime_helpers import dt_ts, dt_utc from tests.conftest import EXMS, get_patched_freqtradebot, log_has -from tests.optimize import (BTContainer, BTrade, _build_backtest_dataframe, - _get_frame_time_from_offset) +from tests.optimize import ( + BTContainer, + BTrade, + _build_backtest_dataframe, + _get_frame_time_from_offset, +) # Cases to be tested: @@ -33,70 +37,82 @@ timeframe_in_minute = 60 # End helper functions # Open trade should be removed from the end -tc0 = BTContainer(data=[ - # D O H L C V B S - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 1]], # enter trade (signal on last candle) - stop_loss=-0.99, roi={"0": float('inf')}, profit_perc=0.00, - trades=[] +tc0 = BTContainer( + data=[ + # D O H L C V B S + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 1], + ], # enter trade (signal on last candle) + stop_loss=-0.99, + roi={"0": float("inf")}, + profit_perc=0.00, + trades=[], ) # Two complete trades within dataframe(with sell hit for all) -tc1 = BTContainer(data=[ - # D O H L C V B S - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 1], # enter trade (signal on last candle) - [2, 5000, 5025, 4975, 4987, 6172, 0, 0], # exit at open - [3, 5000, 5025, 4975, 4987, 6172, 1, 0], # no action - [4, 5000, 5025, 4975, 4987, 6172, 0, 0], # should enter the trade - [5, 5000, 5025, 4975, 4987, 6172, 0, 1], # no action - [6, 5000, 5025, 4975, 
4987, 6172, 0, 0], # should sell -], - stop_loss=-0.99, roi={"0": float('inf')}, profit_perc=0.00, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=2), - BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=4, close_tick=6)] +tc1 = BTContainer( + data=[ + # D O H L C V B S + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 1], # enter trade (signal on last candle) + [2, 5000, 5025, 4975, 4987, 6172, 0, 0], # exit at open + [3, 5000, 5025, 4975, 4987, 6172, 1, 0], # no action + [4, 5000, 5025, 4975, 4987, 6172, 0, 0], # should enter the trade + [5, 5000, 5025, 4975, 4987, 6172, 0, 1], # no action + [6, 5000, 5025, 4975, 4987, 6172, 0, 0], # should sell + ], + stop_loss=-0.99, + roi={"0": float("inf")}, + profit_perc=0.00, + trades=[ + BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=2), + BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=4, close_tick=6), + ], ) # 3) Entered, sl 1%, candle drops 8% => Trade closed, 1% loss -tc2 = BTContainer(data=[ - # D O H L C V B S - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4600, 4987, 6172, 0, 0], # enter trade, stoploss hit - [2, 5000, 5025, 4975, 4987, 6172, 0, 0], -], - stop_loss=-0.01, roi={"0": float('inf')}, profit_perc=-0.01, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)] +tc2 = BTContainer( + data=[ + # D O H L C V B S + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4600, 4987, 6172, 0, 0], # enter trade, stoploss hit + [2, 5000, 5025, 4975, 4987, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": float("inf")}, + profit_perc=-0.01, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)], ) # 4) Entered, sl 3 %, candle drops 4%, recovers to 1 % = > Trade closed, 3 % loss -tc3 = BTContainer(data=[ - # D O H L C V B S - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4800, 4987, 6172, 0, 0], # enter trade, stoploss hit - [2, 5000, 5025, 4975, 4987, 6172, 0, 0], -], - stop_loss=-0.03, roi={"0": float('inf')}, profit_perc=-0.03, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)] +tc3 = BTContainer( + data=[ + # D O H L C V B S + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4800, 4987, 6172, 0, 0], # enter trade, stoploss hit + [2, 5000, 5025, 4975, 4987, 6172, 0, 0], + ], + stop_loss=-0.03, + roi={"0": float("inf")}, + profit_perc=-0.03, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)], ) # 5) Stoploss and sell are hit. 
should sell on stoploss -tc4 = BTContainer(data=[ - # D O H L C V B S - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4800, 4987, 6172, 0, 1], # enter trade, stoploss hit, sell signal - [2, 5000, 5025, 4975, 4987, 6172, 0, 0], -], - stop_loss=-0.03, roi={"0": float('inf')}, profit_perc=-0.03, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)] +tc4 = BTContainer( + data=[ + # D O H L C V B S + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4800, 4987, 6172, 0, 1], # enter trade, stoploss hit, sell signal + [2, 5000, 5025, 4975, 4987, 6172, 0, 0], + ], + stop_loss=-0.03, + roi={"0": float("inf")}, + profit_perc=-0.03, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)], ) -TESTS = [ - tc0, - tc1, - tc2, - tc3, - tc4 -] +TESTS = [tc0, tc1, tc2, tc3, tc4] @pytest.mark.parametrize("data", TESTS) @@ -110,7 +126,7 @@ def test_edge_results(edge_conf, mocker, caplog, data) -> None: caplog.set_level(logging.DEBUG) edge.fee = 0 - trades = edge._find_trades_for_stoploss_range(frame, 'TEST/BTC', [data.stop_loss]) + trades = edge._find_trades_for_stoploss_range(frame, "TEST/BTC", [data.stop_loss]) results = edge._fill_calculable_fields(DataFrame(trades)) if trades else DataFrame() assert len(trades) == len(data.trades) @@ -128,106 +144,117 @@ def test_edge_results(edge_conf, mocker, caplog, data) -> None: def test_adjust(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), - 'C/D': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), - 'N/O': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60) - } - )) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + "C/D": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + "N/O": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + } + ), + ) - pairs = ['A/B', 'C/D', 'E/F', 'G/H'] - assert (edge.adjust(pairs) == ['E/F', 'C/D']) + pairs = ["A/B", "C/D", "E/F", "G/H"] + assert edge.adjust(pairs) == ["E/F", "C/D"] def test_edge_get_stoploss(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), - 'C/D': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), - 'N/O': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60) - } - )) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + "C/D": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + "N/O": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), + } + ), + ) - assert edge.get_stoploss('E/F') == -0.01 + assert edge.get_stoploss("E/F") == -0.01 def test_nonexisting_get_stoploss(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60), - } - )) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 
60), + } + ), + ) - assert edge.get_stoploss('N/O') == -0.1 + assert edge.get_stoploss("N/O") == -0.1 def test_edge_stake_amount(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.02, 0.66, 3.71, 0.50, 1.71, 10, 60), - } - )) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.02, 0.66, 3.71, 0.50, 1.71, 10, 60), + } + ), + ) assert edge._capital_ratio == 0.5 - assert edge.stake_amount('E/F', free_capital=100, total_capital=100, - capital_in_trade=25) == 31.25 + assert ( + edge.stake_amount("E/F", free_capital=100, total_capital=100, capital_in_trade=25) == 31.25 + ) - assert edge.stake_amount('E/F', free_capital=20, total_capital=100, - capital_in_trade=25) == 20 + assert edge.stake_amount("E/F", free_capital=20, total_capital=100, capital_in_trade=25) == 20 - assert edge.stake_amount('E/F', free_capital=0, total_capital=100, - capital_in_trade=25) == 0 + assert edge.stake_amount("E/F", free_capital=0, total_capital=100, capital_in_trade=25) == 0 # Test with increased allowed_risk # Result should be no more than allowed capital edge._allowed_risk = 0.4 edge._capital_ratio = 0.5 - assert edge.stake_amount('E/F', free_capital=100, total_capital=100, - capital_in_trade=25) == 62.5 + assert ( + edge.stake_amount("E/F", free_capital=100, total_capital=100, capital_in_trade=25) == 62.5 + ) - assert edge.stake_amount('E/F', free_capital=100, total_capital=100, - capital_in_trade=0) == 50 + assert edge.stake_amount("E/F", free_capital=100, total_capital=100, capital_in_trade=0) == 50 edge._capital_ratio = 1 # Full capital is available - assert edge.stake_amount('E/F', free_capital=100, total_capital=100, - capital_in_trade=0) == 100 + assert edge.stake_amount("E/F", free_capital=100, total_capital=100, capital_in_trade=0) == 100 # Full capital is available - assert edge.stake_amount('E/F', free_capital=0, total_capital=100, - capital_in_trade=0) == 0 + assert edge.stake_amount("E/F", free_capital=0, total_capital=100, capital_in_trade=0) == 0 def test_nonexisting_stake_amount(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.11, 0.66, 3.71, 0.50, 1.71, 10, 60), - } - )) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.11, 0.66, 3.71, 0.50, 1.71, 10, 60), + } + ), + ) # should use strategy stoploss - assert edge.stake_amount('N/O', 1, 2, 1) == 0.15 + assert edge.stake_amount("N/O", 1, 2, 1) == 0.15 def test_edge_heartbeat_calculate(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - heartbeat = edge_conf['edge']['process_throttle_secs'] + heartbeat = edge_conf["edge"]["process_throttle_secs"] # should not recalculate if heartbeat not reached edge._last_updated = dt_ts() - heartbeat + 1 - assert edge.calculate(edge_conf['exchange']['pair_whitelist']) is False + assert edge.calculate(edge_conf["exchange"]["pair_whitelist"]) is False -def mocked_load_data(datadir, pairs=None, timeframe='0m', - timerange=None, *args, **kwargs): +def mocked_load_data(datadir, pairs=None, timeframe="0m", 
timerange=None, *args, **kwargs): if pairs is None: pairs = [] hz = 0.1 @@ -240,8 +267,10 @@ def mocked_load_data(datadir, pairs=None, timeframe='0m', math.sin(x * hz) / 1000 + base + 0.0001, math.sin(x * hz) / 1000 + base - 0.0001, math.sin(x * hz) / 1000 + base, - 123.45 - ] for x in range(0, 500)] + 123.45, + ] + for x in range(0, 500) + ] hz = 0.2 base = 0.002 @@ -252,36 +281,38 @@ def mocked_load_data(datadir, pairs=None, timeframe='0m', math.sin(x * hz) / 1000 + base + 0.0001, math.sin(x * hz) / 1000 + base - 0.0001, math.sin(x * hz) / 1000 + base, - 123.45 - ] for x in range(0, 500)] + 123.45, + ] + for x in range(0, 500) + ] - pairdata = {'NEO/BTC': ohlcv_to_dataframe(NEOBTC, '1h', pair="NEO/BTC", - fill_missing=True), - 'LTC/BTC': ohlcv_to_dataframe(LTCBTC, '1h', pair="LTC/BTC", - fill_missing=True)} + pairdata = { + "NEO/BTC": ohlcv_to_dataframe(NEOBTC, "1h", pair="NEO/BTC", fill_missing=True), + "LTC/BTC": ohlcv_to_dataframe(LTCBTC, "1h", pair="LTC/BTC", fill_missing=True), + } return pairdata def test_edge_process_downloaded_data(mocker, edge_conf): freqtrade = get_patched_freqtradebot(mocker, edge_conf) - mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.001)) - mocker.patch('freqtrade.edge.edge_positioning.refresh_data', MagicMock()) - mocker.patch('freqtrade.edge.edge_positioning.load_data', mocked_load_data) + mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.001)) + mocker.patch("freqtrade.edge.edge_positioning.refresh_data", MagicMock()) + mocker.patch("freqtrade.edge.edge_positioning.load_data", mocked_load_data) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - assert edge.calculate(edge_conf['exchange']['pair_whitelist']) + assert edge.calculate(edge_conf["exchange"]["pair_whitelist"]) assert len(edge._cached_pairs) == 2 assert edge._last_updated <= dt_ts() + 2 def test_edge_process_no_data(mocker, edge_conf, caplog): freqtrade = get_patched_freqtradebot(mocker, edge_conf) - mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.001)) - mocker.patch('freqtrade.edge.edge_positioning.refresh_data', MagicMock()) - mocker.patch('freqtrade.edge.edge_positioning.load_data', MagicMock(return_value={})) + mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.001)) + mocker.patch("freqtrade.edge.edge_positioning.refresh_data", MagicMock()) + mocker.patch("freqtrade.edge.edge_positioning.load_data", MagicMock(return_value={})) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - assert not edge.calculate(edge_conf['exchange']['pair_whitelist']) + assert not edge.calculate(edge_conf["exchange"]["pair_whitelist"]) assert len(edge._cached_pairs) == 0 assert log_has("No data found. 
Edge is stopped ...", caplog) assert edge._last_updated == 0 @@ -289,50 +320,55 @@ def test_edge_process_no_data(mocker, edge_conf, caplog): def test_edge_process_no_trades(mocker, edge_conf, caplog): freqtrade = get_patched_freqtradebot(mocker, edge_conf) - mocker.patch(f'{EXMS}.get_fee', return_value=0.001) - mocker.patch('freqtrade.edge.edge_positioning.refresh_data', ) - mocker.patch('freqtrade.edge.edge_positioning.load_data', mocked_load_data) + mocker.patch(f"{EXMS}.get_fee", return_value=0.001) + mocker.patch( + "freqtrade.edge.edge_positioning.refresh_data", + ) + mocker.patch("freqtrade.edge.edge_positioning.load_data", mocked_load_data) # Return empty - mocker.patch('freqtrade.edge.Edge._find_trades_for_stoploss_range', return_value=[]) + mocker.patch("freqtrade.edge.Edge._find_trades_for_stoploss_range", return_value=[]) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) - assert not edge.calculate(edge_conf['exchange']['pair_whitelist']) + assert not edge.calculate(edge_conf["exchange"]["pair_whitelist"]) assert len(edge._cached_pairs) == 0 assert log_has("No trades found.", caplog) def test_edge_process_no_pairs(mocker, edge_conf, caplog): - edge_conf['exchange']['pair_whitelist'] = [] - mocker.patch('freqtrade.freqtradebot.validate_config_consistency') + edge_conf["exchange"]["pair_whitelist"] = [] + mocker.patch("freqtrade.freqtradebot.validate_config_consistency") freqtrade = get_patched_freqtradebot(mocker, edge_conf) - fee_mock = mocker.patch(f'{EXMS}.get_fee', return_value=0.001) - mocker.patch('freqtrade.edge.edge_positioning.refresh_data') - mocker.patch('freqtrade.edge.edge_positioning.load_data', mocked_load_data) + fee_mock = mocker.patch(f"{EXMS}.get_fee", return_value=0.001) + mocker.patch("freqtrade.edge.edge_positioning.refresh_data") + mocker.patch("freqtrade.edge.edge_positioning.load_data", mocked_load_data) # Return empty - mocker.patch('freqtrade.edge.Edge._find_trades_for_stoploss_range', return_value=[]) + mocker.patch("freqtrade.edge.Edge._find_trades_for_stoploss_range", return_value=[]) edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) assert fee_mock.call_count == 0 assert edge.fee is None - assert not edge.calculate(['XRP/USDT']) + assert not edge.calculate(["XRP/USDT"]) assert fee_mock.call_count == 1 assert edge.fee == 0.001 -def test_edge_init_error(mocker, edge_conf,): - edge_conf['stake_amount'] = 0.5 - mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.001)) - with pytest.raises(OperationalException, match='Edge works only with unlimited stake amount'): +def test_edge_init_error(mocker, edge_conf): + edge_conf["stake_amount"] = 0.5 + mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.001)) + with pytest.raises(OperationalException, match="Edge works only with unlimited stake amount"): get_patched_freqtradebot(mocker, edge_conf) -@pytest.mark.parametrize("fee,risk_reward_ratio,expectancy", [ - (0.0005, 306.5384615384, 101.5128205128), - (0.001, 152.6923076923, 50.2307692308), -]) +@pytest.mark.parametrize( + "fee,risk_reward_ratio,expectancy", + [ + (0.0005, 306.5384615384, 101.5128205128), + (0.001, 152.6923076923, 50.2307692308), + ], +) def test_process_expectancy(mocker, edge_conf, fee, risk_reward_ratio, expectancy): - edge_conf['edge']['min_trade_number'] = 2 + edge_conf["edge"]["min_trade_number"] = 2 freqtrade = get_patched_freqtradebot(mocker, edge_conf) def get_fee(*args, **kwargs): @@ -342,38 +378,42 @@ def test_process_expectancy(mocker, edge_conf, fee, risk_reward_ratio, expectanc edge = 
Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:10:00.000000000'), - 'trade_duration': '', - 'open_rate': 17, - 'close_rate': 17, - 'exit_type': 'exit_signal'}, - - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'trade_duration': '', - 'open_rate': 20, - 'close_rate': 20, - 'exit_type': 'exit_signal'}, - - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), - 'trade_duration': '', - 'open_rate': 26, - 'close_rate': 34, - 'exit_type': 'exit_signal'} + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:05:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:10:00.000000000"), + "trade_duration": "", + "open_rate": 17, + "close_rate": 17, + "exit_type": "exit_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "trade_duration": "", + "open_rate": 20, + "close_rate": 20, + "exit_type": "exit_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:30:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:40:00.000000000"), + "trade_duration": "", + "open_rate": 26, + "close_rate": 34, + "exit_type": "exit_signal", + }, ] trades_df = DataFrame(trades) @@ -381,12 +421,12 @@ def test_process_expectancy(mocker, edge_conf, fee, risk_reward_ratio, expectanc final = edge._process_expectancy(trades_df) assert len(final) == 1 - assert 'TEST/BTC' in final - assert final['TEST/BTC'].stoploss == -0.9 - assert round(final['TEST/BTC'].winrate, 10) == 0.3333333333 - assert round(final['TEST/BTC'].risk_reward_ratio, 10) == risk_reward_ratio - assert round(final['TEST/BTC'].required_risk_reward, 10) == 2.0 - assert round(final['TEST/BTC'].expectancy, 10) == expectancy + assert "TEST/BTC" in final + assert final["TEST/BTC"].stoploss == -0.9 + assert round(final["TEST/BTC"].winrate, 10) == 0.3333333333 + assert round(final["TEST/BTC"].risk_reward_ratio, 10) == risk_reward_ratio + assert round(final["TEST/BTC"].required_risk_reward, 10) == 2.0 + assert round(final["TEST/BTC"].expectancy, 10) == expectancy # Pop last item so no trade is profitable trades.pop() @@ -397,154 +437,170 @@ def test_process_expectancy(mocker, edge_conf, fee, risk_reward_ratio, expectanc assert isinstance(final, dict) -def test_process_expectancy_remove_pumps(mocker, edge_conf, fee,): - edge_conf['edge']['min_trade_number'] = 2 - edge_conf['edge']['remove_pumps'] = True +def test_process_expectancy_remove_pumps(mocker, edge_conf, fee): + edge_conf["edge"]["min_trade_number"] = 2 + edge_conf["edge"]["remove_pumps"] = True freqtrade = get_patched_freqtradebot(mocker, edge_conf) freqtrade.exchange.get_fee = fee edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 
'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:10:00.000000000'), - 'open_index': 1, - 'close_index': 1, - 'trade_duration': '', - 'open_rate': 17, - 'close_rate': 15, - 'exit_type': 'sell_signal'}, - - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'open_index': 4, - 'close_index': 4, - 'trade_duration': '', - 'open_rate': 20, - 'close_rate': 10, - 'exit_type': 'sell_signal'}, - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'open_index': 4, - 'close_index': 4, - 'trade_duration': '', - 'open_rate': 20, - 'close_rate': 10, - 'exit_type': 'sell_signal'}, - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'open_index': 4, - 'close_index': 4, - 'trade_duration': '', - 'open_rate': 20, - 'close_rate': 10, - 'exit_type': 'sell_signal'}, - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'open_index': 4, - 'close_index': 4, - 'trade_duration': '', - 'open_rate': 20, - 'close_rate': 10, - 'exit_type': 'sell_signal'}, - - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), - 'open_index': 6, - 'close_index': 7, - 'trade_duration': '', - 'open_rate': 26, - 'close_rate': 134, - 'exit_type': 'sell_signal'} + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:05:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:10:00.000000000"), + "open_index": 1, + "close_index": 1, + "trade_duration": "", + "open_rate": 17, + "close_rate": 15, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "open_index": 4, + "close_index": 4, + "trade_duration": "", + "open_rate": 20, + "close_rate": 10, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "open_index": 4, + "close_index": 4, + "trade_duration": "", + "open_rate": 20, + "close_rate": 10, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "open_index": 4, + "close_index": 4, + "trade_duration": "", + "open_rate": 20, + "close_rate": 10, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": 
np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "open_index": 4, + "close_index": 4, + "trade_duration": "", + "open_rate": 20, + "close_rate": 10, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:30:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:40:00.000000000"), + "open_index": 6, + "close_index": 7, + "trade_duration": "", + "open_rate": 26, + "close_rate": 134, + "exit_type": "sell_signal", + }, ] trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) - assert 'TEST/BTC' in final - assert final['TEST/BTC'].stoploss == -0.9 - assert final['TEST/BTC'].nb_trades == len(trades_df) - 1 - assert round(final['TEST/BTC'].winrate, 10) == 0.0 + assert "TEST/BTC" in final + assert final["TEST/BTC"].stoploss == -0.9 + assert final["TEST/BTC"].nb_trades == len(trades_df) - 1 + assert round(final["TEST/BTC"].winrate, 10) == 0.0 -def test_process_expectancy_only_wins(mocker, edge_conf, fee,): - edge_conf['edge']['min_trade_number'] = 2 +def test_process_expectancy_only_wins(mocker, edge_conf, fee): + edge_conf["edge"]["min_trade_number"] = 2 freqtrade = get_patched_freqtradebot(mocker, edge_conf) freqtrade.exchange.get_fee = fee edge = Edge(edge_conf, freqtrade.exchange, freqtrade.strategy) trades = [ - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:05:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:10:00.000000000'), - 'open_index': 1, - 'close_index': 1, - 'trade_duration': '', - 'open_rate': 15, - 'close_rate': 17, - 'exit_type': 'sell_signal'}, - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:20:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:25:00.000000000'), - 'open_index': 4, - 'close_index': 4, - 'trade_duration': '', - 'open_rate': 10, - 'close_rate': 20, - 'exit_type': 'sell_signal'}, - {'pair': 'TEST/BTC', - 'stoploss': -0.9, - 'profit_percent': '', - 'profit_abs': '', - 'open_date': np.datetime64('2018-10-03T00:30:00.000000000'), - 'close_date': np.datetime64('2018-10-03T00:40:00.000000000'), - 'open_index': 6, - 'close_index': 7, - 'trade_duration': '', - 'open_rate': 26, - 'close_rate': 134, - 'exit_type': 'sell_signal'} + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:05:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:10:00.000000000"), + "open_index": 1, + "close_index": 1, + "trade_duration": "", + "open_rate": 15, + "close_rate": 17, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:20:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:25:00.000000000"), + "open_index": 4, + "close_index": 4, + "trade_duration": "", + "open_rate": 10, + "close_rate": 20, + "exit_type": "sell_signal", + }, + { + "pair": "TEST/BTC", + "stoploss": -0.9, + "profit_percent": "", + "profit_abs": "", + "open_date": np.datetime64("2018-10-03T00:30:00.000000000"), + "close_date": np.datetime64("2018-10-03T00:40:00.000000000"), + "open_index": 6, + "close_index": 7, + "trade_duration": "", + "open_rate": 26, + "close_rate": 134, + 
"exit_type": "sell_signal", + }, ] trades_df = DataFrame(trades) trades_df = edge._fill_calculable_fields(trades_df) final = edge._process_expectancy(trades_df) - assert 'TEST/BTC' in final - assert final['TEST/BTC'].stoploss == -0.9 - assert final['TEST/BTC'].nb_trades == len(trades_df) - assert round(final['TEST/BTC'].winrate, 10) == 1.0 - assert round(final['TEST/BTC'].risk_reward_ratio, 10) == float('inf') - assert round(final['TEST/BTC'].expectancy, 10) == float('inf') + assert "TEST/BTC" in final + assert final["TEST/BTC"].stoploss == -0.9 + assert final["TEST/BTC"].nb_trades == len(trades_df) + assert round(final["TEST/BTC"].winrate, 10) == 1.0 + assert round(final["TEST/BTC"].risk_reward_ratio, 10) == float("inf") + assert round(final["TEST/BTC"].expectancy, 10) == float("inf") diff --git a/tests/exchange/test_binance.py b/tests/exchange/test_binance.py index 625033645..b961c2809 100644 --- a/tests/exchange/test_binance.py +++ b/tests/exchange/test_binance.py @@ -11,167 +11,160 @@ from tests.conftest import EXMS, get_mock_coro, get_patched_exchange, log_has_re from tests.exchange.test_exchange import ccxt_exceptionhandlers -@pytest.mark.parametrize('side,type,time_in_force,expected', [ - ('buy', 'limit', 'gtc', {'timeInForce': 'GTC'}), - ('buy', 'limit', 'IOC', {'timeInForce': 'IOC'}), - ('buy', 'market', 'IOC', {}), - ('buy', 'limit', 'PO', {'timeInForce': 'PO'}), - ('sell', 'limit', 'PO', {'timeInForce': 'PO'}), - ('sell', 'market', 'PO', {}), - ]) +@pytest.mark.parametrize( + "side,type,time_in_force,expected", + [ + ("buy", "limit", "gtc", {"timeInForce": "GTC"}), + ("buy", "limit", "IOC", {"timeInForce": "IOC"}), + ("buy", "market", "IOC", {}), + ("buy", "limit", "PO", {"timeInForce": "PO"}), + ("sell", "limit", "PO", {"timeInForce": "PO"}), + ("sell", "market", "PO", {}), + ], +) def test__get_params_binance(default_conf, mocker, side, type, time_in_force, expected): - exchange = get_patched_exchange(mocker, default_conf, id='binance') + exchange = get_patched_exchange(mocker, default_conf, id="binance") assert exchange._get_params(side, type, 1, False, time_in_force) == expected -@pytest.mark.parametrize('trademode', [TradingMode.FUTURES, TradingMode.SPOT]) -@pytest.mark.parametrize('limitratio,expected,side', [ - (None, 220 * 0.99, "sell"), - (0.99, 220 * 0.99, "sell"), - (0.98, 220 * 0.98, "sell"), - (None, 220 * 1.01, "buy"), - (0.99, 220 * 1.01, "buy"), - (0.98, 220 * 1.02, "buy"), -]) +@pytest.mark.parametrize("trademode", [TradingMode.FUTURES, TradingMode.SPOT]) +@pytest.mark.parametrize( + "limitratio,expected,side", + [ + (None, 220 * 0.99, "sell"), + (0.99, 220 * 0.99, "sell"), + (0.98, 220 * 0.98, "sell"), + (None, 220 * 1.01, "buy"), + (0.99, 220 * 1.01, "buy"), + (0.98, 220 * 1.02, "buy"), + ], +) def test_create_stoploss_order_binance(default_conf, mocker, limitratio, expected, side, trademode): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' - order_type = 'stop_loss_limit' if trademode == TradingMode.SPOT else 'stop' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" + order_type = "stop_loss_limit" if trademode == TradingMode.SPOT else "stop" - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False - default_conf['margin_mode'] = MarginMode.ISOLATED - default_conf['trading_mode'] = trademode - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + api_mock.create_order 
= MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}}) + default_conf["dry_run"] = False + default_conf["margin_mode"] = MarginMode.ISOLATED + default_conf["trading_mode"] = trademode + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'binance') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "binance") with pytest.raises(InvalidOrderException): order = exchange.create_stoploss( - pair='ETH/BTC', + pair="ETH/BTC", amount=1, stop_price=190, side=side, - order_types={'stoploss': 'limit', 'stoploss_on_exchange_limit_ratio': 1.05}, - leverage=1.0 + order_types={"stoploss": "limit", "stoploss_on_exchange_limit_ratio": 1.05}, + leverage=1.0, ) api_mock.create_order.reset_mock() - order_types = {'stoploss': 'limit', 'stoploss_price_type': 'mark'} + order_types = {"stoploss": "limit", "stoploss_price_type": "mark"} if limitratio is not None: - order_types.update({'stoploss_on_exchange_limit_ratio': limitratio}) + order_types.update({"stoploss_on_exchange_limit_ratio": limitratio}) order = exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types=order_types, - side=side, - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types=order_types, side=side, leverage=1.0 ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC' - assert api_mock.create_order.call_args_list[0][1]['type'] == order_type - assert api_mock.create_order.call_args_list[0][1]['side'] == side - assert api_mock.create_order.call_args_list[0][1]['amount'] == 1 + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args_list[0][1]["symbol"] == "ETH/BTC" + assert api_mock.create_order.call_args_list[0][1]["type"] == order_type + assert api_mock.create_order.call_args_list[0][1]["side"] == side + assert api_mock.create_order.call_args_list[0][1]["amount"] == 1 # Price should be 1% below stopprice - assert api_mock.create_order.call_args_list[0][1]['price'] == expected + assert api_mock.create_order.call_args_list[0][1]["price"] == expected if trademode == TradingMode.SPOT: - params_dict = {'stopPrice': 220} + params_dict = {"stopPrice": 220} else: - params_dict = {'stopPrice': 220, 'reduceOnly': True, 'workingType': 'MARK_PRICE'} - assert api_mock.create_order.call_args_list[0][1]['params'] == params_dict + params_dict = {"stopPrice": 220, "reduceOnly": True, "workingType": "MARK_PRICE"} + assert api_mock.create_order.call_args_list[0][1]["params"] == params_dict # test exception handling with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'binance') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "binance") exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side=side, - leverage=1.0) + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) with pytest.raises(InvalidOrderException): api_mock.create_order = MagicMock( - side_effect=ccxt.InvalidOrder("binance Order would trigger immediately.")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'binance') + side_effect=ccxt.InvalidOrder("binance Order would trigger 
immediately.") + ) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "binance") exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side=side, - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 ) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, "binance", - "create_stoploss", "create_order", retries=1, - pair='ETH/BTC', amount=1, stop_price=220, order_types={}, - side=side, leverage=1.0) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + "binance", + "create_stoploss", + "create_order", + retries=1, + pair="ETH/BTC", + amount=1, + stop_price=220, + order_types={}, + side=side, + leverage=1.0, + ) def test_create_stoploss_order_dry_run_binance(default_conf, mocker): api_mock = MagicMock() - order_type = 'stop_loss_limit' - default_conf['dry_run'] = True - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + order_type = "stop_loss_limit" + default_conf["dry_run"] = True + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'binance') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "binance") with pytest.raises(InvalidOrderException): order = exchange.create_stoploss( - pair='ETH/BTC', + pair="ETH/BTC", amount=1, stop_price=190, side="sell", - order_types={'stoploss_on_exchange_limit_ratio': 1.05}, - leverage=1.0 + order_types={"stoploss_on_exchange_limit_ratio": 1.05}, + leverage=1.0, ) api_mock.create_order.reset_mock() order = exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side="sell", - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side="sell", leverage=1.0 ) - assert 'id' in order - assert 'info' in order - assert 'type' in order + assert "id" in order + assert "info" in order + assert "type" in order - assert order['type'] == order_type - assert order['price'] == 220 - assert order['amount'] == 1 + assert order["type"] == order_type + assert order["price"] == 220 + assert order["amount"] == 1 -@pytest.mark.parametrize('sl1,sl2,sl3,side', [ - (1501, 1499, 1501, "sell"), - (1499, 1501, 1499, "buy") -]) +@pytest.mark.parametrize( + "sl1,sl2,sl3,side", [(1501, 1499, 1501, "sell"), (1499, 1501, 1499, "buy")] +) def test_stoploss_adjust_binance(mocker, default_conf, sl1, sl2, sl3, side): - exchange = get_patched_exchange(mocker, default_conf, id='binance') + exchange = get_patched_exchange(mocker, default_conf, id="binance") order = { - 'type': 'stop_loss_limit', - 'price': 1500, - 'stopPrice': 1500, - 'info': {'stopPrice': 1500}, + "type": "stop_loss_limit", + "price": 1500, + "stopPrice": 1500, + "info": {"stopPrice": 1500}, } assert exchange.stoploss_adjust(sl1, order, side=side) assert not exchange.stoploss_adjust(sl2, order, side=side) @@ -179,314 +172,316 @@ def test_stoploss_adjust_binance(mocker, default_conf, sl1, sl2, sl3, side): def test_fill_leverage_tiers_binance(default_conf, mocker): api_mock = MagicMock() - api_mock.fetch_leverage_tiers = MagicMock(return_value={ - 'ADA/BUSD': [ - { - "tier": 1, - "minNotional": 0, - "maxNotional": 100000, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20, - "info": { - "bracket": "1", - "initialLeverage": "20", - "maxNotional": "100000", - "minNotional": "0", - "maintMarginRatio": 
"0.025", - "cum": "0.0" - } - }, - { - "tier": 2, - "minNotional": 100000, - "maxNotional": 500000, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10, - "info": { - "bracket": "2", - "initialLeverage": "10", - "maxNotional": "500000", - "minNotional": "100000", - "maintMarginRatio": "0.05", - "cum": "2500.0" - } - }, - { - "tier": 3, - "minNotional": 500000, - "maxNotional": 1000000, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5, - "info": { - "bracket": "3", - "initialLeverage": "5", - "maxNotional": "1000000", - "minNotional": "500000", - "maintMarginRatio": "0.1", - "cum": "27500.0" - } - }, - { - "tier": 4, - "minNotional": 1000000, - "maxNotional": 2000000, - "maintenanceMarginRate": 0.15, - "maxLeverage": 3, - "info": { - "bracket": "4", - "initialLeverage": "3", - "maxNotional": "2000000", - "minNotional": "1000000", - "maintMarginRatio": "0.15", - "cum": "77500.0" - } - }, - { - "tier": 5, - "minNotional": 2000000, - "maxNotional": 5000000, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2, - "info": { - "bracket": "5", - "initialLeverage": "2", - "maxNotional": "5000000", - "minNotional": "2000000", - "maintMarginRatio": "0.25", - "cum": "277500.0" - } - }, - { - "tier": 6, - "minNotional": 5000000, - "maxNotional": 30000000, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1, - "info": { - "bracket": "6", - "initialLeverage": "1", - "maxNotional": "30000000", - "minNotional": "5000000", - "maintMarginRatio": "0.5", - "cum": "1527500.0" - } - } - ], - "ZEC/USDT": [ - { - "tier": 1, - "minNotional": 0, - "maxNotional": 50000, - "maintenanceMarginRate": 0.01, - "maxLeverage": 50, - "info": { - "bracket": "1", - "initialLeverage": "50", - "maxNotional": "50000", - "minNotional": "0", - "maintMarginRatio": "0.01", - "cum": "0.0" - } - }, - { - "tier": 2, - "minNotional": 50000, - "maxNotional": 150000, - "maintenanceMarginRate": 0.025, - "maxLeverage": 20, - "info": { - "bracket": "2", - "initialLeverage": "20", - "maxNotional": "150000", - "minNotional": "50000", - "maintMarginRatio": "0.025", - "cum": "750.0" - } - }, - { - "tier": 3, - "minNotional": 150000, - "maxNotional": 250000, - "maintenanceMarginRate": 0.05, - "maxLeverage": 10, - "info": { - "bracket": "3", - "initialLeverage": "10", - "maxNotional": "250000", - "minNotional": "150000", - "maintMarginRatio": "0.05", - "cum": "4500.0" - } - }, - { - "tier": 4, - "minNotional": 250000, - "maxNotional": 500000, - "maintenanceMarginRate": 0.1, - "maxLeverage": 5, - "info": { - "bracket": "4", - "initialLeverage": "5", - "maxNotional": "500000", - "minNotional": "250000", - "maintMarginRatio": "0.1", - "cum": "17000.0" - } - }, - { - "tier": 5, - "minNotional": 500000, - "maxNotional": 1000000, - "maintenanceMarginRate": 0.125, - "maxLeverage": 4, - "info": { - "bracket": "5", - "initialLeverage": "4", - "maxNotional": "1000000", - "minNotional": "500000", - "maintMarginRatio": "0.125", - "cum": "29500.0" - } - }, - { - "tier": 6, - "minNotional": 1000000, - "maxNotional": 2000000, - "maintenanceMarginRate": 0.25, - "maxLeverage": 2, - "info": { - "bracket": "6", - "initialLeverage": "2", - "maxNotional": "2000000", - "minNotional": "1000000", - "maintMarginRatio": "0.25", - "cum": "154500.0" - } - }, - { - "tier": 7, - "minNotional": 2000000, - "maxNotional": 30000000, - "maintenanceMarginRate": 0.5, - "maxLeverage": 1, - "info": { - "bracket": "7", - "initialLeverage": "1", - "maxNotional": "30000000", - "minNotional": "2000000", - "maintMarginRatio": "0.5", - "cum": "654500.0" - } - } - ], - }) - 
default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + api_mock.fetch_leverage_tiers = MagicMock( + return_value={ + "ADA/BUSD": [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 100000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "info": { + "bracket": "1", + "initialLeverage": "20", + "maxNotional": "100000", + "minNotional": "0", + "maintMarginRatio": "0.025", + "cum": "0.0", + }, + }, + { + "tier": 2, + "minNotional": 100000, + "maxNotional": 500000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "info": { + "bracket": "2", + "initialLeverage": "10", + "maxNotional": "500000", + "minNotional": "100000", + "maintMarginRatio": "0.05", + "cum": "2500.0", + }, + }, + { + "tier": 3, + "minNotional": 500000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "info": { + "bracket": "3", + "initialLeverage": "5", + "maxNotional": "1000000", + "minNotional": "500000", + "maintMarginRatio": "0.1", + "cum": "27500.0", + }, + }, + { + "tier": 4, + "minNotional": 1000000, + "maxNotional": 2000000, + "maintenanceMarginRate": 0.15, + "maxLeverage": 3, + "info": { + "bracket": "4", + "initialLeverage": "3", + "maxNotional": "2000000", + "minNotional": "1000000", + "maintMarginRatio": "0.15", + "cum": "77500.0", + }, + }, + { + "tier": 5, + "minNotional": 2000000, + "maxNotional": 5000000, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2, + "info": { + "bracket": "5", + "initialLeverage": "2", + "maxNotional": "5000000", + "minNotional": "2000000", + "maintMarginRatio": "0.25", + "cum": "277500.0", + }, + }, + { + "tier": 6, + "minNotional": 5000000, + "maxNotional": 30000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "info": { + "bracket": "6", + "initialLeverage": "1", + "maxNotional": "30000000", + "minNotional": "5000000", + "maintMarginRatio": "0.5", + "cum": "1527500.0", + }, + }, + ], + "ZEC/USDT": [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 50000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50, + "info": { + "bracket": "1", + "initialLeverage": "50", + "maxNotional": "50000", + "minNotional": "0", + "maintMarginRatio": "0.01", + "cum": "0.0", + }, + }, + { + "tier": 2, + "minNotional": 50000, + "maxNotional": 150000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "info": { + "bracket": "2", + "initialLeverage": "20", + "maxNotional": "150000", + "minNotional": "50000", + "maintMarginRatio": "0.025", + "cum": "750.0", + }, + }, + { + "tier": 3, + "minNotional": 150000, + "maxNotional": 250000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "info": { + "bracket": "3", + "initialLeverage": "10", + "maxNotional": "250000", + "minNotional": "150000", + "maintMarginRatio": "0.05", + "cum": "4500.0", + }, + }, + { + "tier": 4, + "minNotional": 250000, + "maxNotional": 500000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "info": { + "bracket": "4", + "initialLeverage": "5", + "maxNotional": "500000", + "minNotional": "250000", + "maintMarginRatio": "0.1", + "cum": "17000.0", + }, + }, + { + "tier": 5, + "minNotional": 500000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4, + "info": { + "bracket": "5", + "initialLeverage": "4", + "maxNotional": "1000000", + "minNotional": "500000", + "maintMarginRatio": "0.125", + "cum": "29500.0", + }, + }, + { + "tier": 6, + "minNotional": 1000000, + "maxNotional": 2000000, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2, + "info": { 
+ "bracket": "6", + "initialLeverage": "2", + "maxNotional": "2000000", + "minNotional": "1000000", + "maintMarginRatio": "0.25", + "cum": "154500.0", + }, + }, + { + "tier": 7, + "minNotional": 2000000, + "maxNotional": 30000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "info": { + "bracket": "7", + "initialLeverage": "1", + "maxNotional": "30000000", + "minNotional": "2000000", + "maintMarginRatio": "0.5", + "cum": "654500.0", + }, + }, + ], + } + ) + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED exchange = get_patched_exchange(mocker, default_conf, api_mock, id="binance") exchange.fill_leverage_tiers() assert exchange._leverage_tiers == { - 'ADA/BUSD': [ + "ADA/BUSD": [ { "minNotional": 0, "maxNotional": 100000, "maintenanceMarginRate": 0.025, "maxLeverage": 20, - "maintAmt": 0.0 + "maintAmt": 0.0, }, { "minNotional": 100000, "maxNotional": 500000, "maintenanceMarginRate": 0.05, "maxLeverage": 10, - "maintAmt": 2500.0 + "maintAmt": 2500.0, }, { "minNotional": 500000, "maxNotional": 1000000, "maintenanceMarginRate": 0.1, "maxLeverage": 5, - "maintAmt": 27500.0 + "maintAmt": 27500.0, }, { "minNotional": 1000000, "maxNotional": 2000000, "maintenanceMarginRate": 0.15, "maxLeverage": 3, - "maintAmt": 77500.0 + "maintAmt": 77500.0, }, { "minNotional": 2000000, "maxNotional": 5000000, "maintenanceMarginRate": 0.25, "maxLeverage": 2, - "maintAmt": 277500.0 + "maintAmt": 277500.0, }, { "minNotional": 5000000, "maxNotional": 30000000, "maintenanceMarginRate": 0.5, "maxLeverage": 1, - "maintAmt": 1527500.0 - } + "maintAmt": 1527500.0, + }, ], "ZEC/USDT": [ { - 'minNotional': 0, - 'maxNotional': 50000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 50, - 'maintAmt': 0.0 + "minNotional": 0, + "maxNotional": 50000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 50, + "maintAmt": 0.0, }, { - 'minNotional': 50000, - 'maxNotional': 150000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 20, - 'maintAmt': 750.0 + "minNotional": 50000, + "maxNotional": 150000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 20, + "maintAmt": 750.0, }, { - 'minNotional': 150000, - 'maxNotional': 250000, - 'maintenanceMarginRate': 0.05, - 'maxLeverage': 10, - 'maintAmt': 4500.0 + "minNotional": 150000, + "maxNotional": 250000, + "maintenanceMarginRate": 0.05, + "maxLeverage": 10, + "maintAmt": 4500.0, }, { - 'minNotional': 250000, - 'maxNotional': 500000, - 'maintenanceMarginRate': 0.1, - 'maxLeverage': 5, - 'maintAmt': 17000.0 + "minNotional": 250000, + "maxNotional": 500000, + "maintenanceMarginRate": 0.1, + "maxLeverage": 5, + "maintAmt": 17000.0, }, { - 'minNotional': 500000, - 'maxNotional': 1000000, - 'maintenanceMarginRate': 0.125, - 'maxLeverage': 4, - 'maintAmt': 29500.0 + "minNotional": 500000, + "maxNotional": 1000000, + "maintenanceMarginRate": 0.125, + "maxLeverage": 4, + "maintAmt": 29500.0, }, { - 'minNotional': 1000000, - 'maxNotional': 2000000, - 'maintenanceMarginRate': 0.25, - 'maxLeverage': 2, - 'maintAmt': 154500.0 + "minNotional": 1000000, + "maxNotional": 2000000, + "maintenanceMarginRate": 0.25, + "maxLeverage": 2, + "maintAmt": 154500.0, }, { - 'minNotional': 2000000, - 'maxNotional': 30000000, - 'maintenanceMarginRate': 0.5, - 'maxLeverage': 1, - 'maintAmt': 654500.0 + "minNotional": 2000000, + "maxNotional": 30000000, + "maintenanceMarginRate": 0.5, + "maxLeverage": 1, + "maintAmt": 654500.0, }, - ] + ], } api_mock = MagicMock() api_mock.load_leverage_tiers = MagicMock() - 
type(api_mock).has = PropertyMock(return_value={'fetchLeverageTiers': True}) + type(api_mock).has = PropertyMock(return_value={"fetchLeverageTiers": True}) ccxt_exceptionhandlers( mocker, @@ -500,8 +495,8 @@ def test_fill_leverage_tiers_binance(default_conf, mocker): def test_fill_leverage_tiers_binance_dryrun(default_conf, mocker, leverage_tiers): api_mock = MagicMock() - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED exchange = get_patched_exchange(mocker, default_conf, api_mock, id="binance") exchange.fill_leverage_tiers() assert len(exchange._leverage_tiers.keys()) > 100 @@ -516,35 +511,42 @@ def test_additional_exchange_init_binance(default_conf, mocker): api_mock = MagicMock() api_mock.fapiPrivateGetPositionSideDual = MagicMock(return_value={"dualSidePosition": True}) api_mock.fapiPrivateGetMultiAssetsMargin = MagicMock(return_value={"multiAssetsMargin": True}) - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED - with pytest.raises(OperationalException, - match=r"Hedge Mode is not supported.*\nMulti-Asset Mode is not supported.*"): + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED + with pytest.raises( + OperationalException, + match=r"Hedge Mode is not supported.*\nMulti-Asset Mode is not supported.*", + ): get_patched_exchange(mocker, default_conf, id="binance", api_mock=api_mock) api_mock.fapiPrivateGetPositionSideDual = MagicMock(return_value={"dualSidePosition": False}) api_mock.fapiPrivateGetMultiAssetsMargin = MagicMock(return_value={"multiAssetsMargin": False}) exchange = get_patched_exchange(mocker, default_conf, id="binance", api_mock=api_mock) assert exchange - ccxt_exceptionhandlers(mocker, default_conf, api_mock, 'binance', - "additional_exchange_init", "fapiPrivateGetPositionSideDual") + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + "binance", + "additional_exchange_init", + "fapiPrivateGetPositionSideDual", + ) def test__set_leverage_binance(mocker, default_conf): - api_mock = MagicMock() api_mock.set_leverage = MagicMock() - type(api_mock).has = PropertyMock(return_value={'setLeverage': True}) - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + type(api_mock).has = PropertyMock(return_value={"setLeverage": True}) + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED exchange = get_patched_exchange(mocker, default_conf, api_mock, id="binance") - exchange._set_leverage(3.2, 'BTC/USDT:USDT') + exchange._set_leverage(3.2, "BTC/USDT:USDT") assert api_mock.set_leverage.call_count == 1 # Leverage is rounded to 3. 
- assert api_mock.set_leverage.call_args_list[0][1]['leverage'] == 3 - assert api_mock.set_leverage.call_args_list[0][1]['symbol'] == 'BTC/USDT:USDT' + assert api_mock.set_leverage.call_args_list[0][1]["leverage"] == 3 + assert api_mock.set_leverage.call_args_list[0][1]["symbol"] == "BTC/USDT:USDT" ccxt_exceptionhandlers( mocker, @@ -559,7 +561,7 @@ def test__set_leverage_binance(mocker, default_conf): @pytest.mark.asyncio -@pytest.mark.parametrize('candle_type', [CandleType.MARK, '']) +@pytest.mark.parametrize("candle_type", [CandleType.MARK, ""]) async def test__async_get_historic_ohlcv_binance(default_conf, mocker, caplog, candle_type): ohlcv = [ [ @@ -572,22 +574,24 @@ async def test__async_get_historic_ohlcv_binance(default_conf, mocker, caplog, c ] ] - exchange = get_patched_exchange(mocker, default_conf, id='binance') + exchange = get_patched_exchange(mocker, default_conf, id="binance") # Monkey-patch async function exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - pair = 'ETH/BTC' + pair = "ETH/BTC" respair, restf, restype, res, _ = await exchange._async_get_historic_ohlcv( - pair, "5m", 1500000000000, is_new_pair=False, candle_type=candle_type) + pair, "5m", 1500000000000, is_new_pair=False, candle_type=candle_type + ) assert respair == pair - assert restf == '5m' + assert restf == "5m" assert restype == candle_type # Call with very old timestamp - causes tons of requests assert exchange._api_async.fetch_ohlcv.call_count > 400 # assert res == ohlcv exchange._api_async.fetch_ohlcv.reset_mock() _, _, _, res, _ = await exchange._async_get_historic_ohlcv( - pair, "5m", 1500000000000, is_new_pair=True, candle_type=candle_type) + pair, "5m", 1500000000000, is_new_pair=True, candle_type=candle_type + ) # Called twice - one "init" call - and one to get the actual data. 
assert exchange._api_async.fetch_ohlcv.call_count == 2 @@ -595,14 +599,17 @@ async def test__async_get_historic_ohlcv_binance(default_conf, mocker, caplog, c assert log_has_re(r"Candle-data for ETH/BTC available starting with .*", caplog) -@pytest.mark.parametrize('pair,nominal_value,mm_ratio,amt', [ - ("XRP/USDT:USDT", 0.0, 0.025, 0), - ("BNB/USDT:USDT", 100.0, 0.0065, 0), - ("BTC/USDT:USDT", 170.30, 0.004, 0), - ("XRP/USDT:USDT", 999999.9, 0.1, 27500.0), - ("BNB/USDT:USDT", 5000000.0, 0.15, 233035.0), - ("BTC/USDT:USDT", 600000000, 0.5, 1.997038E8), -]) +@pytest.mark.parametrize( + "pair,nominal_value,mm_ratio,amt", + [ + ("XRP/USDT:USDT", 0.0, 0.025, 0), + ("BNB/USDT:USDT", 100.0, 0.0065, 0), + ("BTC/USDT:USDT", 170.30, 0.004, 0), + ("XRP/USDT:USDT", 999999.9, 0.1, 27500.0), + ("BNB/USDT:USDT", 5000000.0, 0.15, 233035.0), + ("BTC/USDT:USDT", 600000000, 0.5, 1.997038e8), + ], +) def test_get_maintenance_ratio_and_amt_binance( default_conf, mocker, @@ -612,7 +619,7 @@ def test_get_maintenance_ratio_and_amt_binance( mm_ratio, amt, ): - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, id="binance") exchange._leverage_tiers = leverage_tiers (result_ratio, result_amt) = exchange.get_maintenance_ratio_and_amt(pair, nominal_value) diff --git a/tests/exchange/test_bitpanda.py b/tests/exchange/test_bitpanda.py index de44be986..83561b914 100644 --- a/tests/exchange/test_bitpanda.py +++ b/tests/exchange/test_bitpanda.py @@ -5,43 +5,50 @@ from tests.conftest import EXMS, get_patched_exchange def test_get_trades_for_order(default_conf, mocker): - exchange_name = 'bitpanda' - order_id = 'ABCD-ABCD' + exchange_name = "bitpanda" + order_id = "ABCD-ABCD" since = datetime(2018, 5, 5, 0, 0, 0) default_conf["dry_run"] = False - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) api_mock = MagicMock() - api_mock.fetch_my_trades = MagicMock(return_value=[{'id': 'TTR67E-3PFBD-76IISV', - 'order': 'ABCD-ABCD', - 'info': {'pair': 'XLTCZBTC', - 'time': 1519860024.4388, - 'type': 'buy', - 'ordertype': 'limit', - 'price': '20.00000', - 'cost': '38.62000', - 'fee': '0.06179', - 'vol': '5', - 'id': 'ABCD-ABCD'}, - 'timestamp': 1519860024438, - 'datetime': '2018-02-28T23:20:24.438Z', - 'symbol': 'LTC/BTC', - 'type': 'limit', - 'side': 'buy', - 'price': 165.0, - 'amount': 0.2340606, - 'fee': {'cost': 0.06179, 'currency': 'BTC'} - }]) + api_mock.fetch_my_trades = MagicMock( + return_value=[ + { + "id": "TTR67E-3PFBD-76IISV", + "order": "ABCD-ABCD", + "info": { + "pair": "XLTCZBTC", + "time": 1519860024.4388, + "type": "buy", + "ordertype": "limit", + "price": "20.00000", + "cost": "38.62000", + "fee": "0.06179", + "vol": "5", + "id": "ABCD-ABCD", + }, + "timestamp": 1519860024438, + "datetime": "2018-02-28T23:20:24.438Z", + "symbol": "LTC/BTC", + "type": "limit", + "side": "buy", + "price": 165.0, + "amount": 0.2340606, + "fee": {"cost": 0.06179, "currency": "BTC"}, + } + ] + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - orders = exchange.get_trades_for_order(order_id, 'LTC/BTC', since) + orders = exchange.get_trades_for_order(order_id, "LTC/BTC", since) assert len(orders) == 1 - assert orders[0]['price'] == 165 + assert orders[0]["price"] == 165 assert api_mock.fetch_my_trades.call_count == 1 # since argument should be assert isinstance(api_mock.fetch_my_trades.call_args[0][1], int) - assert 
api_mock.fetch_my_trades.call_args[0][0] == 'LTC/BTC' + assert api_mock.fetch_my_trades.call_args[0][0] == "LTC/BTC" # Same test twice, hardcoded number and doing the same calculation assert api_mock.fetch_my_trades.call_args[0][1] == 1525478395000 # bitpanda requires "to" argument. - assert 'to' in api_mock.fetch_my_trades.call_args[1]['params'] + assert "to" in api_mock.fetch_my_trades.call_args[1]["params"] diff --git a/tests/exchange/test_bybit.py b/tests/exchange/test_bybit.py index 556547d88..8f09b049d 100644 --- a/tests/exchange/test_bybit.py +++ b/tests/exchange/test_bybit.py @@ -11,9 +11,9 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers def test_additional_exchange_init_bybit(default_conf, mocker, caplog): - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED api_mock = MagicMock() api_mock.set_position_mode = MagicMock(return_value={"dualSidePosition": False}) api_mock.is_unified_enabled = MagicMock(return_value=[False, False]) @@ -35,82 +35,84 @@ def test_additional_exchange_init_bybit(default_conf, mocker, caplog): # assert api_mock.is_unified_enabled.call_count == 1 # assert exchange.unified_account is True - ccxt_exceptionhandlers(mocker, default_conf, api_mock, 'bybit', - "additional_exchange_init", "set_position_mode") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, "bybit", "additional_exchange_init", "set_position_mode" + ) async def test_bybit_fetch_funding_rate(default_conf, mocker): - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" api_mock = MagicMock() api_mock.fetch_funding_rate_history = get_mock_coro(return_value=[]) - exchange = get_patched_exchange(mocker, default_conf, id='bybit', api_mock=api_mock) + exchange = get_patched_exchange(mocker, default_conf, id="bybit", api_mock=api_mock) limit = 200 # Test fetch_funding_rate_history (current data) await exchange._fetch_funding_rate_history( - pair='BTC/USDT:USDT', - timeframe='4h', + pair="BTC/USDT:USDT", + timeframe="4h", limit=limit, - ) + ) assert api_mock.fetch_funding_rate_history.call_count == 1 - assert api_mock.fetch_funding_rate_history.call_args_list[0][0][0] == 'BTC/USDT:USDT' + assert api_mock.fetch_funding_rate_history.call_args_list[0][0][0] == "BTC/USDT:USDT" kwargs = api_mock.fetch_funding_rate_history.call_args_list[0][1] - assert kwargs['since'] is None + assert kwargs["since"] is None api_mock.fetch_funding_rate_history.reset_mock() since_ms = 1610000000000 # Test fetch_funding_rate_history (current data) await exchange._fetch_funding_rate_history( - pair='BTC/USDT:USDT', - timeframe='4h', + pair="BTC/USDT:USDT", + timeframe="4h", limit=limit, since_ms=since_ms, - ) + ) assert api_mock.fetch_funding_rate_history.call_count == 1 - assert api_mock.fetch_funding_rate_history.call_args_list[0][0][0] == 'BTC/USDT:USDT' + assert api_mock.fetch_funding_rate_history.call_args_list[0][0][0] == "BTC/USDT:USDT" kwargs = api_mock.fetch_funding_rate_history.call_args_list[0][1] - assert kwargs['since'] == since_ms + assert kwargs["since"] == since_ms def test_bybit_get_funding_fees(default_conf, mocker): now = datetime.now(timezone.utc) - exchange = get_patched_exchange(mocker, default_conf, id='bybit') + exchange = 
get_patched_exchange(mocker, default_conf, id="bybit") exchange._fetch_and_calculate_funding_fees = MagicMock() - exchange.get_funding_fees('BTC/USDT:USDT', 1, False, now) + exchange.get_funding_fees("BTC/USDT:USDT", 1, False, now) assert exchange._fetch_and_calculate_funding_fees.call_count == 0 - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - exchange = get_patched_exchange(mocker, default_conf, id='bybit') + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + exchange = get_patched_exchange(mocker, default_conf, id="bybit") exchange._fetch_and_calculate_funding_fees = MagicMock() - exchange.get_funding_fees('BTC/USDT:USDT', 1, False, now) + exchange.get_funding_fees("BTC/USDT:USDT", 1, False, now) assert exchange._fetch_and_calculate_funding_fees.call_count == 1 def test_bybit_fetch_orders(default_conf, mocker, limit_order): - api_mock = MagicMock() - api_mock.fetch_orders = MagicMock(return_value=[ - limit_order['buy'], - limit_order['sell'], - ]) - api_mock.fetch_open_orders = MagicMock(return_value=[limit_order['buy']]) - api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order['buy']]) + api_mock.fetch_orders = MagicMock( + return_value=[ + limit_order["buy"], + limit_order["sell"], + ] + ) + api_mock.fetch_open_orders = MagicMock(return_value=[limit_order["buy"]]) + api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order["buy"]]) - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) start_time = datetime.now(timezone.utc) - timedelta(days=20) - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='bybit') + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="bybit") # Not available in dry-run - assert exchange.fetch_orders('mocked', start_time) == [] + assert exchange.fetch_orders("mocked", start_time) == [] assert api_mock.fetch_orders.call_count == 0 - default_conf['dry_run'] = False + default_conf["dry_run"] = False - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='bybit') - res = exchange.fetch_orders('mocked', start_time) + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="bybit") + res = exchange.fetch_orders("mocked", start_time) # Bybit will call the endpoint 3 times, as it has a limit of 7 days per call assert api_mock.fetch_orders.call_count == 3 assert api_mock.fetch_open_orders.call_count == 0 @@ -119,53 +121,59 @@ def test_bybit_fetch_orders(default_conf, mocker, limit_order): def test_bybit_fetch_order_canceled_empty(default_conf_usdt, mocker): - default_conf_usdt['dry_run'] = False + default_conf_usdt["dry_run"] = False api_mock = MagicMock() - api_mock.fetch_order = MagicMock(return_value={ - 'id': '123', - 'symbol': 'BTC/USDT', - 'status': 'canceled', - 'filled': 0.0, - 'remaining': 0.0, - 'amount': 20.0, - }) + api_mock.fetch_order = MagicMock( + return_value={ + "id": "123", + "symbol": "BTC/USDT", + "status": "canceled", + "filled": 0.0, + "remaining": 0.0, + "amount": 20.0, + } + ) mocker.patch(f"{EXMS}.exchange_has", return_value=True) - exchange = get_patched_exchange(mocker, default_conf_usdt, api_mock, id='bybit') + exchange = get_patched_exchange(mocker, default_conf_usdt, api_mock, id="bybit") - res = exchange.fetch_order('123', 'BTC/USDT') - assert res['remaining'] is None - assert res['filled'] == 0.0 - assert res['amount'] == 20.0 - assert res['status'] == 'canceled' + res = exchange.fetch_order("123", "BTC/USDT") + assert 
res["remaining"] is None + assert res["filled"] == 0.0 + assert res["amount"] == 20.0 + assert res["status"] == "canceled" - api_mock.fetch_order = MagicMock(return_value={ - 'id': '123', - 'symbol': 'BTC/USDT', - 'status': 'canceled', - 'filled': 0.0, - 'remaining': 20.0, - 'amount': 20.0, - }) + api_mock.fetch_order = MagicMock( + return_value={ + "id": "123", + "symbol": "BTC/USDT", + "status": "canceled", + "filled": 0.0, + "remaining": 20.0, + "amount": 20.0, + } + ) # Don't touch orders which return correctly. - res1 = exchange.fetch_order('123', 'BTC/USDT') - assert res1['remaining'] == 20.0 - assert res1['filled'] == 0.0 - assert res1['amount'] == 20.0 - assert res1['status'] == 'canceled' + res1 = exchange.fetch_order("123", "BTC/USDT") + assert res1["remaining"] == 20.0 + assert res1["filled"] == 0.0 + assert res1["amount"] == 20.0 + assert res1["status"] == "canceled" # Reverse test - remaining is not touched - api_mock.fetch_order = MagicMock(return_value={ - 'id': '124', - 'symbol': 'BTC/USDT', - 'status': 'open', - 'filled': 0.0, - 'remaining': 20.0, - 'amount': 20.0, - }) - res2 = exchange.fetch_order('123', 'BTC/USDT') - assert res2['remaining'] == 20.0 - assert res2['filled'] == 0.0 - assert res2['amount'] == 20.0 - assert res2['status'] == 'open' + api_mock.fetch_order = MagicMock( + return_value={ + "id": "124", + "symbol": "BTC/USDT", + "status": "open", + "filled": 0.0, + "remaining": 20.0, + "amount": 20.0, + } + ) + res2 = exchange.fetch_order("123", "BTC/USDT") + assert res2["remaining"] == 20.0 + assert res2["filled"] == 0.0 + assert res2["amount"] == 20.0 + assert res2["status"] == "open" diff --git a/tests/exchange/test_exchange.py b/tests/exchange/test_exchange.py index c8293965a..520de1c0f 100644 --- a/tests/exchange/test_exchange.py +++ b/tests/exchange/test_exchange.py @@ -11,78 +11,110 @@ from numpy import NaN from pandas import DataFrame from freqtrade.enums import CandleType, MarginMode, RunMode, TradingMode -from freqtrade.exceptions import (ConfigurationError, DDosProtection, DependencyException, - ExchangeError, InsufficientFundsError, InvalidOrderException, - OperationalException, PricingError, TemporaryError) -from freqtrade.exchange import (Binance, Bybit, Exchange, Kraken, market_is_active, - timeframe_to_prev_date) -from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, API_RETRY_COUNT, - calculate_backoff, remove_exchange_credentials) +from freqtrade.exceptions import ( + ConfigurationError, + DDosProtection, + DependencyException, + ExchangeError, + InsufficientFundsError, + InvalidOrderException, + OperationalException, + PricingError, + TemporaryError, +) +from freqtrade.exchange import ( + Binance, + Bybit, + Exchange, + Kraken, + market_is_active, + timeframe_to_prev_date, +) +from freqtrade.exchange.common import ( + API_FETCH_ORDER_RETRY_COUNT, + API_RETRY_COUNT, + calculate_backoff, + remove_exchange_credentials, +) from freqtrade.resolvers.exchange_resolver import ExchangeResolver from freqtrade.util import dt_now, dt_ts -from tests.conftest import (EXMS, generate_test_data_raw, get_mock_coro, get_patched_exchange, - log_has, log_has_re, num_log_has_re) +from tests.conftest import ( + EXMS, + generate_test_data_raw, + get_mock_coro, + get_patched_exchange, + log_has, + log_has_re, + num_log_has_re, +) # Make sure to always keep one exchange here which is NOT subclassed!! 
-EXCHANGES = ['binance', 'kraken', 'gate', 'kucoin', 'bybit', 'okx'] +EXCHANGES = ["binance", "kraken", "gate", "kucoin", "bybit", "okx"] get_entry_rate_data = [ - ('other', 20, 19, 10, 0.0, 20), # Full ask side - ('ask', 20, 19, 10, 0.0, 20), # Full ask side - ('ask', 20, 19, 10, 1.0, 10), # Full last side - ('ask', 20, 19, 10, 0.5, 15), # Between ask and last - ('ask', 20, 19, 10, 0.7, 13), # Between ask and last - ('ask', 20, 19, 10, 0.3, 17), # Between ask and last - ('ask', 5, 6, 10, 1.0, 5), # last bigger than ask - ('ask', 5, 6, 10, 0.5, 5), # last bigger than ask - ('ask', 20, 19, 10, None, 20), # price_last_balance missing - ('ask', 10, 20, None, 0.5, 10), # last not available - uses ask - ('ask', 4, 5, None, 0.5, 4), # last not available - uses ask - ('ask', 4, 5, None, 1, 4), # last not available - uses ask - ('ask', 4, 5, None, 0, 4), # last not available - uses ask - ('same', 21, 20, 10, 0.0, 20), # Full bid side - ('bid', 21, 20, 10, 0.0, 20), # Full bid side - ('bid', 21, 20, 10, 1.0, 10), # Full last side - ('bid', 21, 20, 10, 0.5, 15), # Between bid and last - ('bid', 21, 20, 10, 0.7, 13), # Between bid and last - ('bid', 21, 20, 10, 0.3, 17), # Between bid and last - ('bid', 6, 5, 10, 1.0, 5), # last bigger than bid - ('bid', 21, 20, 10, None, 20), # price_last_balance missing - ('bid', 6, 5, 10, 0.5, 5), # last bigger than bid - ('bid', 21, 20, None, 0.5, 20), # last not available - uses bid - ('bid', 6, 5, None, 0.5, 5), # last not available - uses bid - ('bid', 6, 5, None, 1, 5), # last not available - uses bid - ('bid', 6, 5, None, 0, 5), # last not available - uses bid + ("other", 20, 19, 10, 0.0, 20), # Full ask side + ("ask", 20, 19, 10, 0.0, 20), # Full ask side + ("ask", 20, 19, 10, 1.0, 10), # Full last side + ("ask", 20, 19, 10, 0.5, 15), # Between ask and last + ("ask", 20, 19, 10, 0.7, 13), # Between ask and last + ("ask", 20, 19, 10, 0.3, 17), # Between ask and last + ("ask", 5, 6, 10, 1.0, 5), # last bigger than ask + ("ask", 5, 6, 10, 0.5, 5), # last bigger than ask + ("ask", 20, 19, 10, None, 20), # price_last_balance missing + ("ask", 10, 20, None, 0.5, 10), # last not available - uses ask + ("ask", 4, 5, None, 0.5, 4), # last not available - uses ask + ("ask", 4, 5, None, 1, 4), # last not available - uses ask + ("ask", 4, 5, None, 0, 4), # last not available - uses ask + ("same", 21, 20, 10, 0.0, 20), # Full bid side + ("bid", 21, 20, 10, 0.0, 20), # Full bid side + ("bid", 21, 20, 10, 1.0, 10), # Full last side + ("bid", 21, 20, 10, 0.5, 15), # Between bid and last + ("bid", 21, 20, 10, 0.7, 13), # Between bid and last + ("bid", 21, 20, 10, 0.3, 17), # Between bid and last + ("bid", 6, 5, 10, 1.0, 5), # last bigger than bid + ("bid", 21, 20, 10, None, 20), # price_last_balance missing + ("bid", 6, 5, 10, 0.5, 5), # last bigger than bid + ("bid", 21, 20, None, 0.5, 20), # last not available - uses bid + ("bid", 6, 5, None, 0.5, 5), # last not available - uses bid + ("bid", 6, 5, None, 1, 5), # last not available - uses bid + ("bid", 6, 5, None, 0, 5), # last not available - uses bid ] get_exit_rate_data = [ - ('bid', 12.0, 11.0, 11.5, 0.0, 11.0), # full bid side - ('bid', 12.0, 11.0, 11.5, 1.0, 11.5), # full last side - ('bid', 12.0, 11.0, 11.5, 0.5, 11.25), # between bid and lat - ('bid', 12.0, 11.2, 10.5, 0.0, 11.2), # Last smaller than bid - ('bid', 12.0, 11.2, 10.5, 1.0, 11.2), # Last smaller than bid - uses bid - ('bid', 12.0, 11.2, 10.5, 0.5, 11.2), # Last smaller than bid - uses bid - ('bid', 0.003, 0.002, 0.005, 0.0, 0.002), - ('bid', 0.003, 
0.002, 0.005, None, 0.002), - ('ask', 12.0, 11.0, 12.5, 0.0, 12.0), # full ask side - ('ask', 12.0, 11.0, 12.5, 1.0, 12.5), # full last side - ('ask', 12.0, 11.0, 12.5, 0.5, 12.25), # between bid and lat - ('ask', 12.2, 11.2, 10.5, 0.0, 12.2), # Last smaller than ask - ('ask', 12.0, 11.0, 10.5, 1.0, 12.0), # Last smaller than ask - uses ask - ('ask', 12.0, 11.2, 10.5, 0.5, 12.0), # Last smaller than ask - uses ask - ('ask', 10.0, 11.0, 11.0, 0.0, 10.0), - ('ask', 10.11, 11.2, 11.0, 0.0, 10.11), - ('ask', 0.001, 0.002, 11.0, 0.0, 0.001), - ('ask', 0.006, 1.0, 11.0, 0.0, 0.006), - ('ask', 0.006, 1.0, 11.0, None, 0.006), + ("bid", 12.0, 11.0, 11.5, 0.0, 11.0), # full bid side + ("bid", 12.0, 11.0, 11.5, 1.0, 11.5), # full last side + ("bid", 12.0, 11.0, 11.5, 0.5, 11.25), # between bid and lat + ("bid", 12.0, 11.2, 10.5, 0.0, 11.2), # Last smaller than bid + ("bid", 12.0, 11.2, 10.5, 1.0, 11.2), # Last smaller than bid - uses bid + ("bid", 12.0, 11.2, 10.5, 0.5, 11.2), # Last smaller than bid - uses bid + ("bid", 0.003, 0.002, 0.005, 0.0, 0.002), + ("bid", 0.003, 0.002, 0.005, None, 0.002), + ("ask", 12.0, 11.0, 12.5, 0.0, 12.0), # full ask side + ("ask", 12.0, 11.0, 12.5, 1.0, 12.5), # full last side + ("ask", 12.0, 11.0, 12.5, 0.5, 12.25), # between bid and lat + ("ask", 12.2, 11.2, 10.5, 0.0, 12.2), # Last smaller than ask + ("ask", 12.0, 11.0, 10.5, 1.0, 12.0), # Last smaller than ask - uses ask + ("ask", 12.0, 11.2, 10.5, 0.5, 12.0), # Last smaller than ask - uses ask + ("ask", 10.0, 11.0, 11.0, 0.0, 10.0), + ("ask", 10.11, 11.2, 11.0, 0.0, 10.11), + ("ask", 0.001, 0.002, 11.0, 0.0, 0.001), + ("ask", 0.006, 1.0, 11.0, 0.0, 0.006), + ("ask", 0.006, 1.0, 11.0, None, 0.006), ] -def ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - fun, mock_ccxt_fun, retries=API_RETRY_COUNT + 1, **kwargs): - - with patch('freqtrade.exchange.common.time.sleep'): +def ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + fun, + mock_ccxt_fun, + retries=API_RETRY_COUNT + 1, + **kwargs, +): + with patch("freqtrade.exchange.common.time.sleep"): with pytest.raises(DDosProtection): api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.DDoSProtection("DDos")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) @@ -102,10 +134,10 @@ def ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, assert api_mock.__dict__[mock_ccxt_fun].call_count == 1 -async def async_ccxt_exception(mocker, default_conf, api_mock, fun, mock_ccxt_fun, - retries=API_RETRY_COUNT + 1, **kwargs): - - with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)): +async def async_ccxt_exception( + mocker, default_conf, api_mock, fun, mock_ccxt_fun, retries=API_RETRY_COUNT + 1, **kwargs +): + with patch("freqtrade.exchange.common.asyncio.sleep", get_mock_coro(None)): with pytest.raises(DDosProtection): api_mock.__dict__[mock_ccxt_fun] = MagicMock(side_effect=ccxt.DDoSProtection("Dooh")) exchange = get_patched_exchange(mocker, default_conf, api_mock) @@ -131,35 +163,35 @@ async def async_ccxt_exception(mocker, default_conf, api_mock, fun, mock_ccxt_fu def test_init(default_conf, mocker, caplog): caplog.set_level(logging.INFO) get_patched_exchange(mocker, default_conf) - assert log_has('Instance is running with dry_run enabled', caplog) + assert log_has("Instance is running with dry_run enabled", caplog) def test_remove_exchange_credentials(default_conf) -> None: conf = deepcopy(default_conf) - 
remove_exchange_credentials(conf['exchange'], False) + remove_exchange_credentials(conf["exchange"], False) - assert conf['exchange']['key'] != '' - assert conf['exchange']['secret'] != '' + assert conf["exchange"]["key"] != "" + assert conf["exchange"]["secret"] != "" - remove_exchange_credentials(conf['exchange'], True) - assert conf['exchange']['key'] == '' - assert conf['exchange']['secret'] == '' - assert conf['exchange']['password'] == '' - assert conf['exchange']['uid'] == '' + remove_exchange_credentials(conf["exchange"], True) + assert conf["exchange"]["key"] == "" + assert conf["exchange"]["secret"] == "" + assert conf["exchange"]["password"] == "" + assert conf["exchange"]["uid"] == "" def test_init_ccxt_kwargs(default_conf, mocker, caplog): - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_stakecurrency') - aei_mock = mocker.patch(f'{EXMS}.additional_exchange_init') + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_stakecurrency") + aei_mock = mocker.patch(f"{EXMS}.additional_exchange_init") caplog.set_level(logging.INFO) conf = copy.deepcopy(default_conf) - conf['exchange']['ccxt_async_config'] = {'aiohttp_trust_env': True, 'asyncio_loop': True} + conf["exchange"]["ccxt_async_config"] = {"aiohttp_trust_env": True, "asyncio_loop": True} ex = Exchange(conf) assert log_has( - "Applying additional ccxt config: {'aiohttp_trust_env': True, 'asyncio_loop': True}", - caplog) + "Applying additional ccxt config: {'aiohttp_trust_env': True, 'asyncio_loop': True}", caplog + ) assert ex._api_async.aiohttp_trust_env assert not ex._api.aiohttp_trust_env assert aei_mock.call_count == 1 @@ -167,26 +199,26 @@ def test_init_ccxt_kwargs(default_conf, mocker, caplog): # Reset logging and config caplog.clear() conf = copy.deepcopy(default_conf) - conf['exchange']['ccxt_config'] = {'TestKWARG': 11} - conf['exchange']['ccxt_sync_config'] = {'TestKWARG44': 11} - conf['exchange']['ccxt_async_config'] = {'asyncio_loop': True} + conf["exchange"]["ccxt_config"] = {"TestKWARG": 11} + conf["exchange"]["ccxt_sync_config"] = {"TestKWARG44": 11} + conf["exchange"]["ccxt_async_config"] = {"asyncio_loop": True} asynclogmsg = "Applying additional ccxt config: {'TestKWARG': 11, 'asyncio_loop': True}" ex = Exchange(conf) assert not ex._api_async.aiohttp_trust_env - assert hasattr(ex._api, 'TestKWARG') + assert hasattr(ex._api, "TestKWARG") assert ex._api.TestKWARG == 11 # ccxt_config is assigned to both sync and async - assert not hasattr(ex._api_async, 'TestKWARG44') + assert not hasattr(ex._api_async, "TestKWARG44") - assert hasattr(ex._api_async, 'TestKWARG') + assert hasattr(ex._api_async, "TestKWARG") assert log_has("Applying additional ccxt config: {'TestKWARG': 11, 'TestKWARG44': 11}", caplog) assert log_has(asynclogmsg, caplog) # Test additional headers case - Exchange._ccxt_params = {'hello': 'world'} + Exchange._ccxt_params = {"hello": "world"} ex = Exchange(conf) assert log_has("Applying additional ccxt config: {'TestKWARG': 11, 'TestKWARG44': 11}", caplog) - assert ex._api.hello == 'world' + assert ex._api.hello == "world" assert ex._ccxt_config == {} Exchange._headers = {} @@ -194,68 +226,74 @@ def test_init_ccxt_kwargs(default_conf, mocker, caplog): def test_destroy(default_conf, mocker, caplog): caplog.set_level(logging.DEBUG) get_patched_exchange(mocker, default_conf) - assert log_has('Exchange object destroyed, closing async loop', caplog) + assert log_has("Exchange object destroyed, closing async 
loop", caplog) def test_init_exception(default_conf, mocker): - default_conf['exchange']['name'] = 'wrong_exchange_name' + default_conf["exchange"]["name"] = "wrong_exchange_name" - with pytest.raises(OperationalException, - match=f"Exchange {default_conf['exchange']['name']} is not supported"): + with pytest.raises( + OperationalException, match=f"Exchange {default_conf['exchange']['name']} is not supported" + ): Exchange(default_conf) - default_conf['exchange']['name'] = 'binance' - with pytest.raises(OperationalException, - match=f"Exchange {default_conf['exchange']['name']} is not supported"): + default_conf["exchange"]["name"] = "binance" + with pytest.raises( + OperationalException, match=f"Exchange {default_conf['exchange']['name']} is not supported" + ): mocker.patch("ccxt.binance", MagicMock(side_effect=AttributeError)) Exchange(default_conf) - with pytest.raises(OperationalException, - match=r"Initialization of ccxt failed. Reason: DeadBeef"): + with pytest.raises( + OperationalException, match=r"Initialization of ccxt failed. Reason: DeadBeef" + ): mocker.patch("ccxt.binance", MagicMock(side_effect=ccxt.BaseError("DeadBeef"))) Exchange(default_conf) def test_exchange_resolver(default_conf, mocker, caplog): - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=MagicMock())) - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') - default_conf['exchange']['name'] = 'zaif' + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=MagicMock())) + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") + default_conf["exchange"]["name"] = "zaif" exchange = ExchangeResolver.load_exchange(default_conf) assert isinstance(exchange, Exchange) assert log_has_re(r"No .* specific subclass found. Using the generic class instead.", caplog) caplog.clear() - default_conf['exchange']['name'] = 'Bybit' + default_conf["exchange"]["name"] = "Bybit" exchange = ExchangeResolver.load_exchange(default_conf) assert isinstance(exchange, Exchange) assert isinstance(exchange, Bybit) - assert not log_has_re(r"No .* specific subclass found. Using the generic class instead.", - caplog) + assert not log_has_re( + r"No .* specific subclass found. Using the generic class instead.", caplog + ) caplog.clear() - default_conf['exchange']['name'] = 'kraken' + default_conf["exchange"]["name"] = "kraken" exchange = ExchangeResolver.load_exchange(default_conf) assert isinstance(exchange, Exchange) assert isinstance(exchange, Kraken) assert not isinstance(exchange, Binance) - assert not log_has_re(r"No .* specific subclass found. Using the generic class instead.", - caplog) + assert not log_has_re( + r"No .* specific subclass found. Using the generic class instead.", caplog + ) - default_conf['exchange']['name'] = 'binance' + default_conf["exchange"]["name"] = "binance" exchange = ExchangeResolver.load_exchange(default_conf) assert isinstance(exchange, Exchange) assert isinstance(exchange, Binance) assert not isinstance(exchange, Kraken) - assert not log_has_re(r"No .* specific subclass found. Using the generic class instead.", - caplog) + assert not log_has_re( + r"No .* specific subclass found. 
Using the generic class instead.", caplog + ) # Test mapping - default_conf['exchange']['name'] = 'binanceus' + default_conf["exchange"]["name"] = "binanceus" exchange = ExchangeResolver.load_exchange(default_conf) assert isinstance(exchange, Exchange) assert isinstance(exchange, Binance) @@ -287,201 +325,202 @@ def test_validate_order_time_in_force(default_conf, mocker, caplog): ex.validate_order_time_in_force(tif2) -@pytest.mark.parametrize("price,precision_mode,precision,expected", [ - (2.34559, 2, 4, 0.0001), - (2.34559, 2, 5, 0.00001), - (2.34559, 2, 3, 0.001), - (2.9999, 2, 3, 0.001), - (200.0511, 2, 3, 0.001), - # Tests for Tick_size - (2.34559, 4, 0.0001, 0.0001), - (2.34559, 4, 0.00001, 0.00001), - (2.34559, 4, 0.0025, 0.0025), - (2.9909, 4, 0.0025, 0.0025), - (234.43, 4, 0.5, 0.5), - (234.43, 4, 0.0025, 0.0025), - (234.43, 4, 0.00013, 0.00013), - -]) +@pytest.mark.parametrize( + "price,precision_mode,precision,expected", + [ + (2.34559, 2, 4, 0.0001), + (2.34559, 2, 5, 0.00001), + (2.34559, 2, 3, 0.001), + (2.9999, 2, 3, 0.001), + (200.0511, 2, 3, 0.001), + # Tests for Tick_size + (2.34559, 4, 0.0001, 0.0001), + (2.34559, 4, 0.00001, 0.00001), + (2.34559, 4, 0.0025, 0.0025), + (2.9909, 4, 0.0025, 0.0025), + (234.43, 4, 0.5, 0.5), + (234.43, 4, 0.0025, 0.0025), + (234.43, 4, 0.00013, 0.00013), + ], +) def test_price_get_one_pip(default_conf, mocker, price, precision_mode, precision, expected): - markets = PropertyMock(return_value={'ETH/BTC': {'precision': {'price': precision}}}) + markets = PropertyMock(return_value={"ETH/BTC": {"precision": {"price": precision}}}) exchange = get_patched_exchange(mocker, default_conf, id="binance") - mocker.patch(f'{EXMS}.markets', markets) - mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=precision_mode)) - pair = 'ETH/BTC' + mocker.patch(f"{EXMS}.markets", markets) + mocker.patch(f"{EXMS}.precisionMode", PropertyMock(return_value=precision_mode)) + pair = "ETH/BTC" assert pytest.approx(exchange.price_get_one_pip(pair, price)) == expected def test__get_stake_amount_limit(mocker, default_conf) -> None: - exchange = get_patched_exchange(mocker, default_conf, id="binance") stoploss = -0.05 - markets = {'ETH/BTC': {'symbol': 'ETH/BTC'}} + markets = {"ETH/BTC": {"symbol": "ETH/BTC"}} # no pair found - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - with pytest.raises(ValueError, match=r'.*get market information.*'): - exchange.get_min_pair_stake_amount('BNB/BTC', 1, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + with pytest.raises(ValueError, match=r".*get market information.*"): + exchange.get_min_pair_stake_amount("BNB/BTC", 1, stoploss) # no cost/amount Min markets["ETH/BTC"]["limits"] = { - 'cost': {'min': None, 'max': None}, - 'amount': {'min': None, 'max': None}, + "cost": {"min": None, "max": None}, + "amount": {"min": None, "max": None}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - result = exchange.get_min_pair_stake_amount('ETH/BTC', 1, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 1, stoploss) assert result is None - result = exchange.get_max_pair_stake_amount('ETH/BTC', 1) - assert result == float('inf') + result = exchange.get_max_pair_stake_amount("ETH/BTC", 1) + assert result == float("inf") # min/max cost is set markets["ETH/BTC"]["limits"] = { - 'cost': {'min': 2, 'max': 10000}, - 'amount': {'min': None, 'max': None}, + "cost": {"min": 2, 
"max": 10000}, + "amount": {"min": None, "max": None}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) # min - result = exchange.get_min_pair_stake_amount('ETH/BTC', 1, stoploss) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 1, stoploss) expected_result = 2 * (1 + 0.05) / (1 - abs(stoploss)) assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 1, stoploss, 3.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 1, stoploss, 3.0) assert pytest.approx(result) == expected_result / 3 # max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 10000 # min amount is set markets["ETH/BTC"]["limits"] = { - 'cost': {'min': None, 'max': None}, - 'amount': {'min': 2, 'max': 10000}, + "cost": {"min": None, "max": None}, + "amount": {"min": 2, "max": 10000}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss) expected_result = 2 * 2 * (1 + 0.05) assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 5.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss, 5.0) assert pytest.approx(result) == expected_result / 5 # max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 20000 # min amount and cost are set (cost is minimal and therefore ignored) markets["ETH/BTC"]["limits"] = { - 'cost': {'min': 2, 'max': None}, - 'amount': {'min': 2, 'max': None}, + "cost": {"min": 2, "max": None}, + "amount": {"min": 2, "max": None}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss) expected_result = max(2, 2 * 2) * (1 + 0.05) assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 10) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss, 10) assert pytest.approx(result) == expected_result / 10 # min amount and cost are set (amount is minial) markets["ETH/BTC"]["limits"] = { - 'cost': {'min': 8, 'max': 10000}, - 'amount': {'min': 2, 'max': 500}, + "cost": {"min": 8, "max": 10000}, + "amount": {"min": 2, "max": 500}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss) expected_result = max(8, 2 * 2) * (1 + 0.05) / (1 - abs(stoploss)) assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, stoploss, 7.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, stoploss, 7.0) assert pytest.approx(result) == expected_result / 7.0 # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = 
exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 1000 - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -0.4) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -0.4) expected_result = max(8, 2 * 2) * 1.5 assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -0.4, 8.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -0.4, 8.0) assert pytest.approx(result) == expected_result / 8.0 # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 1000 # Really big stoploss - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -1) expected_result = max(8, 2 * 2) * 1.5 assert pytest.approx(result) == expected_result # With Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1, 12.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -1, 12.0) assert pytest.approx(result) == expected_result / 12 # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 1000 - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2, 12.0) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2, 12.0) assert result == 1000 / 12 - markets["ETH/BTC"]["contractSize"] = '0.01' - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + markets["ETH/BTC"]["contractSize"] = "0.01" + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, id="binance") - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) # Contract size 0.01 - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -1) assert pytest.approx(result) == expected_result * 0.01 # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 10 - markets["ETH/BTC"]["contractSize"] = '10' - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + markets["ETH/BTC"]["contractSize"] = "10" + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) # With Leverage, Contract size 10 - result = exchange.get_min_pair_stake_amount('ETH/BTC', 2, -1, 12.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 2, -1, 12.0) assert pytest.approx(result) == (expected_result / 12) * 10.0 # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2) assert result == 10000 def test_get_min_pair_stake_amount_real_data(mocker, default_conf) -> None: exchange = get_patched_exchange(mocker, default_conf, id="binance") stoploss = -0.05 - markets = {'ETH/BTC': {'symbol': 'ETH/BTC'}} + markets = {"ETH/BTC": {"symbol": "ETH/BTC"}} # ~Real Binance data markets["ETH/BTC"]["limits"] = { - 'cost': {'min': 0.0001, 'max': 4000}, - 'amount': {'min': 0.001, 'max': 10000}, + "cost": {"min": 0.0001, "max": 4000}, + "amount": {"min": 0.001, "max": 10000}, } - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - result = exchange.get_min_pair_stake_amount('ETH/BTC', 0.020405, stoploss) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + result = 
exchange.get_min_pair_stake_amount("ETH/BTC", 0.020405, stoploss) expected_result = max(0.0001, 0.001 * 0.020405) * (1 + 0.05) / (1 - abs(stoploss)) assert round(result, 8) == round(expected_result, 8) # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 2.0) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 2.0) assert result == 4000 # Leverage - result = exchange.get_min_pair_stake_amount('ETH/BTC', 0.020405, stoploss, 3.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 0.020405, stoploss, 3.0) assert round(result, 8) == round(expected_result / 3, 8) # Contract_size markets["ETH/BTC"]["contractSize"] = 0.1 - result = exchange.get_min_pair_stake_amount('ETH/BTC', 0.020405, stoploss, 3.0) + result = exchange.get_min_pair_stake_amount("ETH/BTC", 0.020405, stoploss, 3.0) assert round(result, 8) == round((expected_result / 3), 8) # Max - result = exchange.get_max_pair_stake_amount('ETH/BTC', 12.0) + result = exchange.get_max_pair_stake_amount("ETH/BTC", 12.0) assert result == 4000 def test__load_async_markets(default_conf, mocker, caplog): - mocker.patch(f'{EXMS}._init_ccxt') - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + mocker.patch(f"{EXMS}._init_ccxt") + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") exchange = Exchange(default_conf) exchange._api_async.load_markets = get_mock_coro(None) exchange._load_async_markets() @@ -491,27 +530,27 @@ def test__load_async_markets(default_conf, mocker, caplog): exchange._api_async.load_markets = Mock(side_effect=ccxt.BaseError("deadbeef")) exchange._load_async_markets() - assert log_has('Could not load async markets. Reason: deadbeef', caplog) + assert log_has("Could not load async markets. 
Reason: deadbeef", caplog) def test__load_markets(default_conf, mocker, caplog): caplog.set_level(logging.INFO) api_mock = MagicMock() api_mock.load_markets = MagicMock(side_effect=ccxt.BaseError("SomeError")) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) - assert log_has('Unable to initialize markets.', caplog) + assert log_has("Unable to initialize markets.", caplog) - expected_return = {'ETH/BTC': 'available'} + expected_return = {"ETH/BTC": "available"} api_mock = MagicMock() api_mock.load_markets = MagicMock(return_value=expected_return) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - default_conf['exchange']['pair_whitelist'] = ['ETH/BTC'] + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + default_conf["exchange"]["pair_whitelist"] = ["ETH/BTC"] ex = Exchange(default_conf) assert ex.markets == expected_return @@ -519,15 +558,16 @@ def test__load_markets(default_conf, mocker, caplog): def test_reload_markets(default_conf, mocker, caplog, time_machine): caplog.set_level(logging.DEBUG) - initial_markets = {'ETH/BTC': {}} - updated_markets = {'ETH/BTC': {}, "LTC/BTC": {}} + initial_markets = {"ETH/BTC": {}} + updated_markets = {"ETH/BTC": {}, "LTC/BTC": {}} start_dt = dt_now() time_machine.move_to(start_dt, tick=False) api_mock = MagicMock() api_mock.load_markets = MagicMock(return_value=initial_markets) - default_conf['exchange']['markets_refresh_interval'] = 10 - exchange = get_patched_exchange(mocker, default_conf, api_mock, id="binance", - mock_markets=False) + default_conf["exchange"]["markets_refresh_interval"] = 10 + exchange = get_patched_exchange( + mocker, default_conf, api_mock, id="binance", mock_markets=False + ) exchange._load_async_markets = MagicMock() assert exchange._last_markets_refresh == dt_ts() @@ -545,7 +585,7 @@ def test_reload_markets(default_conf, mocker, caplog, time_machine): exchange.reload_markets() assert exchange.markets == updated_markets assert exchange._load_async_markets.call_count == 1 - assert log_has('Performing scheduled market reload..', caplog) + assert log_has("Performing scheduled market reload..", caplog) # Not called again exchange._load_async_markets.reset_mock() @@ -559,7 +599,7 @@ def test_reload_markets_exception(default_conf, mocker, caplog): api_mock = MagicMock() api_mock.load_markets = MagicMock(side_effect=ccxt.NetworkError("LoadError")) - default_conf['exchange']['markets_refresh_interval'] = 10 + default_conf["exchange"]["markets_refresh_interval"] = 10 exchange = get_patched_exchange(mocker, default_conf, api_mock, id="binance") # less than 10 minutes have passed, no reload @@ -568,71 +608,87 @@ def test_reload_markets_exception(default_conf, mocker, caplog): assert log_has_re(r"Could not reload markets.*", caplog) -@pytest.mark.parametrize("stake_currency", ['ETH', 'BTC', 'USDT']) +@pytest.mark.parametrize("stake_currency", ["ETH", "BTC", "USDT"]) def test_validate_stakecurrency(default_conf, stake_currency, mocker, caplog): 
- default_conf['stake_currency'] = stake_currency + default_conf["stake_currency"] = stake_currency api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/ETH': {'quote': 'ETH'}, 'NEO/USDT': {'quote': 'USDT'}, - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_pricing') + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/ETH": {"quote": "ETH"}, + "NEO/USDT": {"quote": "USDT"}, + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) def test_validate_stakecurrency_error(default_conf, mocker, caplog): - default_conf['stake_currency'] = 'XRP' + default_conf["stake_currency"] = "XRP" api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/ETH': {'quote': 'ETH'}, 'NEO/USDT': {'quote': 'USDT'}, - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - with pytest.raises(ConfigurationError, - match=r'XRP is not available as stake on .*' - 'Available currencies are: BTC, ETH, USDT'): + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/ETH": {"quote": "ETH"}, + "NEO/USDT": {"quote": "USDT"}, + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + with pytest.raises( + ConfigurationError, + match=r"XRP is not available as stake on .*Available currencies are: BTC, ETH, USDT", + ): Exchange(default_conf) - type(api_mock).load_markets = MagicMock(side_effect=ccxt.NetworkError('No connection.')) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) + type(api_mock).load_markets = MagicMock(side_effect=ccxt.NetworkError("No connection.")) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) - with pytest.raises(OperationalException, - match=r'Could not load markets, therefore cannot start\. Please.*'): + with pytest.raises( + OperationalException, match=r"Could not load markets, therefore cannot start\. 
Please.*" + ): Exchange(default_conf) def test_get_quote_currencies(default_conf, mocker): ex = get_patched_exchange(mocker, default_conf) - assert set(ex.get_quote_currencies()) == set(['USD', 'ETH', 'BTC', 'USDT', 'BUSD']) + assert set(ex.get_quote_currencies()) == set(["USD", "ETH", "BTC", "USDT", "BUSD"]) -@pytest.mark.parametrize('pair,expected', [ - ('XRP/BTC', 'BTC'), - ('LTC/USD', 'USD'), - ('ETH/USDT', 'USDT'), - ('XLTCUSDT', 'USDT'), - ('XRP/NOCURRENCY', ''), -]) +@pytest.mark.parametrize( + "pair,expected", + [ + ("XRP/BTC", "BTC"), + ("LTC/USD", "USD"), + ("ETH/USDT", "USDT"), + ("XLTCUSDT", "USDT"), + ("XRP/NOCURRENCY", ""), + ], +) def test_get_pair_quote_currency(default_conf, mocker, pair, expected): ex = get_patched_exchange(mocker, default_conf) assert ex.get_pair_quote_currency(pair) == expected -@pytest.mark.parametrize('pair,expected', [ - ('XRP/BTC', 'XRP'), - ('LTC/USD', 'LTC'), - ('ETH/USDT', 'ETH'), - ('XLTCUSDT', 'LTC'), - ('XRP/NOCURRENCY', ''), -]) +@pytest.mark.parametrize( + "pair,expected", + [ + ("XRP/BTC", "XRP"), + ("LTC/USD", "LTC"), + ("ETH/USDT", "ETH"), + ("XLTCUSDT", "LTC"), + ("XRP/NOCURRENCY", ""), + ], +) def test_get_pair_base_currency(default_conf, mocker, pair, expected): ex = get_patched_exchange(mocker, default_conf) assert ex.get_pair_base_currency(pair) == expected @@ -640,218 +696,236 @@ def test_get_pair_base_currency(default_conf, mocker, pair, expected): def test_validate_pairs(default_conf, mocker): # test exchange.validate_pairs directly api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, - 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/BTC': {'quote': 'BTC'}, - 'NEO/BTC': {'quote': 'BTC'}, - }) - id_mock = PropertyMock(return_value='test_exchange') + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/BTC": {"quote": "BTC"}, + "NEO/BTC": {"quote": "BTC"}, + } + ) + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) def test_validate_pairs_not_available(default_conf, mocker): api_mock = MagicMock() - type(api_mock).markets = PropertyMock(return_value={ - 'XRP/BTC': {'inactive': True, 'base': 'XRP', 'quote': 'BTC'} - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}._load_async_markets') + type(api_mock).markets = PropertyMock( + return_value={"XRP/BTC": {"inactive": True, "base": "XRP", "quote": "BTC"}} + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}._load_async_markets") - with pytest.raises(OperationalException, match=r'not available'): + with pytest.raises(OperationalException, match=r"not available"): Exchange(default_conf) def test_validate_pairs_exception(default_conf, 
mocker, caplog): caplog.set_level(logging.INFO) api_mock = MagicMock() - mocker.patch(f'{EXMS}.name', PropertyMock(return_value='Binance')) + mocker.patch(f"{EXMS}.name", PropertyMock(return_value="Binance")) type(api_mock).markets = PropertyMock(return_value={}) - mocker.patch(f'{EXMS}._init_ccxt', api_mock) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') - mocker.patch(f'{EXMS}._load_async_markets') + mocker.patch(f"{EXMS}._init_ccxt", api_mock) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") + mocker.patch(f"{EXMS}._load_async_markets") - with pytest.raises(OperationalException, match=r'Pair ETH/BTC is not available on Binance'): + with pytest.raises(OperationalException, match=r"Pair ETH/BTC is not available on Binance"): Exchange(default_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={})) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value={})) Exchange(default_conf) - assert log_has('Unable to validate pairs (assuming they are correct).', caplog) + assert log_has("Unable to validate pairs (assuming they are correct).", caplog) def test_validate_pairs_restricted(default_conf, mocker, caplog): api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/BTC': {'quote': 'BTC', 'info': {'prohibitedIn': ['US']}}, - 'NEO/BTC': {'quote': 'BTC', 'info': 'TestString'}, # info can also be a string ... - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_pricing') - mocker.patch(f'{EXMS}.validate_stakecurrency') + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/BTC": {"quote": "BTC", "info": {"prohibitedIn": ["US"]}}, + "NEO/BTC": {"quote": "BTC", "info": "TestString"}, # info can also be a string ... + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_pricing") + mocker.patch(f"{EXMS}.validate_stakecurrency") Exchange(default_conf) - assert log_has("Pair XRP/BTC is restricted for some users on this exchange." - "Please check if you are impacted by this restriction " - "on the exchange and eventually remove XRP/BTC from your whitelist.", caplog) + assert log_has( + "Pair XRP/BTC is restricted for some users on this exchange." 
+ "Please check if you are impacted by this restriction " + "on the exchange and eventually remove XRP/BTC from your whitelist.", + caplog, + ) def test_validate_pairs_stakecompatibility(default_conf, mocker, caplog): api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/BTC': {'quote': 'BTC'}, 'NEO/BTC': {'quote': 'BTC'}, - 'HELLO-WORLD': {'quote': 'BTC'}, - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/BTC": {"quote": "BTC"}, + "NEO/BTC": {"quote": "BTC"}, + "HELLO-WORLD": {"quote": "BTC"}, + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) def test_validate_pairs_stakecompatibility_downloaddata(default_conf, mocker, caplog): api_mock = MagicMock() - default_conf['stake_currency'] = '' - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/BTC': {'quote': 'BTC'}, 'NEO/BTC': {'quote': 'BTC'}, - 'HELLO-WORLD': {'quote': 'BTC'}, - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + default_conf["stake_currency"] = "" + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/BTC": {"quote": "BTC"}, + "NEO/BTC": {"quote": "BTC"}, + "HELLO-WORLD": {"quote": "BTC"}, + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) assert type(api_mock).load_markets.call_count == 1 def test_validate_pairs_stakecompatibility_fail(default_conf, mocker, caplog): - default_conf['exchange']['pair_whitelist'].append('HELLO-WORLD') + default_conf["exchange"]["pair_whitelist"].append("HELLO-WORLD") api_mock = MagicMock() - type(api_mock).load_markets = MagicMock(return_value={ - 'ETH/BTC': {'quote': 'BTC'}, 'LTC/BTC': {'quote': 'BTC'}, - 'XRP/BTC': {'quote': 'BTC'}, 'NEO/BTC': {'quote': 'BTC'}, - 'HELLO-WORLD': {'quote': 'USDT'}, - }) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_stakecurrency') + type(api_mock).load_markets = MagicMock( + return_value={ + "ETH/BTC": {"quote": "BTC"}, + "LTC/BTC": {"quote": "BTC"}, + "XRP/BTC": {"quote": "BTC"}, + "NEO/BTC": {"quote": "BTC"}, + "HELLO-WORLD": {"quote": "USDT"}, + } + ) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_stakecurrency") with 
pytest.raises(OperationalException, match=r"Stake-currency 'BTC' not compatible with.*"): Exchange(default_conf) -@pytest.mark.parametrize("timeframe", [ - ('5m'), ("1m"), ("15m"), ("1h") -]) +@pytest.mark.parametrize("timeframe", [("5m"), ("1m"), ("15m"), ("1h")]) def test_validate_timeframes(default_conf, mocker, timeframe): default_conf["timeframe"] = timeframe api_mock = MagicMock() - id_mock = PropertyMock(return_value='test_exchange') + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock - timeframes = PropertyMock(return_value={'1m': '1m', - '5m': '5m', - '15m': '15m', - '1h': '1h'}) + timeframes = PropertyMock(return_value={"1m": "1m", "5m": "5m", "15m": "15m", "1h": "1h"}) type(api_mock).timeframes = timeframes - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") Exchange(default_conf) def test_validate_timeframes_failed(default_conf, mocker): default_conf["timeframe"] = "3m" api_mock = MagicMock() - id_mock = PropertyMock(return_value='test_exchange') + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock - timeframes = PropertyMock(return_value={'15s': '15s', - '1m': '1m', - '5m': '5m', - '15m': '15m', - '1h': '1h'}) + timeframes = PropertyMock( + return_value={"15s": "15s", "1m": "1m", "5m": "5m", "15m": "15m", "1h": "1h"} + ) type(api_mock).timeframes = timeframes - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') - with pytest.raises(ConfigurationError, - match=r"Invalid timeframe '3m'. This exchange supports.*"): + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") + with pytest.raises( + ConfigurationError, match=r"Invalid timeframe '3m'. This exchange supports.*" + ): Exchange(default_conf) default_conf["timeframe"] = "15s" - with pytest.raises(ConfigurationError, - match=r"Timeframes < 1m are currently not supported by Freqtrade."): + with pytest.raises( + ConfigurationError, match=r"Timeframes < 1m are currently not supported by Freqtrade." + ): Exchange(default_conf) # Will not raise an exception in util mode. 
- default_conf['runmode'] = RunMode.UTIL_EXCHANGE + default_conf["runmode"] = RunMode.UTIL_EXCHANGE Exchange(default_conf) def test_validate_timeframes_emulated_ohlcv_1(default_conf, mocker): default_conf["timeframe"] = "3m" api_mock = MagicMock() - id_mock = PropertyMock(return_value='test_exchange') + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock # delete timeframes so magicmock does not autocreate it del api_mock.timeframes - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_stakecurrency') - with pytest.raises(OperationalException, - match=r'The ccxt library does not provide the list of timeframes ' - r'for the exchange .* and this exchange ' - r'is therefore not supported. *'): + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_stakecurrency") + with pytest.raises( + OperationalException, + match=r"The ccxt library does not provide the list of timeframes " + r"for the exchange .* and this exchange " + r"is therefore not supported. *", + ): Exchange(default_conf) def test_validate_timeframes_emulated_ohlcvi_2(default_conf, mocker): default_conf["timeframe"] = "3m" api_mock = MagicMock() - id_mock = PropertyMock(return_value='test_exchange') + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock # delete timeframes so magicmock does not autocreate it del api_mock.timeframes - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', - MagicMock(return_value={'timeframes': None})) - mocker.patch(f'{EXMS}.validate_pairs', MagicMock()) - mocker.patch(f'{EXMS}.validate_stakecurrency') - with pytest.raises(OperationalException, - match=r'The ccxt library does not provide the list of timeframes ' - r'for the exchange .* and this exchange ' - r'is therefore not supported. *'): + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={"timeframes": None})) + mocker.patch(f"{EXMS}.validate_pairs", MagicMock()) + mocker.patch(f"{EXMS}.validate_stakecurrency") + with pytest.raises( + OperationalException, + match=r"The ccxt library does not provide the list of timeframes " + r"for the exchange .* and this exchange " + r"is therefore not supported. *", + ): Exchange(default_conf) @@ -859,57 +933,54 @@ def test_validate_timeframes_not_in_config(default_conf, mocker): # TODO: this test does not assert ... 
del default_conf["timeframe"] api_mock = MagicMock() - id_mock = PropertyMock(return_value='test_exchange') + id_mock = PropertyMock(return_value="test_exchange") type(api_mock).id = id_mock - timeframes = PropertyMock(return_value={'1m': '1m', - '5m': '5m', - '15m': '15m', - '1h': '1h'}) + timeframes = PropertyMock(return_value={"1m": "1m", "5m": "5m", "15m": "15m", "1h": "1h"}) type(api_mock).timeframes = timeframes - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') - mocker.patch(f'{EXMS}.validate_required_startup_candles') + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") + mocker.patch(f"{EXMS}.validate_required_startup_candles") Exchange(default_conf) def test_validate_pricing(default_conf, mocker): api_mock = MagicMock() has = { - 'fetchL2OrderBook': True, - 'fetchTicker': True, + "fetchL2OrderBook": True, + "fetchTicker": True, } type(api_mock).has = PropertyMock(return_value=has) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_trading_mode_and_margin_mode') - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.name', 'Binance') - default_conf['exchange']['name'] = 'binance' + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_trading_mode_and_margin_mode") + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.name", "Binance") + default_conf["exchange"]["name"] = "binance" ExchangeResolver.load_exchange(default_conf) - has.update({'fetchTicker': False}) + has.update({"fetchTicker": False}) with pytest.raises(OperationalException, match="Ticker pricing not available for .*"): ExchangeResolver.load_exchange(default_conf) - has.update({'fetchTicker': True}) + has.update({"fetchTicker": True}) - default_conf['exit_pricing']['use_order_book'] = True + default_conf["exit_pricing"]["use_order_book"] = True ExchangeResolver.load_exchange(default_conf) - has.update({'fetchL2OrderBook': False}) + has.update({"fetchL2OrderBook": False}) with pytest.raises(OperationalException, match="Orderbook not available for .*"): ExchangeResolver.load_exchange(default_conf) - has.update({'fetchL2OrderBook': True}) + has.update({"fetchL2OrderBook": True}) # Binance has no tickers on futures - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED with pytest.raises(OperationalException, match="Ticker pricing not available for .*"): ExchangeResolver.load_exchange(default_conf) @@ -918,96 +989,97 @@ def test_validate_pricing(default_conf, mocker): def test_validate_ordertypes(default_conf, mocker): api_mock = MagicMock() - type(api_mock).has = 
PropertyMock(return_value={'createMarketOrder': True}) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') + type(api_mock).has = PropertyMock(return_value={"createMarketOrder": True}) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") - default_conf['order_types'] = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'market', - 'stoploss_on_exchange': False + default_conf["order_types"] = { + "entry": "limit", + "exit": "limit", + "stoploss": "market", + "stoploss_on_exchange": False, } Exchange(default_conf) - type(api_mock).has = PropertyMock(return_value={'createMarketOrder': False}) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) + type(api_mock).has = PropertyMock(return_value={"createMarketOrder": False}) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) - default_conf['order_types'] = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'market', - 'stoploss_on_exchange': False + default_conf["order_types"] = { + "entry": "limit", + "exit": "limit", + "stoploss": "market", + "stoploss_on_exchange": False, } - with pytest.raises(OperationalException, - match=r'Exchange .* does not support market orders.'): + with pytest.raises(OperationalException, match=r"Exchange .* does not support market orders."): Exchange(default_conf) - default_conf['order_types'] = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': True + default_conf["order_types"] = { + "entry": "limit", + "exit": "limit", + "stoploss": "limit", + "stoploss_on_exchange": True, } - with pytest.raises(OperationalException, - match=r'On exchange stoploss is not supported for .*'): + with pytest.raises(OperationalException, match=r"On exchange stoploss is not supported for .*"): Exchange(default_conf) -@pytest.mark.parametrize('exchange_name,stopadv, expected', [ - ('binance', 'last', True), - ('binance', 'mark', True), - ('binance', 'index', False), - ('bybit', 'last', True), - ('bybit', 'mark', True), - ('bybit', 'index', True), - ('okx', 'last', True), - ('okx', 'mark', True), - ('okx', 'index', True), - ('gate', 'last', True), - ('gate', 'mark', True), - ('gate', 'index', True), - ]) +@pytest.mark.parametrize( + "exchange_name,stopadv, expected", + [ + ("binance", "last", True), + ("binance", "mark", True), + ("binance", "index", False), + ("bybit", "last", True), + ("bybit", "mark", True), + ("bybit", "index", True), + ("okx", "last", True), + ("okx", "mark", True), + ("okx", "index", True), + ("gate", "last", True), + ("gate", "mark", True), + ("gate", "index", True), + ], +) def test_validate_ordertypes_stop_advanced(default_conf, mocker, exchange_name, stopadv, expected): - api_mock = MagicMock() - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED - type(api_mock).has = PropertyMock(return_value={'createMarketOrder': True}) - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', 
MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_stakecurrency') - mocker.patch(f'{EXMS}.validate_pricing') - default_conf['order_types'] = { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': True, - 'stoploss_price_type': stopadv, + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED + type(api_mock).has = PropertyMock(return_value={"createMarketOrder": True}) + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_stakecurrency") + mocker.patch(f"{EXMS}.validate_pricing") + default_conf["order_types"] = { + "entry": "limit", + "exit": "limit", + "stoploss": "limit", + "stoploss_on_exchange": True, + "stoploss_price_type": stopadv, } - default_conf['exchange']['name'] = exchange_name + default_conf["exchange"]["name"] = exchange_name if expected: ExchangeResolver.load_exchange(default_conf) else: - with pytest.raises(OperationalException, - match=r'On exchange stoploss price type is not supported for .*'): + with pytest.raises( + OperationalException, match=r"On exchange stoploss price type is not supported for .*" + ): ExchangeResolver.load_exchange(default_conf) def test_validate_order_types_not_in_config(default_conf, mocker): api_mock = MagicMock() - mocker.patch(f'{EXMS}._init_ccxt', MagicMock(return_value=api_mock)) - mocker.patch(f'{EXMS}._load_markets', MagicMock(return_value={})) - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}.validate_pricing') - mocker.patch(f'{EXMS}.validate_stakecurrency') + mocker.patch(f"{EXMS}._init_ccxt", MagicMock(return_value=api_mock)) + mocker.patch(f"{EXMS}._load_markets", MagicMock(return_value={})) + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}.validate_pricing") + mocker.patch(f"{EXMS}.validate_stakecurrency") conf = copy.deepcopy(default_conf) Exchange(conf) @@ -1015,82 +1087,81 @@ def test_validate_order_types_not_in_config(default_conf, mocker): def test_validate_required_startup_candles(default_conf, mocker, caplog): api_mock = MagicMock() - mocker.patch(f'{EXMS}.name', PropertyMock(return_value='Binance')) + mocker.patch(f"{EXMS}.name", PropertyMock(return_value="Binance")) - mocker.patch(f'{EXMS}._init_ccxt', api_mock) - mocker.patch(f'{EXMS}.validate_timeframes') - mocker.patch(f'{EXMS}._load_async_markets') - mocker.patch(f'{EXMS}.validate_pairs') - mocker.patch(f'{EXMS}.validate_pricing') - mocker.patch(f'{EXMS}.validate_stakecurrency') + mocker.patch(f"{EXMS}._init_ccxt", api_mock) + mocker.patch(f"{EXMS}.validate_timeframes") + mocker.patch(f"{EXMS}._load_async_markets") + mocker.patch(f"{EXMS}.validate_pairs") + mocker.patch(f"{EXMS}.validate_pricing") + mocker.patch(f"{EXMS}.validate_stakecurrency") - default_conf['startup_candle_count'] = 20 + default_conf["startup_candle_count"] = 20 ex = Exchange(default_conf) assert ex # assumption is that the exchange provides 500 candles per call.s - assert ex.validate_required_startup_candles(200, '5m') == 1 - assert ex.validate_required_startup_candles(499, '5m') == 1 - assert ex.validate_required_startup_candles(600, '5m') == 2 - assert ex.validate_required_startup_candles(501, '5m') 
== 2 - assert ex.validate_required_startup_candles(499, '5m') == 1 - assert ex.validate_required_startup_candles(1000, '5m') == 3 - assert ex.validate_required_startup_candles(2499, '5m') == 5 - assert log_has_re(r'Using 5 calls to get OHLCV. This.*', caplog) + assert ex.validate_required_startup_candles(200, "5m") == 1 + assert ex.validate_required_startup_candles(499, "5m") == 1 + assert ex.validate_required_startup_candles(600, "5m") == 2 + assert ex.validate_required_startup_candles(501, "5m") == 2 + assert ex.validate_required_startup_candles(499, "5m") == 1 + assert ex.validate_required_startup_candles(1000, "5m") == 3 + assert ex.validate_required_startup_candles(2499, "5m") == 5 + assert log_has_re(r"Using 5 calls to get OHLCV. This.*", caplog) - with pytest.raises(OperationalException, match=r'This strategy requires 2500.*'): - ex.validate_required_startup_candles(2500, '5m') + with pytest.raises(OperationalException, match=r"This strategy requires 2500.*"): + ex.validate_required_startup_candles(2500, "5m") # Ensure the same also happens on init - default_conf['startup_candle_count'] = 6000 - with pytest.raises(OperationalException, match=r'This strategy requires 6000.*'): + default_conf["startup_candle_count"] = 6000 + with pytest.raises(OperationalException, match=r"This strategy requires 6000.*"): Exchange(default_conf) # Emulate kraken mode - ex._ft_has['ohlcv_has_history'] = False - with pytest.raises(OperationalException, - match=r'This strategy requires 2500.*, ' - r'which is more than the amount.*'): - ex.validate_required_startup_candles(2500, '5m') + ex._ft_has["ohlcv_has_history"] = False + with pytest.raises( + OperationalException, + match=r"This strategy requires 2500.*, " r"which is more than the amount.*", + ): + ex.validate_required_startup_candles(2500, "5m") def test_exchange_has(default_conf, mocker): exchange = get_patched_exchange(mocker, default_conf) - assert not exchange.exchange_has('ASDFASDF') + assert not exchange.exchange_has("ASDFASDF") api_mock = MagicMock() - type(api_mock).has = PropertyMock(return_value={'deadbeef': True}) + type(api_mock).has = PropertyMock(return_value={"deadbeef": True}) exchange = get_patched_exchange(mocker, default_conf, api_mock) assert exchange.exchange_has("deadbeef") - type(api_mock).has = PropertyMock(return_value={'deadbeef': False}) + type(api_mock).has = PropertyMock(return_value={"deadbeef": False}) exchange = get_patched_exchange(mocker, default_conf, api_mock) assert not exchange.exchange_has("deadbeef") - exchange._ft_has['exchange_has_overrides'] = {'deadbeef': True} + exchange._ft_has["exchange_has_overrides"] = {"deadbeef": True} assert exchange.exchange_has("deadbeef") -@pytest.mark.parametrize("side,leverage", [ - ("buy", 1), - ("buy", 5), - ("sell", 1.0), - ("sell", 5.0), -]) +@pytest.mark.parametrize( + "side,leverage", + [ + ("buy", 1), + ("buy", 5), + ("sell", 1.0), + ("sell", 5.0), + ], +) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_create_dry_run_order(default_conf, mocker, side, exchange_name, leverage): - default_conf['dry_run'] = True + default_conf["dry_run"] = True exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) order = exchange.create_dry_run_order( - pair='ETH/BTC', - ordertype='limit', - side=side, - amount=1, - rate=200, - leverage=leverage + pair="ETH/BTC", ordertype="limit", side=side, amount=1, rate=200, leverage=leverage ) - assert 'id' in order - assert f'dry_run_{side}_' in order["id"] + assert "id" in order + assert f"dry_run_{side}_" in 
order["id"] assert order["side"] == side assert order["type"] == "limit" assert order["symbol"] == "ETH/BTC" @@ -1099,18 +1170,24 @@ def test_create_dry_run_order(default_conf, mocker, side, exchange_name, leverag assert order["cost"] == 1 * 200 -@pytest.mark.parametrize('side,is_short,order_reason', [ - ("buy", False, "entry"), - ("sell", False, "exit"), - ("buy", True, "exit"), - ("sell", True, "entry"), -]) -@pytest.mark.parametrize("order_type,price_side,fee", [ - ("limit", "same", 1.0), - ("limit", "other", 2.0), - ("market", "same", 2.0), - ("market", "other", 2.0), -]) +@pytest.mark.parametrize( + "side,is_short,order_reason", + [ + ("buy", False, "entry"), + ("sell", False, "exit"), + ("buy", True, "exit"), + ("sell", True, "entry"), + ], +) +@pytest.mark.parametrize( + "order_type,price_side,fee", + [ + ("limit", "same", 1.0), + ("limit", "other", 2.0), + ("market", "same", 2.0), + ("market", "other", 2.0), + ], +) def test_create_dry_run_order_fees( default_conf, mocker, @@ -1121,66 +1198,75 @@ def test_create_dry_run_order_fees( price_side, fee, ): - mocker.patch( - f'{EXMS}.get_fee', - side_effect=lambda symbol, taker_or_maker: 2.0 if taker_or_maker == 'taker' else 1.0 - ) - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=price_side == 'other') exchange = get_patched_exchange(mocker, default_conf) + mocker.patch( + f"{EXMS}.get_fee", + side_effect=lambda symbol, taker_or_maker: 2.0 if taker_or_maker == "taker" else 1.0, + ) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=price_side == "other") order = exchange.create_dry_run_order( - pair='LTC/USDT', - ordertype=order_type, - side=side, - amount=10, - rate=2.0, - leverage=1.0 + pair="LTC/USDT", ordertype=order_type, side=side, amount=10, rate=2.0, leverage=1.0 ) - if price_side == 'other' or order_type == 'market': - assert order['fee']['rate'] == fee + if price_side == "other" or order_type == "market": + assert order["fee"]["rate"] == fee return else: - assert order['fee'] is None + assert order["fee"] is None - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=price_side != 'other') + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=price_side != "other") - order1 = exchange.fetch_dry_run_order(order['id']) - assert order1['fee']['rate'] == fee + order1 = exchange.fetch_dry_run_order(order["id"]) + assert order1["fee"]["rate"] == fee -@pytest.mark.parametrize("side,price,filled,converted", [ - # order_book_l2_usd spread: - # best ask: 25.566 - # best bid: 25.563 - ("buy", 25.563, False, False), - ("buy", 25.566, True, False), - ("sell", 25.566, False, False), - ("sell", 25.563, True, False), - ("buy", 29.563, True, True), - ("sell", 21.563, True, True), -]) +@pytest.mark.parametrize( + "side,price,filled,converted", + [ + # order_book_l2_usd spread: + # best ask: 25.566 + # best bid: 25.563 + ("buy", 25.563, False, False), + ("buy", 25.566, True, False), + ("sell", 25.566, False, False), + ("sell", 25.563, True, False), + ("buy", 29.563, True, True), + ("sell", 21.563, True, True), + ], +) @pytest.mark.parametrize("leverage", [1, 2, 5]) @pytest.mark.parametrize("exchange_name", EXCHANGES) -def test_create_dry_run_order_limit_fill(default_conf, mocker, side, price, filled, caplog, - exchange_name, order_book_l2_usd, converted, leverage): - default_conf['dry_run'] = True +def test_create_dry_run_order_limit_fill( + default_conf, + mocker, + side, + price, + filled, + caplog, + exchange_name, + order_book_l2_usd, + converted, + leverage, +): + default_conf["dry_run"] = True exchange = 
get_patched_exchange(mocker, default_conf, id=exchange_name) - mocker.patch.multiple(EXMS, - exchange_has=MagicMock(return_value=True), - fetch_l2_order_book=order_book_l2_usd, - ) + mocker.patch.multiple( + EXMS, + exchange_has=MagicMock(return_value=True), + fetch_l2_order_book=order_book_l2_usd, + ) order = exchange.create_order( - pair='LTC/USDT', - ordertype='limit', + pair="LTC/USDT", + ordertype="limit", side=side, amount=1, rate=price, leverage=leverage, ) assert order_book_l2_usd.call_count == 1 - assert 'id' in order - assert f'dry_run_{side}_' in order["id"] + assert "id" in order + assert f"dry_run_{side}_" in order["id"] assert order["side"] == side if not converted: assert order["average"] == price @@ -1192,110 +1278,108 @@ def test_create_dry_run_order_limit_fill(default_conf, mocker, side, price, fill assert log_has_re(r"Converted .* to market order.*", caplog) assert order["symbol"] == "LTC/USDT" - assert order['status'] == 'open' if not filled else 'closed' + assert order["status"] == "open" if not filled else "closed" order_book_l2_usd.reset_mock() # fetch order again... - order_closed = exchange.fetch_dry_run_order(order['id']) + order_closed = exchange.fetch_dry_run_order(order["id"]) assert order_book_l2_usd.call_count == (1 if not filled else 0) - assert order_closed['status'] == ('open' if not filled else 'closed') - assert order_closed['filled'] == (0 if not filled else 1) - assert order_closed['cost'] == 1 * order_closed['average'] + assert order_closed["status"] == ("open" if not filled else "closed") + assert order_closed["filled"] == (0 if not filled else 1) + assert order_closed["cost"] == 1 * order_closed["average"] order_book_l2_usd.reset_mock() # Empty orderbook test - mocker.patch(f'{EXMS}.fetch_l2_order_book', return_value={'asks': [], 'bids': []}) - exchange._dry_run_open_orders[order['id']]['status'] = 'open' - order_closed = exchange.fetch_dry_run_order(order['id']) + mocker.patch(f"{EXMS}.fetch_l2_order_book", return_value={"asks": [], "bids": []}) + exchange._dry_run_open_orders[order["id"]]["status"] = "open" + order_closed = exchange.fetch_dry_run_order(order["id"]) -@pytest.mark.parametrize("side,rate,amount,endprice", [ - # spread is 25.263-25.266 - ("buy", 25.564, 1, 25.566), - ("buy", 25.564, 100, 25.5672), # Requires interpolation - ("buy", 25.590, 100, 25.5672), # Price above spread ... average is lower - ("buy", 25.564, 1000, 25.575), # More than orderbook return - ("buy", 24.000, 100000, 25.200), # Run into max_slippage of 5% - ("sell", 25.564, 1, 25.563), - ("sell", 25.564, 100, 25.5625), # Requires interpolation - ("sell", 25.510, 100, 25.5625), # price below spread - average is higher - ("sell", 25.564, 1000, 25.5555), # More than orderbook return - ("sell", 27, 10000, 25.65), # max-slippage 5% -]) +@pytest.mark.parametrize( + "side,rate,amount,endprice", + [ + # spread is 25.263-25.266 + ("buy", 25.564, 1, 25.566), + ("buy", 25.564, 100, 25.5672), # Requires interpolation + ("buy", 25.590, 100, 25.5672), # Price above spread ... 
average is lower + ("buy", 25.564, 1000, 25.575), # More than orderbook return + ("buy", 24.000, 100000, 25.200), # Run into max_slippage of 5% + ("sell", 25.564, 1, 25.563), + ("sell", 25.564, 100, 25.5625), # Requires interpolation + ("sell", 25.510, 100, 25.5625), # price below spread - average is higher + ("sell", 25.564, 1000, 25.5555), # More than orderbook return + ("sell", 27, 10000, 25.65), # max-slippage 5% + ], +) @pytest.mark.parametrize("leverage", [1, 2, 5]) @pytest.mark.parametrize("exchange_name", EXCHANGES) -def test_create_dry_run_order_market_fill(default_conf, mocker, side, rate, amount, endprice, - exchange_name, order_book_l2_usd, leverage): - default_conf['dry_run'] = True +def test_create_dry_run_order_market_fill( + default_conf, mocker, side, rate, amount, endprice, exchange_name, order_book_l2_usd, leverage +): + default_conf["dry_run"] = True exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - mocker.patch.multiple(EXMS, - exchange_has=MagicMock(return_value=True), - fetch_l2_order_book=order_book_l2_usd, - ) + mocker.patch.multiple( + EXMS, + exchange_has=MagicMock(return_value=True), + fetch_l2_order_book=order_book_l2_usd, + ) order = exchange.create_order( - pair='LTC/USDT', - ordertype='market', + pair="LTC/USDT", + ordertype="market", side=side, amount=amount, rate=rate, leverage=leverage, ) - assert 'id' in order - assert f'dry_run_{side}_' in order["id"] + assert "id" in order + assert f"dry_run_{side}_" in order["id"] assert order["side"] == side assert order["type"] == "market" assert order["symbol"] == "LTC/USDT" - assert order['status'] == 'closed' - assert order['filled'] == amount - assert order['amount'] == amount - assert pytest.approx(order['cost']) == amount * order['average'] + assert order["status"] == "closed" + assert order["filled"] == amount + assert order["amount"] == amount + assert pytest.approx(order["cost"]) == amount * order["average"] assert round(order["average"], 4) == round(endprice, 4) @pytest.mark.parametrize("side", ["buy", "sell"]) -@pytest.mark.parametrize("ordertype,rate,marketprice", [ - ("market", None, None), - ("market", 200, True), - ("limit", 200, None), - ("stop_loss_limit", 200, None) -]) +@pytest.mark.parametrize( + "ordertype,rate,marketprice", + [ + ("market", None, None), + ("market", 200, True), + ("limit", 200, None), + ("stop_loss_limit", 200, None), + ], +) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_create_order(default_conf, mocker, side, ordertype, rate, marketprice, exchange_name): api_mock = MagicMock() - order_id = f'test_prod_{side}_{randint(0, 10 ** 6)}' + order_id = f"test_prod_{side}_{randint(0, 10 ** 6)}" api_mock.options = {} if not marketprice else {"createMarketBuyOrderRequiresPrice": True} - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - }, - 'symbol': 'XLTCUSDT', - 'amount': 1 - }) - default_conf['dry_run'] = False - default_conf['margin_mode'] = 'isolated' - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) + api_mock.create_order = MagicMock( + return_value={"id": order_id, "info": {"foo": "bar"}, "symbol": "XLTCUSDT", "amount": 1} + ) + default_conf["dry_run"] = False + default_conf["margin_mode"] = "isolated" + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) 
exchange._set_leverage = MagicMock() exchange.set_margin_mode = MagicMock() order = exchange.create_order( - pair='XLTCUSDT', - ordertype=ordertype, - side=side, - amount=1, - rate=rate, - leverage=1.0 + pair="XLTCUSDT", ordertype=ordertype, side=side, amount=1, rate=rate, leverage=1.0 ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert order['amount'] == 1 - assert api_mock.create_order.call_args[0][0] == 'XLTCUSDT' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert order["amount"] == 1 + assert api_mock.create_order.call_args[0][0] == "XLTCUSDT" assert api_mock.create_order.call_args[0][1] == ordertype assert api_mock.create_order.call_args[0][2] == side assert api_mock.create_order.call_args[0][3] == 1 @@ -1303,76 +1387,79 @@ def test_create_order(default_conf, mocker, side, ordertype, rate, marketprice, assert exchange._set_leverage.call_count == 0 assert exchange.set_margin_mode.call_count == 0 - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - }, - 'symbol': 'ADA/USDT:USDT', - 'amount': 1 - }) + api_mock.create_order = MagicMock( + return_value={ + "id": order_id, + "info": {"foo": "bar"}, + "symbol": "ADA/USDT:USDT", + "amount": 1, + } + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange.trading_mode = TradingMode.FUTURES exchange._set_leverage = MagicMock() exchange.set_margin_mode = MagicMock() order = exchange.create_order( - pair='ADA/USDT:USDT', - ordertype=ordertype, - side=side, - amount=1, - rate=200, - leverage=3.0 + pair="ADA/USDT:USDT", ordertype=ordertype, side=side, amount=1, rate=200, leverage=3.0 ) - if exchange_name != 'okx': + if exchange_name != "okx": assert exchange._set_leverage.call_count == 1 assert exchange.set_margin_mode.call_count == 1 else: assert api_mock.set_leverage.call_count == 1 - assert order['amount'] == 0.01 + assert order["amount"] == 0.01 @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_buy_dry_run(default_conf, mocker, exchange_name): - default_conf['dry_run'] = True + default_conf["dry_run"] = True exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - order = exchange.create_order(pair='ETH/BTC', ordertype='limit', side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force='gtc') - assert 'id' in order - assert 'dry_run_buy_' in order['id'] + order = exchange.create_order( + pair="ETH/BTC", + ordertype="limit", + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force="gtc", + ) + assert "id" in order + assert "dry_run_buy_" in order["id"] @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_buy_prod(default_conf, mocker, exchange_name): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' - order_type = 'market' - time_in_force = 'gtc' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" + order_type = "market" + time_in_force = "gtc" api_mock.options = {} - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'symbol': 'ETH/BTC', - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) + api_mock.create_order = MagicMock( + return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}} + ) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", 
lambda s, x, y: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + order = exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'buy' + assert api_mock.create_order.call_args[0][2] == "buy" assert api_mock.create_order.call_args[0][3] == 1 if exchange._order_needs_price(order_type): assert api_mock.create_order.call_args[0][4] == 200 @@ -1380,19 +1467,19 @@ def test_buy_prod(default_conf, mocker, exchange_name): assert api_mock.create_order.call_args[0][4] is None api_mock.create_order.reset_mock() - order_type = 'limit' + order_type = "limit" order = exchange.create_order( - pair='ETH/BTC', + pair="ETH/BTC", ordertype=order_type, side="buy", amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force + time_in_force=time_in_force, ) - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'buy' + assert api_mock.create_order.call_args[0][2] == "buy" assert api_mock.create_order.call_args[0][3] == 1 assert api_mock.create_order.call_args[0][4] == 200 @@ -1400,88 +1487,126 @@ def test_buy_prod(default_conf, mocker, exchange_name): with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("Not enough funds")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype='limit', side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + exchange.create_order( + pair="ETH/BTC", + ordertype="limit", + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype='market', side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + exchange.create_order( + pair="ETH/BTC", + ordertype="market", + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) with pytest.raises(TemporaryError): api_mock.create_order = MagicMock(side_effect=ccxt.NetworkError("Network disconnect")) exchange = 
get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) with pytest.raises(OperationalException): api_mock.create_order = MagicMock(side_effect=ccxt.BaseError("Unknown error")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_buy_considers_time_in_force(default_conf, mocker, exchange_name): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" api_mock.options = {} - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'symbol': 'ETH/BTC', - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) + api_mock.create_order = MagicMock( + return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}} + ) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - order_type = 'limit' - time_in_force = 'ioc' + order_type = "limit" + time_in_force = "ioc" - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + order = exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) - assert 'id' in order - assert 'info' in order - assert order['status'] == 'open' - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["status"] == "open" + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'buy' + assert api_mock.create_order.call_args[0][2] == "buy" assert api_mock.create_order.call_args[0][3] == 1 assert api_mock.create_order.call_args[0][4] == 200 assert "timeInForce" in api_mock.create_order.call_args[0][5] assert api_mock.create_order.call_args[0][5]["timeInForce"] == time_in_force.upper() - order_type = 'market' - time_in_force = 'ioc' + order_type = "market" + time_in_force = "ioc" - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="buy", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + order = exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="buy", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + 
assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'buy' + assert api_mock.create_order.call_args[0][2] == "buy" assert api_mock.create_order.call_args[0][3] == 1 if exchange._order_needs_price(order_type): assert api_mock.create_order.call_args[0][4] == 200 @@ -1492,43 +1617,41 @@ def test_buy_considers_time_in_force(default_conf, mocker, exchange_name): def test_sell_dry_run(default_conf, mocker): - default_conf['dry_run'] = True + default_conf["dry_run"] = True exchange = get_patched_exchange(mocker, default_conf) - order = exchange.create_order(pair='ETH/BTC', ordertype='limit', - side="sell", amount=1, rate=200, leverage=1.0) - assert 'id' in order - assert 'dry_run_sell_' in order['id'] + order = exchange.create_order( + pair="ETH/BTC", ordertype="limit", side="sell", amount=1, rate=200, leverage=1.0 + ) + assert "id" in order + assert "dry_run_sell_" in order["id"] @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_sell_prod(default_conf, mocker, exchange_name): api_mock = MagicMock() - order_id = f'test_prod_sell_{randint(0, 10 ** 6)}' - order_type = 'market' + order_id = f"test_prod_sell_{randint(0, 10 ** 6)}" + order_type = "market" api_mock.options = {} - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'symbol': 'ETH/BTC', - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False + api_mock.create_order = MagicMock( + return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}} + ) + default_conf["dry_run"] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, - side="sell", amount=1, rate=200, leverage=1.0) + order = exchange.create_order( + pair="ETH/BTC", ordertype=order_type, side="sell", amount=1, rate=200, leverage=1.0 + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'sell' + assert api_mock.create_order.call_args[0][2] == "sell" assert api_mock.create_order.call_args[0][3] == 1 if exchange._order_needs_price(order_type): assert api_mock.create_order.call_args[0][4] == 200 @@ -1536,13 +1659,13 @@ def test_sell_prod(default_conf, mocker, exchange_name): assert api_mock.create_order.call_args[0][4] is None api_mock.create_order.reset_mock() - order_type = 'limit' - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, - side="sell", amount=1, rate=200, - leverage=1.0) - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + order_type = "limit" + order = exchange.create_order( + pair="ETH/BTC", ordertype=order_type, side="sell", amount=1, rate=200, leverage=1.0 + ) + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 
'sell' + assert api_mock.create_order.call_args[0][2] == "sell" assert api_mock.create_order.call_args[0][3] == 1 assert api_mock.create_order.call_args[0][4] == 200 @@ -1550,82 +1673,95 @@ def test_sell_prod(default_conf, mocker, exchange_name): with pytest.raises(InsufficientFundsError): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="sell", amount=1, rate=200, - leverage=1.0) + exchange.create_order( + pair="ETH/BTC", ordertype=order_type, side="sell", amount=1, rate=200, leverage=1.0 + ) with pytest.raises(InvalidOrderException): api_mock.create_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype='limit', side="sell", amount=1, rate=200, - leverage=1.0) + exchange.create_order( + pair="ETH/BTC", ordertype="limit", side="sell", amount=1, rate=200, leverage=1.0 + ) # Market orders don't require price, so the behaviour is slightly different with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype='market', side="sell", amount=1, rate=200, - leverage=1.0) + exchange.create_order( + pair="ETH/BTC", ordertype="market", side="sell", amount=1, rate=200, leverage=1.0 + ) with pytest.raises(TemporaryError): api_mock.create_order = MagicMock(side_effect=ccxt.NetworkError("No Connection")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="sell", amount=1, rate=200, - leverage=1.0) + exchange.create_order( + pair="ETH/BTC", ordertype=order_type, side="sell", amount=1, rate=200, leverage=1.0 + ) with pytest.raises(OperationalException): api_mock.create_order = MagicMock(side_effect=ccxt.BaseError("DeadBeef")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="sell", amount=1, rate=200, - leverage=1.0) + exchange.create_order( + pair="ETH/BTC", ordertype=order_type, side="sell", amount=1, rate=200, leverage=1.0 + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_sell_considers_time_in_force(default_conf, mocker, exchange_name): api_mock = MagicMock() - order_id = f'test_prod_sell_{randint(0, 10 ** 6)}' - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'symbol': 'ETH/BTC', - 'info': { - 'foo': 'bar' - } - }) + order_id = f"test_prod_sell_{randint(0, 10 ** 6)}" + api_mock.create_order = MagicMock( + return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}} + ) api_mock.options = {} - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - order_type = 'limit' - time_in_force = 'ioc' + order_type = "limit" + time_in_force = "ioc" - order = exchange.create_order(pair='ETH/BTC', 
ordertype=order_type, side="sell", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + order = exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="sell", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'sell' + assert api_mock.create_order.call_args[0][2] == "sell" assert api_mock.create_order.call_args[0][3] == 1 assert api_mock.create_order.call_args[0][4] == 200 assert "timeInForce" in api_mock.create_order.call_args[0][5] assert api_mock.create_order.call_args[0][5]["timeInForce"] == time_in_force.upper() - order_type = 'market' - time_in_force = 'IOC' - order = exchange.create_order(pair='ETH/BTC', ordertype=order_type, side="sell", - amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force) + order_type = "market" + time_in_force = "IOC" + order = exchange.create_order( + pair="ETH/BTC", + ordertype=order_type, + side="sell", + amount=1, + rate=200, + leverage=1.0, + time_in_force=time_in_force, + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'sell' + assert api_mock.create_order.call_args[0][2] == "sell" assert api_mock.create_order.call_args[0][3] == 1 if exchange._order_needs_price(order_type): assert api_mock.create_order.call_args[0][4] == 200 @@ -1637,116 +1773,123 @@ def test_sell_considers_time_in_force(default_conf, mocker, exchange_name): @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_balances_prod(default_conf, mocker, exchange_name): - balance_item = { - 'free': 10.0, - 'total': 10.0, - 'used': 0.0 - } + balance_item = {"free": 10.0, "total": 10.0, "used": 0.0} api_mock = MagicMock() - api_mock.fetch_balance = MagicMock(return_value={ - '1ST': balance_item, - '2ST': balance_item, - '3ST': balance_item - }) - default_conf['dry_run'] = False + api_mock.fetch_balance = MagicMock( + return_value={"1ST": balance_item, "2ND": balance_item, "3RD": balance_item} + ) + default_conf["dry_run"] = False exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) assert len(exchange.get_balances()) == 3 - assert exchange.get_balances()['1ST']['free'] == 10.0 - assert exchange.get_balances()['1ST']['total'] == 10.0 - assert exchange.get_balances()['1ST']['used'] == 0.0 + assert exchange.get_balances()["1ST"]["free"] == 10.0 + assert exchange.get_balances()["1ST"]["total"] == 10.0 + assert exchange.get_balances()["1ST"]["used"] == 0.0 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "get_balances", "fetch_balance") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "get_balances", "fetch_balance" + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_positions(default_conf, mocker, exchange_name): - mocker.patch(f'{EXMS}.validate_trading_mode_and_margin_mode') + 
mocker.patch(f"{EXMS}.validate_trading_mode_and_margin_mode") api_mock = MagicMock() - api_mock.fetch_positions = MagicMock(return_value=[ - {'symbol': 'ETH/USDT:USDT', 'leverage': 5}, - {'symbol': 'XRP/USDT:USDT', 'leverage': 5}, - ]) + api_mock.fetch_positions = MagicMock( + return_value=[ + {"symbol": "ETH/USDT:USDT", "leverage": 5}, + {"symbol": "XRP/USDT:USDT", "leverage": 5}, + ] + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) assert exchange.fetch_positions() == [] - default_conf['dry_run'] = False - default_conf['trading_mode'] = 'futures' + default_conf["dry_run"] = False + default_conf["trading_mode"] = "futures" exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) res = exchange.fetch_positions() assert len(res) == 2 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "fetch_positions", "fetch_positions") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "fetch_positions", "fetch_positions" + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_orders(default_conf, mocker, exchange_name, limit_order): - api_mock = MagicMock() - api_mock.fetch_orders = MagicMock(return_value=[ - limit_order['buy'], - limit_order['sell'], - ]) - api_mock.fetch_open_orders = MagicMock(return_value=[limit_order['buy']]) - api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order['buy']]) + api_mock.fetch_orders = MagicMock( + return_value=[ + limit_order["buy"], + limit_order["sell"], + ] + ) + api_mock.fetch_open_orders = MagicMock(return_value=[limit_order["buy"]]) + api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order["buy"]]) - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) start_time = datetime.now(timezone.utc) - timedelta(days=20) expected = 1 - if exchange_name == 'bybit': + if exchange_name == "bybit": expected = 3 exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) # Not available in dry-run - assert exchange.fetch_orders('mocked', start_time) == [] + assert exchange.fetch_orders("mocked", start_time) == [] assert api_mock.fetch_orders.call_count == 0 - default_conf['dry_run'] = False + default_conf["dry_run"] = False exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - res = exchange.fetch_orders('mocked', start_time) + res = exchange.fetch_orders("mocked", start_time) assert api_mock.fetch_orders.call_count == expected assert api_mock.fetch_open_orders.call_count == 0 assert api_mock.fetch_closed_orders.call_count == 0 assert len(res) == 2 * expected - res = exchange.fetch_orders('mocked', start_time) + res = exchange.fetch_orders("mocked", start_time) api_mock.fetch_orders.reset_mock() def has_resp(_, endpoint): - if endpoint == 'fetchOrders': + if endpoint == "fetchOrders": return False - if endpoint == 'fetchClosedOrders': + if endpoint == "fetchClosedOrders": return True - if endpoint == 'fetchOpenOrders': + if endpoint == "fetchOpenOrders": return True - if exchange_name == 'okx': + if exchange_name == "okx": # Special OKX case is tested separately return - mocker.patch(f'{EXMS}.exchange_has', has_resp) + mocker.patch(f"{EXMS}.exchange_has", has_resp) # happy path without fetchOrders - exchange.fetch_orders('mocked', start_time) + exchange.fetch_orders("mocked", start_time) assert api_mock.fetch_orders.call_count == 0 assert api_mock.fetch_open_orders.call_count == expected assert 
api_mock.fetch_closed_orders.call_count == expected - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "fetch_orders", "fetch_orders", retries=1, - pair='mocked', since=start_time) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "fetch_orders", + "fetch_orders", + retries=1, + pair="mocked", + since=start_time, + ) # Unhappy path - first fetch-orders call fails. api_mock.fetch_orders = MagicMock(side_effect=ccxt.NotSupported()) api_mock.fetch_open_orders.reset_mock() api_mock.fetch_closed_orders.reset_mock() - exchange.fetch_orders('mocked', start_time) + exchange.fetch_orders("mocked", start_time) assert api_mock.fetch_orders.call_count == expected assert api_mock.fetch_open_orders.call_count == expected @@ -1756,86 +1899,95 @@ def test_fetch_orders(default_conf, mocker, exchange_name, limit_order): def test_fetch_trading_fees(default_conf, mocker): api_mock = MagicMock() tick = { - '1INCH/USDT:USDT': { - 'info': {'user_id': '', - 'taker_fee': '0.0018', - 'maker_fee': '0.0018', - 'gt_discount': False, - 'gt_taker_fee': '0', - 'gt_maker_fee': '0', - 'loan_fee': '0.18', - 'point_type': '1', - 'futures_taker_fee': '0.0005', - 'futures_maker_fee': '0'}, - 'symbol': '1INCH/USDT:USDT', - 'maker': 0.0, - 'taker': 0.0005}, - 'ETH/USDT:USDT': { - 'info': {'user_id': '', - 'taker_fee': '0.0018', - 'maker_fee': '0.0018', - 'gt_discount': False, - 'gt_taker_fee': '0', - 'gt_maker_fee': '0', - 'loan_fee': '0.18', - 'point_type': '1', - 'futures_taker_fee': '0.0005', - 'futures_maker_fee': '0'}, - 'symbol': 'ETH/USDT:USDT', - 'maker': 0.0, - 'taker': 0.0005} + "1INCH/USDT:USDT": { + "info": { + "user_id": "", + "taker_fee": "0.0018", + "maker_fee": "0.0018", + "gt_discount": False, + "gt_taker_fee": "0", + "gt_maker_fee": "0", + "loan_fee": "0.18", + "point_type": "1", + "futures_taker_fee": "0.0005", + "futures_maker_fee": "0", + }, + "symbol": "1INCH/USDT:USDT", + "maker": 0.0, + "taker": 0.0005, + }, + "ETH/USDT:USDT": { + "info": { + "user_id": "", + "taker_fee": "0.0018", + "maker_fee": "0.0018", + "gt_discount": False, + "gt_taker_fee": "0", + "gt_maker_fee": "0", + "loan_fee": "0.18", + "point_type": "1", + "futures_taker_fee": "0.0005", + "futures_maker_fee": "0", + }, + "symbol": "ETH/USDT:USDT", + "maker": 0.0, + "taker": 0.0005, + }, } - exchange_name = 'gate' - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + exchange_name = "gate" + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED api_mock.fetch_trading_fees = MagicMock(return_value=tick) - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert '1INCH/USDT:USDT' in exchange._trading_fees - assert 'ETH/USDT:USDT' in exchange._trading_fees + assert "1INCH/USDT:USDT" in exchange._trading_fees + assert "ETH/USDT:USDT" in exchange._trading_fees assert api_mock.fetch_trading_fees.call_count == 1 api_mock.fetch_trading_fees.reset_mock() - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "fetch_trading_fees", "fetch_trading_fees") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "fetch_trading_fees", 
"fetch_trading_fees" + ) api_mock.fetch_trading_fees = MagicMock(return_value={}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange.fetch_trading_fees() - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) assert exchange.fetch_trading_fees() == {} def test_fetch_bids_asks(default_conf, mocker): api_mock = MagicMock() - tick = {'ETH/BTC': { - 'symbol': 'ETH/BTC', - 'bid': 0.5, - 'ask': 1, - 'last': 42, - }, 'BCH/BTC': { - 'symbol': 'BCH/BTC', - 'bid': 0.6, - 'ask': 0.5, - 'last': 41, + tick = { + "ETH/BTC": { + "symbol": "ETH/BTC", + "bid": 0.5, + "ask": 1, + "last": 42, + }, + "BCH/BTC": { + "symbol": "BCH/BTC", + "bid": 0.6, + "ask": 0.5, + "last": 41, + }, } - } - exchange_name = 'binance' + exchange_name = "binance" api_mock.fetch_bids_asks = MagicMock(return_value=tick) - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) # retrieve original ticker bidsasks = exchange.fetch_bids_asks() - assert 'ETH/BTC' in bidsasks - assert 'BCH/BTC' in bidsasks - assert bidsasks['ETH/BTC']['bid'] == 0.5 - assert bidsasks['ETH/BTC']['ask'] == 1 - assert bidsasks['BCH/BTC']['bid'] == 0.6 - assert bidsasks['BCH/BTC']['ask'] == 0.5 + assert "ETH/BTC" in bidsasks + assert "BCH/BTC" in bidsasks + assert bidsasks["ETH/BTC"]["bid"] == 0.5 + assert bidsasks["ETH/BTC"]["ask"] == 1 + assert bidsasks["BCH/BTC"]["bid"] == 0.6 + assert bidsasks["BCH/BTC"]["ask"] == 0.5 assert api_mock.fetch_bids_asks.call_count == 1 api_mock.fetch_bids_asks.reset_mock() @@ -1847,8 +1999,9 @@ def test_fetch_bids_asks(default_conf, mocker): tickers2 = exchange.fetch_bids_asks(cached=False) assert api_mock.fetch_bids_asks.call_count == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "fetch_bids_asks", "fetch_bids_asks") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "fetch_bids_asks", "fetch_bids_asks" + ) with pytest.raises(OperationalException): api_mock.fetch_bids_asks = MagicMock(side_effect=ccxt.NotSupported("DeadBeef")) @@ -1858,38 +2011,40 @@ def test_fetch_bids_asks(default_conf, mocker): api_mock.fetch_bids_asks = MagicMock(return_value={}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange.fetch_bids_asks() - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) assert exchange.fetch_bids_asks() == {} @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_tickers(default_conf, mocker, exchange_name, caplog): api_mock = MagicMock() - tick = {'ETH/BTC': { - 'symbol': 'ETH/BTC', - 'bid': 0.5, - 'ask': 1, - 'last': 42, - }, 'BCH/BTC': { - 'symbol': 'BCH/BTC', - 'bid': 0.6, - 'ask': 0.5, - 'last': 41, + tick = { + "ETH/BTC": { + "symbol": "ETH/BTC", + "bid": 0.5, + "ask": 1, + "last": 42, + }, + "BCH/BTC": { + "symbol": "BCH/BTC", + "bid": 0.6, + "ask": 0.5, + "last": 41, + }, } - } - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) api_mock.fetch_tickers = MagicMock(return_value=tick) api_mock.fetch_bids_asks = MagicMock(return_value={}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) # retrieve original ticker tickers = exchange.get_tickers() - assert 'ETH/BTC' in tickers - assert 'BCH/BTC' in tickers - assert 
tickers['ETH/BTC']['bid'] == 0.5 - assert tickers['ETH/BTC']['ask'] == 1 - assert tickers['BCH/BTC']['bid'] == 0.6 - assert tickers['BCH/BTC']['ask'] == 0.5 + assert "ETH/BTC" in tickers + assert "BCH/BTC" in tickers + assert tickers["ETH/BTC"]["bid"] == 0.5 + assert tickers["ETH/BTC"]["ask"] == 1 + assert tickers["BCH/BTC"]["bid"] == 0.6 + assert tickers["BCH/BTC"]["ask"] == 0.5 assert api_mock.fetch_tickers.call_count == 1 assert api_mock.fetch_bids_asks.call_count == 0 @@ -1904,8 +2059,9 @@ def test_get_tickers(default_conf, mocker, exchange_name, caplog): assert api_mock.fetch_tickers.call_count == 1 assert api_mock.fetch_bids_asks.call_count == 0 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "get_tickers", "fetch_tickers") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "get_tickers", "fetch_tickers" + ) with pytest.raises(OperationalException): api_mock.fetch_tickers = MagicMock(side_effect=ccxt.NotSupported("DeadBeef")) @@ -1917,7 +2073,7 @@ def test_get_tickers(default_conf, mocker, exchange_name, caplog): exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) x = exchange.get_tickers() assert x == [] - assert log_has_re(r'Could not load tickers due to BadSymbol\..*SomeSymbol', caplog) + assert log_has_re(r"Could not load tickers due to BadSymbol\..*SomeSymbol", caplog) caplog.clear() api_mock.fetch_tickers = MagicMock(return_value={}) @@ -1926,18 +2082,18 @@ def test_get_tickers(default_conf, mocker, exchange_name, caplog): api_mock.fetch_tickers.reset_mock() api_mock.fetch_bids_asks.reset_mock() - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange.get_tickers() assert api_mock.fetch_tickers.call_count == 1 - assert api_mock.fetch_bids_asks.call_count == (1 if exchange_name == 'binance' else 0) + assert api_mock.fetch_bids_asks.call_count == (1 if exchange_name == "binance" else 0) api_mock.fetch_tickers.reset_mock() api_mock.fetch_bids_asks.reset_mock() - mocker.patch(f'{EXMS}.exchange_has', return_value=False) + mocker.patch(f"{EXMS}.exchange_has", return_value=False) assert exchange.get_tickers() == {} @@ -1945,80 +2101,86 @@ def test_get_tickers(default_conf, mocker, exchange_name, caplog): def test_fetch_ticker(default_conf, mocker, exchange_name): api_mock = MagicMock() tick = { - 'symbol': 'ETH/BTC', - 'bid': 0.00001098, - 'ask': 0.00001099, - 'last': 0.0001, + "symbol": "ETH/BTC", + "bid": 0.00001098, + "ask": 0.00001099, + "last": 0.0001, } api_mock.fetch_ticker = MagicMock(return_value=tick) - api_mock.markets = {'ETH/BTC': {'active': True}} + api_mock.markets = {"ETH/BTC": {"active": True}} exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) # retrieve original ticker - ticker = exchange.fetch_ticker(pair='ETH/BTC') + ticker = exchange.fetch_ticker(pair="ETH/BTC") - assert ticker['bid'] == 0.00001098 - assert ticker['ask'] == 0.00001099 + assert ticker["bid"] == 0.00001098 + assert ticker["ask"] == 0.00001099 # change the ticker tick = { - 'symbol': 'ETH/BTC', - 'bid': 0.5, - 'ask': 1, - 'last': 42, + "symbol": "ETH/BTC", + "bid": 0.5, + "ask": 1, + "last": 42, } api_mock.fetch_ticker = 
MagicMock(return_value=tick) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) # if not caching the result we should get the same ticker # if not fetching a new result we should get the cached ticker - ticker = exchange.fetch_ticker(pair='ETH/BTC') + ticker = exchange.fetch_ticker(pair="ETH/BTC") assert api_mock.fetch_ticker.call_count == 1 - assert ticker['bid'] == 0.5 - assert ticker['ask'] == 1 + assert ticker["bid"] == 0.5 + assert ticker["ask"] == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "fetch_ticker", "fetch_ticker", - pair='ETH/BTC') + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "fetch_ticker", + "fetch_ticker", + pair="ETH/BTC", + ) api_mock.fetch_ticker = MagicMock(return_value={}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_ticker(pair='ETH/BTC') + exchange.fetch_ticker(pair="ETH/BTC") - with pytest.raises(DependencyException, match=r'Pair XRP/ETH not available'): - exchange.fetch_ticker(pair='XRP/ETH') + with pytest.raises(DependencyException, match=r"Pair XRP/ETH not available"): + exchange.fetch_ticker(pair="XRP/ETH") @pytest.mark.parametrize("exchange_name", EXCHANGES) def test___now_is_time_to_refresh(default_conf, mocker, exchange_name, time_machine): exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - pair = 'BTC/USDT' + pair = "BTC/USDT" candle_type = CandleType.SPOT start_dt = datetime(2023, 12, 1, 0, 10, 0, tzinfo=timezone.utc) time_machine.move_to(start_dt, tick=False) - assert (pair, '5m', candle_type) not in exchange._pairs_last_refresh_time + assert (pair, "5m", candle_type) not in exchange._pairs_last_refresh_time # not refreshed yet - assert exchange._now_is_time_to_refresh(pair, '5m', candle_type) is True + assert exchange._now_is_time_to_refresh(pair, "5m", candle_type) is True last_closed_candle = (start_dt - timedelta(minutes=5)).timestamp() - exchange._pairs_last_refresh_time[(pair, '5m', candle_type)] = last_closed_candle + exchange._pairs_last_refresh_time[(pair, "5m", candle_type)] = last_closed_candle # next candle not closed yet time_machine.move_to(start_dt + timedelta(minutes=4, seconds=59), tick=False) - assert exchange._now_is_time_to_refresh(pair, '5m', candle_type) is False + assert exchange._now_is_time_to_refresh(pair, "5m", candle_type) is False # next candle closed time_machine.move_to(start_dt + timedelta(minutes=5, seconds=0), tick=False) - assert exchange._now_is_time_to_refresh(pair, '5m', candle_type) is True + assert exchange._now_is_time_to_refresh(pair, "5m", candle_type) is True # 1 second later (last_refresh_time didn't change) time_machine.move_to(start_dt + timedelta(minutes=5, seconds=1), tick=False) - assert exchange._now_is_time_to_refresh(pair, '5m', candle_type) is True + assert exchange._now_is_time_to_refresh(pair, "5m", candle_type) is True @pytest.mark.parametrize("exchange_name", EXCHANGES) -@pytest.mark.parametrize('candle_type', ['mark', '']) +@pytest.mark.parametrize("candle_type", ["mark", ""]) def test_get_historic_ohlcv(default_conf, mocker, caplog, exchange_name, candle_type): exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) ohlcv = [ @@ -2031,7 +2193,7 @@ def test_get_historic_ohlcv(default_conf, mocker, caplog, exchange_name, candle_ 5, # volume (in quote currency) ] ] - pair = 'ETH/BTC' + pair = "ETH/BTC" async def mock_candle_hist(pair, timeframe, candle_type, since_ms): return pair, timeframe, candle_type, 
ohlcv, True @@ -2039,18 +2201,15 @@ def test_get_historic_ohlcv(default_conf, mocker, caplog, exchange_name, candle_ exchange._async_get_candle_history = Mock(wraps=mock_candle_hist) # one_call calculation * 1.8 should do 2 calls - since = 5 * 60 * exchange.ohlcv_candle_limit('5m', candle_type) * 1.8 + since = 5 * 60 * exchange.ohlcv_candle_limit("5m", candle_type) * 1.8 ret = exchange.get_historic_ohlcv( - pair, - "5m", - dt_ts(dt_now() - timedelta(seconds=since)), - candle_type=candle_type + pair, "5m", dt_ts(dt_now() - timedelta(seconds=since)), candle_type=candle_type ) assert exchange._async_get_candle_history.call_count == 2 # Returns twice the above OHLCV data assert len(ret) == 2 - assert log_has_re(r'Downloaded data for .* with length .*\.', caplog) + assert log_has_re(r"Downloaded data for .* with length .*\.", caplog) caplog.clear() @@ -2059,17 +2218,14 @@ def test_get_historic_ohlcv(default_conf, mocker, caplog, exchange_name, candle_ exchange._async_get_candle_history = MagicMock(side_effect=mock_get_candle_hist_error) ret = exchange.get_historic_ohlcv( - pair, - "5m", - dt_ts(dt_now() - timedelta(seconds=since)), - candle_type=candle_type + pair, "5m", dt_ts(dt_now() - timedelta(seconds=since)), candle_type=candle_type ) assert log_has_re(r"Async code raised an exception: .*", caplog) @pytest.mark.asyncio @pytest.mark.parametrize("exchange_name", EXCHANGES) -@pytest.mark.parametrize('candle_type', [CandleType.MARK, CandleType.SPOT]) +@pytest.mark.parametrize("candle_type", [CandleType.MARK, CandleType.SPOT]) async def test__async_get_historic_ohlcv(default_conf, mocker, caplog, exchange_name, candle_type): ohlcv = [ [ @@ -2085,11 +2241,12 @@ async def test__async_get_historic_ohlcv(default_conf, mocker, caplog, exchange_ # Monkey-patch async function exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - pair = 'ETH/USDT' + pair = "ETH/USDT" respair, restf, _, res, _ = await exchange._async_get_historic_ohlcv( - pair, "5m", 1500000000000, candle_type=candle_type, is_new_pair=False) + pair, "5m", 1500000000000, candle_type=candle_type, is_new_pair=False + ) assert respair == pair - assert restf == '5m' + assert restf == "5m" # Call with very old timestamp - causes tons of requests assert exchange._api_async.fetch_ohlcv.call_count > 200 assert res[0] == ohlcv[0] @@ -2098,18 +2255,17 @@ async def test__async_get_historic_ohlcv(default_conf, mocker, caplog, exchange_ end_ts = 1_500_500_000_000 start_ts = 1_500_000_000_000 respair, restf, _, res, _ = await exchange._async_get_historic_ohlcv( - pair, "5m", since_ms=start_ts, candle_type=candle_type, is_new_pair=False, - until_ms=end_ts - ) + pair, "5m", since_ms=start_ts, candle_type=candle_type, is_new_pair=False, until_ms=end_ts + ) # Required candles candles = (end_ts - start_ts) / 300_000 - exp = candles // exchange.ohlcv_candle_limit('5m', candle_type, start_ts) + 1 + exp = candles // exchange.ohlcv_candle_limit("5m", candle_type, start_ts) + 1 # Depending on the exchange, this should be called between 1 and 6 times. 
assert exchange._api_async.fetch_ohlcv.call_count == exp -@pytest.mark.parametrize('candle_type', [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT]) +@pytest.mark.parametrize("candle_type", [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT]) def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None: ohlcv = [ [ @@ -2127,14 +2283,14 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None 4, # low 6, # close 5, # volume (in quote currency) - ] + ], ] caplog.set_level(logging.DEBUG) exchange = get_patched_exchange(mocker, default_conf) exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - pairs = [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)] + pairs = [("IOTA/ETH", "5m", candle_type), ("XRP/ETH", "5m", candle_type)] # empty dicts assert not exchange._klines res = exchange.refresh_latest_ohlcv(pairs, cache=False) @@ -2149,7 +2305,7 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None res = exchange.refresh_latest_ohlcv(pairs) assert len(res) == len(pairs) - assert log_has(f'Refreshing candle (OHLCV) data for {len(pairs)} pairs', caplog) + assert log_has(f"Refreshing candle (OHLCV) data for {len(pairs)} pairs", caplog) assert exchange._klines assert exchange._api_async.fetch_ohlcv.call_count == 4 exchange._api_async.fetch_ohlcv.reset_mock() @@ -2166,18 +2322,21 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None # test caching res = exchange.refresh_latest_ohlcv( - [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)]) + [("IOTA/ETH", "5m", candle_type), ("XRP/ETH", "5m", candle_type)] + ) assert len(res) == len(pairs) assert exchange._api_async.fetch_ohlcv.call_count == 0 - assert log_has(f"Using cached candle (OHLCV) data for {pairs[0][0]}, " - f"{pairs[0][1]}, {candle_type} ...", - caplog) + assert log_has( + f"Using cached candle (OHLCV) data for {pairs[0][0]}, {pairs[0][1]}, {candle_type} ...", + caplog, + ) caplog.clear() # Reset refresh times - must do 2 call per pair as cache is expired exchange._pairs_last_refresh_time = {} res = exchange.refresh_latest_ohlcv( - [('IOTA/ETH', '5m', candle_type), ('XRP/ETH', '5m', candle_type)]) + [("IOTA/ETH", "5m", candle_type), ("XRP/ETH", "5m", candle_type)] + ) assert len(res) == len(pairs) assert exchange._api_async.fetch_ohlcv.call_count == 4 @@ -2187,9 +2346,10 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None exchange.required_candle_call_count = 1 pairlist = [ - ('IOTA/ETH', '5m', candle_type), - ('XRP/ETH', '5m', candle_type), - ('XRP/ETH', '1d', candle_type)] + ("IOTA/ETH", "5m", candle_type), + ("XRP/ETH", "5m", candle_type), + ("XRP/ETH", "1d", candle_type), + ] res = exchange.refresh_latest_ohlcv(pairlist, cache=False) assert len(res) == 3 assert exchange._api_async.fetch_ohlcv.call_count == 3 @@ -2203,19 +2363,19 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None caplog.clear() # Call with invalid timeframe - res = exchange.refresh_latest_ohlcv([('IOTA/ETH', '3m', candle_type)], cache=False) + res = exchange.refresh_latest_ohlcv([("IOTA/ETH", "3m", candle_type)], cache=False) if candle_type != CandleType.MARK: assert not res assert len(res) == 0 - assert log_has_re(r'Cannot download \(IOTA\/ETH, 3m\).*', caplog) + assert log_has_re(r"Cannot download \(IOTA\/ETH, 3m\).*", caplog) else: assert len(res) == 1 -@pytest.mark.parametrize('candle_type', [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT]) 
+@pytest.mark.parametrize("candle_type", [CandleType.FUTURES, CandleType.MARK, CandleType.SPOT]) def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_machine) -> None: start = datetime(2021, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc) - ohlcv = generate_test_data_raw('1h', 100, start.strftime('%Y-%m-%d')) + ohlcv = generate_test_data_raw("1h", 100, start.strftime("%Y-%m-%d")) time_machine.move_to(start + timedelta(hours=99, minutes=30)) exchange = get_patched_exchange(mocker, default_conf) @@ -2223,8 +2383,8 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach assert exchange._startup_candle_count == 0 exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - pair1 = ('IOTA/ETH', '1h', candle_type) - pair2 = ('XRP/ETH', '1h', candle_type) + pair1 = ("IOTA/ETH", "1h", candle_type) + pair2 = ("XRP/ETH", "1h", candle_type) pairs = [pair1, pair2] # No caching @@ -2262,15 +2422,15 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach assert len(res) == 2 assert len(res[pair1]) == 99 assert len(res[pair2]) == 99 - assert res[pair2].at[0, 'open'] + assert res[pair2].at[0, "open"] assert exchange._pairs_last_refresh_time[pair1] == ohlcv[-2][0] // 1000 refresh_pior = exchange._pairs_last_refresh_time[pair1] # New candle on exchange - return 100 candles - but skip one candle so we actually get 2 candles # in one go - new_startdate = (start + timedelta(hours=2)).strftime('%Y-%m-%d %H:%M') + new_startdate = (start + timedelta(hours=2)).strftime("%Y-%m-%d %H:%M") # mocker.patch(f"{EXMS}.ohlcv_candle_limit", return_value=100) - ohlcv = generate_test_data_raw('1h', 100, new_startdate) + ohlcv = generate_test_data_raw("1h", 100, new_startdate) exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) res = exchange.refresh_latest_ohlcv(pairs) assert exchange._api_async.fetch_ohlcv.call_count == 2 @@ -2278,7 +2438,7 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach assert len(res[pair1]) == 100 assert len(res[pair2]) == 100 # Verify index starts at 0 - assert res[pair2].at[0, 'open'] + assert res[pair2].at[0, "open"] assert refresh_pior != exchange._pairs_last_refresh_time[pair1] assert exchange._pairs_last_refresh_time[pair1] == ohlcv[-2][0] // 1000 @@ -2291,11 +2451,11 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach assert len(res) == 2 assert len(res[pair1]) == 100 assert len(res[pair2]) == 100 - assert res[pair2].at[0, 'open'] + assert res[pair2].at[0, "open"] # Move to distant future (so a 1 call would cause a hole in the data) time_machine.move_to(start + timedelta(hours=2000)) - ohlcv = generate_test_data_raw('1h', 100, start + timedelta(hours=1900)) + ohlcv = generate_test_data_raw("1h", 100, start + timedelta(hours=1900)) exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) res = exchange.refresh_latest_ohlcv(pairs) @@ -2304,24 +2464,22 @@ def test_refresh_latest_ohlcv_cache(mocker, default_conf, candle_type, time_mach # Cache eviction - new data. 
assert len(res[pair1]) == 99 assert len(res[pair2]) == 99 - assert res[pair2].at[0, 'open'] + assert res[pair2].at[0, "open"] def test_refresh_ohlcv_with_cache(mocker, default_conf, time_machine) -> None: start = datetime(2021, 8, 1, 0, 0, 0, 0, tzinfo=timezone.utc) - ohlcv = generate_test_data_raw('1h', 100, start.strftime('%Y-%m-%d')) + ohlcv = generate_test_data_raw("1h", 100, start.strftime("%Y-%m-%d")) time_machine.move_to(start, tick=False) pairs = [ - ('ETH/BTC', '1d', CandleType.SPOT), - ('TKN/BTC', '1d', CandleType.SPOT), - ('LTC/BTC', '1d', CandleType.SPOT), - ('LTC/BTC', '5m', CandleType.SPOT), - ('LTC/BTC', '1h', CandleType.SPOT), + ("ETH/BTC", "1d", CandleType.SPOT), + ("TKN/BTC", "1d", CandleType.SPOT), + ("LTC/BTC", "1d", CandleType.SPOT), + ("LTC/BTC", "5m", CandleType.SPOT), + ("LTC/BTC", "1h", CandleType.SPOT), ] - ohlcv_data = { - p: ohlcv for p in pairs - } + ohlcv_data = {p: ohlcv for p in pairs} ohlcv_mock = mocker.patch(f"{EXMS}.refresh_latest_ohlcv", return_value=ohlcv_data) mocker.patch(f"{EXMS}.ohlcv_candle_limit", return_value=100) exchange = get_patched_exchange(mocker, default_conf) @@ -2385,7 +2543,7 @@ async def test__async_get_candle_history(default_conf, mocker, caplog, exchange_ # Monkey-patch async function exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - pair = 'ETH/BTC' + pair = "ETH/BTC" res = await exchange._async_get_candle_history(pair, "5m", CandleType.SPOT) assert type(res) is tuple assert len(res) == 5 @@ -2397,67 +2555,95 @@ async def test__async_get_candle_history(default_conf, mocker, caplog, exchange_ assert not log_has(f"Using cached candle (OHLCV) data for {pair} ...", caplog) exchange.close() # exchange = Exchange(default_conf) - await async_ccxt_exception(mocker, default_conf, MagicMock(), - "_async_get_candle_history", "fetch_ohlcv", - pair='ABCD/BTC', timeframe=default_conf['timeframe'], - candle_type=CandleType.SPOT) + await async_ccxt_exception( + mocker, + default_conf, + MagicMock(), + "_async_get_candle_history", + "fetch_ohlcv", + pair="ABCD/BTC", + timeframe=default_conf["timeframe"], + candle_type=CandleType.SPOT, + ) api_mock = MagicMock() - with pytest.raises(OperationalException, - match=r'Could not fetch historical candle \(OHLCV\) data.*'): + with pytest.raises( + OperationalException, match=r"Could not fetch historical candle \(OHLCV\) data.*" + ): api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.BaseError("Unknown error")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - await exchange._async_get_candle_history(pair, "5m", CandleType.SPOT, - dt_ts(dt_now() - timedelta(seconds=2000))) + await exchange._async_get_candle_history( + pair, "5m", CandleType.SPOT, dt_ts(dt_now() - timedelta(seconds=2000)) + ) exchange.close() - with pytest.raises(OperationalException, match=r'Exchange.* does not support fetching ' - r'historical candle \(OHLCV\) data\..*'): + with pytest.raises( + OperationalException, + match=r"Exchange.* does not support fetching " r"historical candle \(OHLCV\) data\..*", + ): api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.NotSupported("Not supported")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - await exchange._async_get_candle_history(pair, "5m", CandleType.SPOT, - dt_ts(dt_now() - timedelta(seconds=2000))) + await exchange._async_get_candle_history( + pair, "5m", CandleType.SPOT, dt_ts(dt_now() - timedelta(seconds=2000)) + ) exchange.close() async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog): 
from freqtrade.exchange.common import _reset_logging_mixin + _reset_logging_mixin() caplog.set_level(logging.INFO) api_mock = MagicMock() - api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( - "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" - "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" - "429 Too Many Requests" '{"code":"429000","msg":"Too Many Requests"}')) + api_mock.fetch_ohlcv = MagicMock( + side_effect=ccxt.DDoSProtection( + "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" + "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" + "429 Too Many Requests" + '{"code":"429000","msg":"Too Many Requests"}' + ) + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kucoin") - mocker.patch(f'{EXMS}.name', PropertyMock(return_value='KuCoin')) + mocker.patch(f"{EXMS}.name", PropertyMock(return_value="KuCoin")) msg = "Kucoin 429 error, avoid triggering DDosProtection backoff delay" assert not num_log_has_re(msg, caplog) for _ in range(3): - with pytest.raises(DDosProtection, match=r'429 Too Many Requests'): + with pytest.raises(DDosProtection, match=r"429 Too Many Requests"): await exchange._async_get_candle_history( - "ETH/BTC", "5m", CandleType.SPOT, - since_ms=dt_ts(dt_now() - timedelta(seconds=2000)), count=3) + "ETH/BTC", + "5m", + CandleType.SPOT, + since_ms=dt_ts(dt_now() - timedelta(seconds=2000)), + count=3, + ) assert num_log_has_re(msg, caplog) == 3 caplog.clear() # Test regular non-kucoin message - api_mock.fetch_ohlcv = MagicMock(side_effect=ccxt.DDoSProtection( - "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" - "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" - "429 Too Many Requests" '{"code":"2222222","msg":"Too Many Requests"}')) + api_mock.fetch_ohlcv = MagicMock( + side_effect=ccxt.DDoSProtection( + "kucoin GET https://openapi-v2.kucoin.com/api/v1/market/candles?" 
+ "symbol=ETH-BTC&type=5min&startAt=1640268735&endAt=1640418735" + "429 Too Many Requests" + '{"code":"2222222","msg":"Too Many Requests"}' + ) + ) - msg = r'_async_get_candle_history\(\) returned exception: .*' - msg2 = r'Applying DDosProtection backoff delay: .*' - with patch('freqtrade.exchange.common.asyncio.sleep', get_mock_coro(None)): + msg = r"_async_get_candle_history\(\) returned exception: .*" + msg2 = r"Applying DDosProtection backoff delay: .*" + with patch("freqtrade.exchange.common.asyncio.sleep", get_mock_coro(None)): for _ in range(3): - with pytest.raises(DDosProtection, match=r'429 Too Many Requests'): + with pytest.raises(DDosProtection, match=r"429 Too Many Requests"): await exchange._async_get_candle_history( - "ETH/BTC", "5m", CandleType.SPOT, - dt_ts(dt_now() - timedelta(seconds=2000)), count=3) + "ETH/BTC", + "5m", + CandleType.SPOT, + dt_ts(dt_now() - timedelta(seconds=2000)), + count=3, + ) # Expect the "returned exception" message 12 times (4 retries * 3 (loop)) assert num_log_has_re(msg, caplog) == 12 assert num_log_has_re(msg2, caplog) == 9 @@ -2465,7 +2651,7 @@ async def test__async_kucoin_get_candle_history(default_conf, mocker, caplog): async def test__async_get_candle_history_empty(default_conf, mocker, caplog): - """ Test empty exchange result """ + """Test empty exchange result""" ohlcv = [] caplog.set_level(logging.DEBUG) @@ -2474,7 +2660,7 @@ async def test__async_get_candle_history_empty(default_conf, mocker, caplog): exchange._api_async.fetch_ohlcv = get_mock_coro([]) exchange = Exchange(default_conf) - pair = 'ETH/BTC' + pair = "ETH/BTC" res = await exchange._async_get_candle_history(pair, "5m", CandleType.SPOT) assert type(res) is tuple assert len(res) == 5 @@ -2487,9 +2673,8 @@ async def test__async_get_candle_history_empty(default_conf, mocker, caplog): def test_refresh_latest_ohlcv_inv_result(default_conf, mocker, caplog): - async def mock_get_candle_hist(pair, *args, **kwargs): - if pair == 'ETH/BTC': + if pair == "ETH/BTC": return [[]] else: raise TypeError() @@ -2499,7 +2684,7 @@ def test_refresh_latest_ohlcv_inv_result(default_conf, mocker, caplog): # Monkey-patch async function with empty result exchange._api_async.fetch_ohlcv = MagicMock(side_effect=mock_get_candle_hist) - pairs = [("ETH/BTC", "5m", ''), ("XRP/BTC", "5m", '')] + pairs = [("ETH/BTC", "5m", ""), ("XRP/BTC", "5m", "")] res = exchange.refresh_latest_ohlcv(pairs) assert exchange._klines assert exchange._api_async.fetch_ohlcv.call_count == 2 @@ -2537,29 +2722,30 @@ def test_get_next_limit_in_list(): @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_l2_order_book(default_conf, mocker, order_book_l2, exchange_name): - default_conf['exchange']['name'] = exchange_name + default_conf["exchange"]["name"] = exchange_name api_mock = MagicMock() api_mock.fetch_l2_order_book = order_book_l2 exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - order_book = exchange.fetch_l2_order_book(pair='ETH/BTC', limit=10) - assert 'bids' in order_book - assert 'asks' in order_book - assert len(order_book['bids']) == 10 - assert len(order_book['asks']) == 10 - assert api_mock.fetch_l2_order_book.call_args_list[0][0][0] == 'ETH/BTC' + order_book = exchange.fetch_l2_order_book(pair="ETH/BTC", limit=10) + assert "bids" in order_book + assert "asks" in order_book + assert len(order_book["bids"]) == 10 + assert len(order_book["asks"]) == 10 + assert api_mock.fetch_l2_order_book.call_args_list[0][0][0] == "ETH/BTC" for val in [1, 5, 10, 12, 20, 50, 100]: 
api_mock.fetch_l2_order_book.reset_mock() - order_book = exchange.fetch_l2_order_book(pair='ETH/BTC', limit=val) - assert api_mock.fetch_l2_order_book.call_args_list[0][0][0] == 'ETH/BTC' + order_book = exchange.fetch_l2_order_book(pair="ETH/BTC", limit=val) + assert api_mock.fetch_l2_order_book.call_args_list[0][0][0] == "ETH/BTC" # Not all exchanges support all limits for orderbook - if (not exchange.get_option('l2_limit_range') - or val in exchange.get_option('l2_limit_range')): + if not exchange.get_option("l2_limit_range") or val in exchange.get_option( + "l2_limit_range" + ): assert api_mock.fetch_l2_order_book.call_args_list[0][0][1] == val else: - next_limit = exchange.get_next_limit_in_list(val, exchange.get_option('l2_limit_range')) + next_limit = exchange.get_next_limit_in_list(val, exchange.get_option("l2_limit_range")) assert api_mock.fetch_l2_order_book.call_args_list[0][0][1] == next_limit @@ -2569,64 +2755,66 @@ def test_fetch_l2_order_book_exception(default_conf, mocker, exchange_name): with pytest.raises(OperationalException): api_mock.fetch_l2_order_book = MagicMock(side_effect=ccxt.NotSupported("Not supported")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_l2_order_book(pair='ETH/BTC', limit=50) + exchange.fetch_l2_order_book(pair="ETH/BTC", limit=50) with pytest.raises(TemporaryError): api_mock.fetch_l2_order_book = MagicMock(side_effect=ccxt.NetworkError("DeadBeef")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_l2_order_book(pair='ETH/BTC', limit=50) + exchange.fetch_l2_order_book(pair="ETH/BTC", limit=50) with pytest.raises(OperationalException): api_mock.fetch_l2_order_book = MagicMock(side_effect=ccxt.BaseError("DeadBeef")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_l2_order_book(pair='ETH/BTC', limit=50) + exchange.fetch_l2_order_book(pair="ETH/BTC", limit=50) @pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_entry_rate_data) -def test_get_entry_rate(mocker, default_conf, caplog, side, ask, bid, - last, last_ab, expected, time_machine) -> None: +def test_get_entry_rate( + mocker, default_conf, caplog, side, ask, bid, last, last_ab, expected, time_machine +) -> None: caplog.set_level(logging.DEBUG) start_dt = datetime(2023, 12, 1, 0, 10, 0, tzinfo=timezone.utc) time_machine.move_to(start_dt, tick=False) if last_ab is None: - del default_conf['entry_pricing']['price_last_balance'] + del default_conf["entry_pricing"]["price_last_balance"] else: - default_conf['entry_pricing']['price_last_balance'] = last_ab - default_conf['entry_pricing']['price_side'] = side + default_conf["entry_pricing"]["price_last_balance"] = last_ab + default_conf["entry_pricing"]["price_side"] = side exchange = get_patched_exchange(mocker, default_conf) - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'ask': ask, 'last': last, 'bid': bid}) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": ask, "last": last, "bid": bid}) log_msg = "Using cached entry rate for ETH/BTC." - assert exchange.get_rate('ETH/BTC', side="entry", is_short=False, refresh=True) == expected + assert exchange.get_rate("ETH/BTC", side="entry", is_short=False, refresh=True) == expected assert not log_has(log_msg, caplog) time_machine.move_to(start_dt + timedelta(minutes=4), tick=False) # Running a 2nd time without Refresh! 
caplog.clear() - assert exchange.get_rate('ETH/BTC', side="entry", is_short=False, refresh=False) == expected + assert exchange.get_rate("ETH/BTC", side="entry", is_short=False, refresh=False) == expected assert log_has(log_msg, caplog) time_machine.move_to(start_dt + timedelta(minutes=6), tick=False) # Running a 2nd time - forces refresh due to ttl timeout caplog.clear() - assert exchange.get_rate('ETH/BTC', side="entry", is_short=False, refresh=False) == expected + assert exchange.get_rate("ETH/BTC", side="entry", is_short=False, refresh=False) == expected assert not log_has(log_msg, caplog) # Running a 2nd time with Refresh on! caplog.clear() - assert exchange.get_rate('ETH/BTC', side="entry", is_short=False, refresh=True) == expected + assert exchange.get_rate("ETH/BTC", side="entry", is_short=False, refresh=True) == expected assert not log_has(log_msg, caplog) -@pytest.mark.parametrize('side,ask,bid,last,last_ab,expected', get_exit_rate_data) -def test_get_exit_rate(default_conf, mocker, caplog, side, bid, ask, - last, last_ab, expected, time_machine) -> None: +@pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_exit_rate_data) +def test_get_exit_rate( + default_conf, mocker, caplog, side, bid, ask, last, last_ab, expected, time_machine +) -> None: caplog.set_level(logging.DEBUG) start_dt = datetime(2023, 12, 1, 0, 10, 0, tzinfo=timezone.utc) time_machine.move_to(start_dt, tick=False) - default_conf['exit_pricing']['price_side'] = side + default_conf["exit_pricing"]["price_side"] = side if last_ab is not None: - default_conf['exit_pricing']['price_last_balance'] = last_ab - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'ask': ask, 'bid': bid, 'last': last}) + default_conf["exit_pricing"]["price_last_balance"] = last_ab + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": ask, "bid": bid, "last": last}) pair = "ETH/BTC" log_msg = "Using cached exit rate for ETH/BTC." 
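The entry/exit-rate tests in this hunk and the next all follow one recurring shape: parametrize the pricing configuration, stub fetch_ticker with a MagicMock, and assert which side of the ticker (bid, ask, or an interpolation towards last) ends up as the rate. A minimal, self-contained sketch of that pattern is shown below; it uses only pytest and unittest.mock, and a hypothetical pick_rate() helper stands in for Exchange.get_rate(), so it is an illustration of the test shape rather than of freqtrade's actual pricing logic.

    from unittest.mock import MagicMock

    import pytest


    def pick_rate(ticker: dict, price_side: str, last_balance: float) -> float:
        # Hypothetical helper: take the configured side and interpolate towards
        # 'last' by last_balance, mirroring the price_last_balance behaviour
        # the surrounding tests exercise.
        side_price = ticker[price_side]
        last = ticker.get("last") or side_price
        return side_price + last_balance * (last - side_price)


    @pytest.mark.parametrize(
        "price_side,last_balance,expected",
        [
            ("bid", 0.0, 0.5),   # pure bid side
            ("ask", 0.0, 1.0),   # pure ask side
            ("bid", 1.0, 42.0),  # fully weighted towards 'last'
        ],
    )
    def test_pick_rate(price_side, last_balance, expected):
        # Stub the ticker the same way the real tests stub fetch_ticker.
        fetch_ticker = MagicMock(return_value={"bid": 0.5, "ask": 1.0, "last": 42.0})
        assert pick_rate(fetch_ticker(), price_side, last_balance) == expected
        assert fetch_ticker.call_count == 1

The real tests additionally assert on the cached-rate log message and on the cache TTL via time_machine, which this sketch deliberately omits.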
@@ -2654,49 +2842,57 @@ def test_get_exit_rate(default_conf, mocker, caplog, side, bid, ask, assert not log_has(log_msg, caplog) -@pytest.mark.parametrize("entry,is_short,side,ask,bid,last,last_ab,expected", [ - ('entry', False, 'ask', None, 4, 4, 0, 4), # ask not available - ('entry', False, 'ask', None, None, 4, 0, 4), # ask not available - ('entry', False, 'bid', 6, None, 4, 0, 5), # bid not available - ('entry', False, 'bid', None, None, 4, 0, 5), # No rate available - ('exit', False, 'ask', None, 4, 4, 0, 4), # ask not available - ('exit', False, 'ask', None, None, 4, 0, 4), # ask not available - ('exit', False, 'bid', 6, None, 4, 0, 5), # bid not available - ('exit', False, 'bid', None, None, 4, 0, 5), # bid not available -]) -def test_get_ticker_rate_error(mocker, entry, default_conf, caplog, side, is_short, ask, bid, - last, last_ab, expected) -> None: +@pytest.mark.parametrize( + "entry,is_short,side,ask,bid,last,last_ab,expected", + [ + ("entry", False, "ask", None, 4, 4, 0, 4), # ask not available + ("entry", False, "ask", None, None, 4, 0, 4), # ask not available + ("entry", False, "bid", 6, None, 4, 0, 5), # bid not available + ("entry", False, "bid", None, None, 4, 0, 5), # No rate available + ("exit", False, "ask", None, 4, 4, 0, 4), # ask not available + ("exit", False, "ask", None, None, 4, 0, 4), # ask not available + ("exit", False, "bid", 6, None, 4, 0, 5), # bid not available + ("exit", False, "bid", None, None, 4, 0, 5), # bid not available + ], +) +def test_get_ticker_rate_error( + mocker, entry, default_conf, caplog, side, is_short, ask, bid, last, last_ab, expected +) -> None: caplog.set_level(logging.DEBUG) - default_conf['entry_pricing']['price_last_balance'] = last_ab - default_conf['entry_pricing']['price_side'] = side - default_conf['exit_pricing']['price_side'] = side - default_conf['exit_pricing']['price_last_balance'] = last_ab + default_conf["entry_pricing"]["price_last_balance"] = last_ab + default_conf["entry_pricing"]["price_side"] = side + default_conf["exit_pricing"]["price_side"] = side + default_conf["exit_pricing"]["price_last_balance"] = last_ab exchange = get_patched_exchange(mocker, default_conf) - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'ask': ask, 'last': last, 'bid': bid}) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": ask, "last": last, "bid": bid}) with pytest.raises(PricingError): - exchange.get_rate('ETH/BTC', refresh=True, side=entry, is_short=is_short) + exchange.get_rate("ETH/BTC", refresh=True, side=entry, is_short=is_short) -@pytest.mark.parametrize('is_short,side,expected', [ - (False, 'bid', 0.043936), # Value from order_book_l2 fixture - bids side - (False, 'ask', 0.043949), # Value from order_book_l2 fixture - asks side - (False, 'other', 0.043936), # Value from order_book_l2 fixture - bids side - (False, 'same', 0.043949), # Value from order_book_l2 fixture - asks side - (True, 'bid', 0.043936), # Value from order_book_l2 fixture - bids side - (True, 'ask', 0.043949), # Value from order_book_l2 fixture - asks side - (True, 'other', 0.043949), # Value from order_book_l2 fixture - asks side - (True, 'same', 0.043936), # Value from order_book_l2 fixture - bids side -]) +@pytest.mark.parametrize( + "is_short,side,expected", + [ + (False, "bid", 0.043936), # Value from order_book_l2 fixture - bids side + (False, "ask", 0.043949), # Value from order_book_l2 fixture - asks side + (False, "other", 0.043936), # Value from order_book_l2 fixture - bids side + (False, "same", 0.043949), # Value from order_book_l2 
fixture - asks side + (True, "bid", 0.043936), # Value from order_book_l2 fixture - bids side + (True, "ask", 0.043949), # Value from order_book_l2 fixture - asks side + (True, "other", 0.043949), # Value from order_book_l2 fixture - asks side + (True, "same", 0.043936), # Value from order_book_l2 fixture - bids side + ], +) def test_get_exit_rate_orderbook( - default_conf, mocker, caplog, is_short, side, expected, order_book_l2): + default_conf, mocker, caplog, is_short, side, expected, order_book_l2 +): caplog.set_level(logging.DEBUG) # Test orderbook mode - default_conf['exit_pricing']['price_side'] = side - default_conf['exit_pricing']['use_order_book'] = True - default_conf['exit_pricing']['order_book_top'] = 1 + default_conf["exit_pricing"]["price_side"] = side + default_conf["exit_pricing"]["use_order_book"] = True + default_conf["exit_pricing"]["order_book_top"] = 1 pair = "ETH/BTC" - mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2) + mocker.patch(f"{EXMS}.fetch_l2_order_book", order_book_l2) exchange = get_patched_exchange(mocker, default_conf) rate = exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) assert not log_has("Using cached exit rate for ETH/BTC.", caplog) @@ -2709,98 +2905,119 @@ def test_get_exit_rate_orderbook( def test_get_exit_rate_orderbook_exception(default_conf, mocker, caplog): # Test orderbook mode - default_conf['exit_pricing']['price_side'] = 'ask' - default_conf['exit_pricing']['use_order_book'] = True - default_conf['exit_pricing']['order_book_top'] = 1 + default_conf["exit_pricing"]["price_side"] = "ask" + default_conf["exit_pricing"]["use_order_book"] = True + default_conf["exit_pricing"]["order_book_top"] = 1 pair = "ETH/BTC" # Test What happens if the exchange returns an empty orderbook. - mocker.patch(f'{EXMS}.fetch_l2_order_book', return_value={'bids': [[]], 'asks': [[]]}) + mocker.patch(f"{EXMS}.fetch_l2_order_book", return_value={"bids": [[]], "asks": [[]]}) exchange = get_patched_exchange(mocker, default_conf) with pytest.raises(PricingError): exchange.get_rate(pair, refresh=True, side="exit", is_short=False) - assert log_has_re(rf"{pair} - Exit Price at location 1 from orderbook " - rf"could not be determined\..*", - caplog) + assert log_has_re( + rf"{pair} - Exit Price at location 1 from orderbook " rf"could not be determined\..*", + caplog, + ) -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_get_exit_rate_exception(default_conf, mocker, is_short): # Ticker on one side can be empty in certain circumstances. 
- default_conf['exit_pricing']['price_side'] = 'ask' + default_conf["exit_pricing"]["price_side"] = "ask" pair = "ETH/BTC" - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'ask': None, 'bid': 0.12, 'last': None}) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": None, "bid": 0.12, "last": None}) exchange = get_patched_exchange(mocker, default_conf) with pytest.raises(PricingError, match=r"Exit-Rate for ETH/BTC was empty."): exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) - exchange._config['exit_pricing']['price_side'] = 'bid' + exchange._config["exit_pricing"]["price_side"] = "bid" assert exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) == 0.12 # Reverse sides - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'ask': 0.13, 'bid': None, 'last': None}) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": 0.13, "bid": None, "last": None}) with pytest.raises(PricingError, match=r"Exit-Rate for ETH/BTC was empty."): exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) - exchange._config['exit_pricing']['price_side'] = 'ask' + exchange._config["exit_pricing"]["price_side"] = "ask" assert exchange.get_rate(pair, refresh=True, side="exit", is_short=is_short) == 0.13 @pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_entry_rate_data) -@pytest.mark.parametrize("side2", ['bid', 'ask']) +@pytest.mark.parametrize("side2", ["bid", "ask"]) @pytest.mark.parametrize("use_order_book", [True, False]) -def test_get_rates_testing_entry(mocker, default_conf, caplog, side, ask, bid, - last, last_ab, expected, - side2, use_order_book, order_book_l2) -> None: +def test_get_rates_testing_entry( + mocker, + default_conf, + caplog, + side, + ask, + bid, + last, + last_ab, + expected, + side2, + use_order_book, + order_book_l2, +) -> None: caplog.set_level(logging.DEBUG) if last_ab is None: - del default_conf['entry_pricing']['price_last_balance'] + del default_conf["entry_pricing"]["price_last_balance"] else: - default_conf['entry_pricing']['price_last_balance'] = last_ab - default_conf['entry_pricing']['price_side'] = side - default_conf['exit_pricing']['price_side'] = side2 - default_conf['exit_pricing']['use_order_book'] = use_order_book + default_conf["entry_pricing"]["price_last_balance"] = last_ab + default_conf["entry_pricing"]["price_side"] = side + default_conf["exit_pricing"]["price_side"] = side2 + default_conf["exit_pricing"]["use_order_book"] = use_order_book api_mock = MagicMock() api_mock.fetch_l2_order_book = order_book_l2 - api_mock.fetch_ticker = MagicMock( - return_value={'ask': ask, 'last': last, 'bid': bid}) + api_mock.fetch_ticker = MagicMock(return_value={"ask": ask, "last": last, "bid": bid}) exchange = get_patched_exchange(mocker, default_conf, api_mock) - assert exchange.get_rates('ETH/BTC', refresh=True, is_short=False)[0] == expected + assert exchange.get_rates("ETH/BTC", refresh=True, is_short=False)[0] == expected assert not log_has("Using cached buy rate for ETH/BTC.", caplog) api_mock.fetch_l2_order_book.reset_mock() api_mock.fetch_ticker.reset_mock() - assert exchange.get_rates('ETH/BTC', refresh=False, is_short=False)[0] == expected + assert exchange.get_rates("ETH/BTC", refresh=False, is_short=False)[0] == expected assert log_has("Using cached buy rate for ETH/BTC.", caplog) assert api_mock.fetch_l2_order_book.call_count == 0 assert api_mock.fetch_ticker.call_count == 0 # Running a 2nd time with Refresh on! 
caplog.clear() - assert exchange.get_rates('ETH/BTC', refresh=True, is_short=False)[0] == expected + assert exchange.get_rates("ETH/BTC", refresh=True, is_short=False)[0] == expected assert not log_has("Using cached buy rate for ETH/BTC.", caplog) assert api_mock.fetch_l2_order_book.call_count == int(use_order_book) assert api_mock.fetch_ticker.call_count == 1 -@pytest.mark.parametrize('side,ask,bid,last,last_ab,expected', get_exit_rate_data) -@pytest.mark.parametrize("side2", ['bid', 'ask']) +@pytest.mark.parametrize("side,ask,bid,last,last_ab,expected", get_exit_rate_data) +@pytest.mark.parametrize("side2", ["bid", "ask"]) @pytest.mark.parametrize("use_order_book", [True, False]) -def test_get_rates_testing_exit(default_conf, mocker, caplog, side, bid, ask, - last, last_ab, expected, - side2, use_order_book, order_book_l2) -> None: +def test_get_rates_testing_exit( + default_conf, + mocker, + caplog, + side, + bid, + ask, + last, + last_ab, + expected, + side2, + use_order_book, + order_book_l2, +) -> None: caplog.set_level(logging.DEBUG) - default_conf['exit_pricing']['price_side'] = side + default_conf["exit_pricing"]["price_side"] = side if last_ab is not None: - default_conf['exit_pricing']['price_last_balance'] = last_ab + default_conf["exit_pricing"]["price_last_balance"] = last_ab - default_conf['entry_pricing']['price_side'] = side2 - default_conf['entry_pricing']['use_order_book'] = use_order_book + default_conf["entry_pricing"]["price_side"] = side2 + default_conf["entry_pricing"]["use_order_book"] = use_order_book api_mock = MagicMock() api_mock.fetch_l2_order_book = order_book_l2 - api_mock.fetch_ticker = MagicMock( - return_value={'ask': ask, 'last': last, 'bid': bid}) + api_mock.fetch_ticker = MagicMock(return_value={"ask": ask, "last": last, "bid": bid}) exchange = get_patched_exchange(mocker, default_conf, api_mock) pair = "ETH/BTC" @@ -2840,15 +3057,16 @@ async def test___async_get_candle_history_sort(default_conf, mocker, exchange_na [1527831300000, 0.07655, 0.07657, 0.07655, 0.07657, 1.1753], [1527831000000, 0.07654, 0.07654, 0.07651, 0.07651, 0.8073060299999999], [1527830700000, 0.07652, 0.07652, 0.07651, 0.07652, 10.04822687], - [1527830400000, 0.07649, 0.07651, 0.07649, 0.07651, 2.5734867] + [1527830400000, 0.07649, 0.07651, 0.07649, 0.07651, 2.5734867], ] exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) - sort_mock = mocker.patch('freqtrade.exchange.exchange.sorted', MagicMock(side_effect=sort_data)) + sort_mock = mocker.patch("freqtrade.exchange.exchange.sorted", MagicMock(side_effect=sort_data)) # Test the OHLCV data sort res = await exchange._async_get_candle_history( - 'ETH/BTC', default_conf['timeframe'], CandleType.SPOT) - assert res[0] == 'ETH/BTC' + "ETH/BTC", default_conf["timeframe"], CandleType.SPOT + ) + assert res[0] == "ETH/BTC" res_ohlcv = res[3] assert sort_mock.call_count == 1 @@ -2877,16 +3095,17 @@ async def test___async_get_candle_history_sort(default_conf, mocker, exchange_na [1527829500000, 0.0766, 0.07675, 0.0765, 0.07675, 8.36203831], [1527829800000, 0.07675, 0.07677999, 0.07620002, 0.076695, 119.22963884], [1527830100000, 0.076695, 0.07671, 0.07624171, 0.07671, 1.80689244], - [1527830400000, 0.07671, 0.07674399, 0.07629216, 0.07655213, 2.31452783] + [1527830400000, 0.07671, 0.07674399, 0.07629216, 0.07655213, 2.31452783], ] exchange._api_async.fetch_ohlcv = get_mock_coro(ohlcv) # Reset sort mock - sort_mock = mocker.patch('freqtrade.exchange.sorted', 
MagicMock(side_effect=sort_data)) + sort_mock = mocker.patch("freqtrade.exchange.sorted", MagicMock(side_effect=sort_data)) # Test the OHLCV data sort res = await exchange._async_get_candle_history( - 'ETH/BTC', default_conf['timeframe'], CandleType.SPOT) - assert res[0] == 'ETH/BTC' - assert res[1] == default_conf['timeframe'] + "ETH/BTC", default_conf["timeframe"], CandleType.SPOT + ) + assert res[0] == "ETH/BTC" + assert res[1] == default_conf["timeframe"] res_ohlcv = res[3] # Sorted not called again - data is already in order assert sort_mock.call_count == 0 @@ -2906,63 +3125,72 @@ async def test___async_get_candle_history_sort(default_conf, mocker, exchange_na @pytest.mark.parametrize("exchange_name", EXCHANGES) -async def test__async_fetch_trades(default_conf, mocker, caplog, exchange_name, - fetch_trades_result): +async def test__async_fetch_trades( + default_conf, mocker, caplog, exchange_name, fetch_trades_result +): caplog.set_level(logging.DEBUG) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) # Monkey-patch async function exchange._api_async.fetch_trades = get_mock_coro(fetch_trades_result) - pair = 'ETH/BTC' + pair = "ETH/BTC" res, pagid = await exchange._async_fetch_trades(pair, since=None, params=None) assert isinstance(res, list) assert isinstance(res[0], list) assert isinstance(res[1], list) - if exchange._trades_pagination == 'id': - if exchange_name == 'kraken': + if exchange._trades_pagination == "id": + if exchange_name == "kraken": assert pagid == 1565798399872512133 else: - assert pagid == '126181333' + assert pagid == "126181333" else: assert pagid == 1565798399872 assert exchange._api_async.fetch_trades.call_count == 1 assert exchange._api_async.fetch_trades.call_args[0][0] == pair - assert exchange._api_async.fetch_trades.call_args[1]['limit'] == 1000 + assert exchange._api_async.fetch_trades.call_args[1]["limit"] == 1000 assert log_has_re(f"Fetching trades for pair {pair}, since .*", caplog) caplog.clear() exchange._api_async.fetch_trades.reset_mock() - res, pagid = await exchange._async_fetch_trades(pair, since=None, params={'from': '123'}) + res, pagid = await exchange._async_fetch_trades(pair, since=None, params={"from": "123"}) assert exchange._api_async.fetch_trades.call_count == 1 assert exchange._api_async.fetch_trades.call_args[0][0] == pair - assert exchange._api_async.fetch_trades.call_args[1]['limit'] == 1000 - assert exchange._api_async.fetch_trades.call_args[1]['params'] == {'from': '123'} + assert exchange._api_async.fetch_trades.call_args[1]["limit"] == 1000 + assert exchange._api_async.fetch_trades.call_args[1]["params"] == {"from": "123"} - if exchange._trades_pagination == 'id': - if exchange_name == 'kraken': + if exchange._trades_pagination == "id": + if exchange_name == "kraken": assert pagid == 1565798399872512133 else: - assert pagid == '126181333' + assert pagid == "126181333" else: assert pagid == 1565798399872 assert log_has_re(f"Fetching trades for pair {pair}, params: .*", caplog) exchange.close() - await async_ccxt_exception(mocker, default_conf, MagicMock(), - "_async_fetch_trades", "fetch_trades", - pair='ABCD/BTC', since=None) + await async_ccxt_exception( + mocker, + default_conf, + MagicMock(), + "_async_fetch_trades", + "fetch_trades", + pair="ABCD/BTC", + since=None, + ) api_mock = MagicMock() - with pytest.raises(OperationalException, match=r'Could not fetch trade data*'): + with pytest.raises(OperationalException, match=r"Could not fetch trade data*"): api_mock.fetch_trades = 
MagicMock(side_effect=ccxt.BaseError("Unknown error")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) await exchange._async_fetch_trades(pair, since=dt_ts(dt_now() - timedelta(seconds=2000))) exchange.close() - with pytest.raises(OperationalException, match=r'Exchange.* does not support fetching ' - r'historical trade data\..*'): + with pytest.raises( + OperationalException, + match=r"Exchange.* does not support fetching " r"historical trade data\..*", + ): api_mock.fetch_trades = MagicMock(side_effect=ccxt.NotSupported("Not supported")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) await exchange._async_fetch_trades(pair, since=dt_ts(dt_now() - timedelta(seconds=2000))) @@ -2970,37 +3198,44 @@ async def test__async_fetch_trades(default_conf, mocker, caplog, exchange_name, @pytest.mark.parametrize("exchange_name", EXCHANGES) -async def test__async_fetch_trades_contract_size(default_conf, mocker, caplog, exchange_name, - fetch_trades_result): +async def test__async_fetch_trades_contract_size( + default_conf, mocker, caplog, exchange_name, fetch_trades_result +): caplog.set_level(logging.DEBUG) - default_conf['margin_mode'] = 'isolated' - default_conf['trading_mode'] = 'futures' + default_conf["margin_mode"] = "isolated" + default_conf["trading_mode"] = "futures" exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) # Monkey-patch async function - exchange._api_async.fetch_trades = get_mock_coro([ - {'info': {'a': 126181333, - 'p': '0.01952600', - 'q': '0.01200000', - 'f': 138604158, - 'l': 138604158, - 'T': 1565798399872, - 'm': True, - 'M': True}, - 'timestamp': 1565798399872, - 'datetime': '2019-08-14T15:59:59.872Z', - 'symbol': 'ETH/USDT:USDT', - 'id': '126181383', - 'order': None, - 'type': None, - 'takerOrMaker': None, - 'side': 'sell', - 'price': 2.0, - 'amount': 30.0, - 'cost': 60.0, - 'fee': None}] + exchange._api_async.fetch_trades = get_mock_coro( + [ + { + "info": { + "a": 126181333, + "p": "0.01952600", + "q": "0.01200000", + "f": 138604158, + "l": 138604158, + "T": 1565798399872, + "m": True, + "M": True, + }, + "timestamp": 1565798399872, + "datetime": "2019-08-14T15:59:59.872Z", + "symbol": "ETH/USDT:USDT", + "id": "126181383", + "order": None, + "type": None, + "takerOrMaker": None, + "side": "sell", + "price": 2.0, + "amount": 30.0, + "cost": 60.0, + "fee": None, + } + ] ) - pair = 'ETH/USDT:USDT' + pair = "ETH/USDT:USDT" res, pagid = await exchange._async_fetch_trades(pair, since=None, params=None) assert res[0][5] == 300 assert pagid is not None @@ -3009,90 +3244,99 @@ async def test__async_fetch_trades_contract_size(default_conf, mocker, caplog, e @pytest.mark.asyncio @pytest.mark.parametrize("exchange_name", EXCHANGES) -async def test__async_get_trade_history_id(default_conf, mocker, exchange_name, - fetch_trades_result): - +async def test__async_get_trade_history_id( + default_conf, mocker, exchange_name, fetch_trades_result +): exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - if exchange._trades_pagination != 'id': + if exchange._trades_pagination != "id": exchange.close() pytest.skip("Exchange does not support pagination by trade id") pagination_arg = exchange._trades_pagination_arg async def mock_get_trade_hist(pair, *args, **kwargs): - if 'since' in kwargs: + if "since" in kwargs: # Return first 3 return fetch_trades_result[:-2] - elif kwargs.get('params', {}).get(pagination_arg) in ( - fetch_trades_result[-3]['id'], 1565798399752): + elif 
kwargs.get("params", {}).get(pagination_arg) in ( + fetch_trades_result[-3]["id"], + 1565798399752, + ): # Return 2 return fetch_trades_result[-3:-1] else: # Return last 2 return fetch_trades_result[-2:] + # Monkey-patch async function exchange._api_async.fetch_trades = MagicMock(side_effect=mock_get_trade_hist) - pair = 'ETH/BTC' - ret = await exchange._async_get_trade_history_id(pair, - since=fetch_trades_result[0]['timestamp'], - until=fetch_trades_result[-1]['timestamp'] - 1) + pair = "ETH/BTC" + ret = await exchange._async_get_trade_history_id( + pair, + since=fetch_trades_result[0]["timestamp"], + until=fetch_trades_result[-1]["timestamp"] - 1, + ) assert isinstance(ret, tuple) assert ret[0] == pair assert isinstance(ret[1], list) - if exchange_name != 'kraken': + if exchange_name != "kraken": assert len(ret[1]) == len(fetch_trades_result) assert exchange._api_async.fetch_trades.call_count == 3 fetch_trades_cal = exchange._api_async.fetch_trades.call_args_list # first call (using since, not fromId) assert fetch_trades_cal[0][0][0] == pair - assert fetch_trades_cal[0][1]['since'] == fetch_trades_result[0]['timestamp'] + assert fetch_trades_cal[0][1]["since"] == fetch_trades_result[0]["timestamp"] # 2nd call assert fetch_trades_cal[1][0][0] == pair - assert 'params' in fetch_trades_cal[1][1] - assert exchange._ft_has['trades_pagination_arg'] in fetch_trades_cal[1][1]['params'] + assert "params" in fetch_trades_cal[1][1] + assert exchange._ft_has["trades_pagination_arg"] in fetch_trades_cal[1][1]["params"] -@pytest.mark.parametrize('trade_id, expected', [ - ('1234', True), - ('170544369512007228', True), - ('1705443695120072285', True), - ('170544369512007228555', True), -]) +@pytest.mark.parametrize( + "trade_id, expected", + [ + ("1234", True), + ("170544369512007228", True), + ("1705443695120072285", True), + ("170544369512007228555", True), + ], +) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test__valid_trade_pagination_id(mocker, default_conf_usdt, exchange_name, trade_id, expected): - if exchange_name == 'kraken': + if exchange_name == "kraken": pytest.skip("Kraken has a different pagination id format, and an explicit test.") exchange = get_patched_exchange(mocker, default_conf_usdt, id=exchange_name) - assert exchange._valid_trade_pagination_id('XRP/USDT', trade_id) == expected + assert exchange._valid_trade_pagination_id("XRP/USDT", trade_id) == expected @pytest.mark.asyncio @pytest.mark.parametrize("exchange_name", EXCHANGES) -async def test__async_get_trade_history_time(default_conf, mocker, caplog, exchange_name, - fetch_trades_result): - +async def test__async_get_trade_history_time( + default_conf, mocker, caplog, exchange_name, fetch_trades_result +): caplog.set_level(logging.DEBUG) async def mock_get_trade_hist(pair, *args, **kwargs): - if kwargs['since'] == fetch_trades_result[0]['timestamp']: + if kwargs["since"] == fetch_trades_result[0]["timestamp"]: return fetch_trades_result[:-1] else: return fetch_trades_result[-1:] caplog.set_level(logging.DEBUG) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - if exchange._trades_pagination != 'time': + if exchange._trades_pagination != "time": exchange.close() pytest.skip("Exchange does not support pagination by timestamp") # Monkey-patch async function exchange._api_async.fetch_trades = MagicMock(side_effect=mock_get_trade_hist) - pair = 'ETH/BTC' + pair = "ETH/BTC" ret = await exchange._async_get_trade_history_time( pair, - since=fetch_trades_result[0]['timestamp'], - 
until=fetch_trades_result[-1]['timestamp'] - 1) + since=fetch_trades_result[0]["timestamp"], + until=fetch_trades_result[-1]["timestamp"] - 1, + ) assert isinstance(ret, tuple) assert ret[0] == pair assert isinstance(ret[1], list) @@ -3101,23 +3345,23 @@ async def test__async_get_trade_history_time(default_conf, mocker, caplog, excha fetch_trades_cal = exchange._api_async.fetch_trades.call_args_list # first call (using since, not fromId) assert fetch_trades_cal[0][0][0] == pair - assert fetch_trades_cal[0][1]['since'] == fetch_trades_result[0]['timestamp'] + assert fetch_trades_cal[0][1]["since"] == fetch_trades_result[0]["timestamp"] # 2nd call assert fetch_trades_cal[1][0][0] == pair - assert fetch_trades_cal[1][1]['since'] == fetch_trades_result[-2]['timestamp'] + assert fetch_trades_cal[1][1]["since"] == fetch_trades_result[-2]["timestamp"] assert log_has_re(r"Stopping because until was reached.*", caplog) @pytest.mark.asyncio @pytest.mark.parametrize("exchange_name", EXCHANGES) -async def test__async_get_trade_history_time_empty(default_conf, mocker, caplog, exchange_name, - trades_history): - +async def test__async_get_trade_history_time_empty( + default_conf, mocker, caplog, exchange_name, trades_history +): caplog.set_level(logging.DEBUG) async def mock_get_trade_hist(pair, *args, **kwargs): - if kwargs['since'] == trades_history[0][0]: + if kwargs["since"] == trades_history[0][0]: return trades_history[:-1], trades_history[:-1][-1][0] else: return [], None @@ -3126,9 +3370,10 @@ async def test__async_get_trade_history_time_empty(default_conf, mocker, caplog, exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) # Monkey-patch async function exchange._async_fetch_trades = MagicMock(side_effect=mock_get_trade_hist) - pair = 'ETH/BTC' - ret = await exchange._async_get_trade_history_time(pair, since=trades_history[0][0], - until=trades_history[-1][0] - 1) + pair = "ETH/BTC" + ret = await exchange._async_get_trade_history_time( + pair, since=trades_history[0][0], until=trades_history[-1][0] - 1 + ) assert isinstance(ret, tuple) assert ret[0] == pair assert isinstance(ret[1], list) @@ -3137,24 +3382,32 @@ async def test__async_get_trade_history_time_empty(default_conf, mocker, caplog, fetch_trades_cal = exchange._async_fetch_trades.call_args_list # first call (using since, not fromId) assert fetch_trades_cal[0][0][0] == pair - assert fetch_trades_cal[0][1]['since'] == trades_history[0][0] + assert fetch_trades_cal[0][1]["since"] == trades_history[0][0] @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_historic_trades(default_conf, mocker, caplog, exchange_name, trades_history): - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - pair = 'ETH/BTC' + pair = "ETH/BTC" exchange._async_get_trade_history_id = get_mock_coro((pair, trades_history)) exchange._async_get_trade_history_time = get_mock_coro((pair, trades_history)) - ret = exchange.get_historic_trades(pair, since=trades_history[0][0], - until=trades_history[-1][0]) + ret = exchange.get_historic_trades( + pair, since=trades_history[0][0], until=trades_history[-1][0] + ) # Depending on the exchange, one or the other method should be called - assert sum([exchange._async_get_trade_history_id.call_count, - exchange._async_get_trade_history_time.call_count]) == 1 + assert ( + sum( + [ + exchange._async_get_trade_history_id.call_count, + 
exchange._async_get_trade_history_time.call_count, + ] + ) + == 1 + ) assert len(ret) == 2 assert ret[0] == pair @@ -3162,91 +3415,101 @@ def test_get_historic_trades(default_conf, mocker, caplog, exchange_name, trades @pytest.mark.parametrize("exchange_name", EXCHANGES) -def test_get_historic_trades_notsupported(default_conf, mocker, caplog, exchange_name, - trades_history): - mocker.patch(f'{EXMS}.exchange_has', return_value=False) +def test_get_historic_trades_notsupported( + default_conf, mocker, caplog, exchange_name, trades_history +): + mocker.patch(f"{EXMS}.exchange_has", return_value=False) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - pair = 'ETH/BTC' + pair = "ETH/BTC" - with pytest.raises(OperationalException, - match="This exchange does not support downloading Trades."): - exchange.get_historic_trades(pair, since=trades_history[0][0], - until=trades_history[-1][0]) + with pytest.raises( + OperationalException, match="This exchange does not support downloading Trades." + ): + exchange.get_historic_trades(pair, since=trades_history[0][0], until=trades_history[-1][0]) @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_cancel_order_dry_run(default_conf, mocker, exchange_name): - default_conf['dry_run'] = True + default_conf["dry_run"] = True exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=True) - assert exchange.cancel_order(order_id='123', pair='TKN/BTC') == {} - assert exchange.cancel_stoploss_order(order_id='123', pair='TKN/BTC') == {} + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=True) + assert exchange.cancel_order(order_id="123", pair="TKN/BTC") == {} + assert exchange.cancel_stoploss_order(order_id="123", pair="TKN/BTC") == {} order = exchange.create_order( - pair='ETH/BTC', - ordertype='limit', - side='buy', + pair="ETH/BTC", + ordertype="limit", + side="buy", amount=5, rate=0.55, - time_in_force='gtc', + time_in_force="gtc", leverage=1.0, ) - cancel_order = exchange.cancel_order(order_id=order['id'], pair='ETH/BTC') - assert order['id'] == cancel_order['id'] - assert order['amount'] == cancel_order['amount'] - assert order['symbol'] == cancel_order['symbol'] - assert cancel_order['status'] == 'canceled' + cancel_order = exchange.cancel_order(order_id=order["id"], pair="ETH/BTC") + assert order["id"] == cancel_order["id"] + assert order["amount"] == cancel_order["amount"] + assert order["symbol"] == cancel_order["symbol"] + assert cancel_order["status"] == "canceled" @pytest.mark.parametrize("exchange_name", EXCHANGES) -@pytest.mark.parametrize("order,result", [ - ({'status': 'closed', 'filled': 10}, False), - ({'status': 'closed', 'filled': 0.0}, True), - ({'status': 'canceled', 'filled': 0.0}, True), - ({'status': 'canceled', 'filled': 10.0}, False), - ({'status': 'unknown', 'filled': 10.0}, False), - ({'result': 'testest123'}, False), -]) +@pytest.mark.parametrize( + "order,result", + [ + ({"status": "closed", "filled": 10}, False), + ({"status": "closed", "filled": 0.0}, True), + ({"status": "canceled", "filled": 0.0}, True), + ({"status": "canceled", "filled": 10.0}, False), + ({"status": "unknown", "filled": 10.0}, False), + ({"result": "testest123"}, False), + ], +) def test_check_order_canceled_empty(mocker, default_conf, exchange_name, order, result): exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) assert exchange.check_order_canceled_empty(order) == result 
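The test_check_order_canceled_empty cases above pin down the method's contract without showing its body. As a minimal sketch of the behaviour those parametrized expectations imply (an inference for illustration, not freqtrade's actual implementation): an order only counts as "canceled and empty" when it reached a final state with zero fill, and malformed inputs simply fail the check.

# Sketch inferred from the parametrized expectations above - not freqtrade's code.
def check_order_canceled_empty(order: dict) -> bool:
    # Missing keys (e.g. {"result": "testest123"}) must not raise; they just
    # fail the check, matching the False expectation for that case.
    return order.get("status") in ("closed", "canceled") and order.get("filled") == 0.0

assert check_order_canceled_empty({"status": "closed", "filled": 0.0}) is True
assert check_order_canceled_empty({"status": "canceled", "filled": 10.0}) is False
assert check_order_canceled_empty({"result": "testest123"}) is False
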
@pytest.mark.parametrize("exchange_name", EXCHANGES) -@pytest.mark.parametrize("order,result", [ - ({'status': 'closed', 'amount': 10, 'fee': {}}, True), - ({'status': 'closed', 'amount': 0.0, 'fee': {}}, True), - ({'status': 'canceled', 'amount': 0.0, 'fee': {}}, True), - ({'status': 'canceled', 'amount': 10.0}, False), - ({'amount': 10.0, 'fee': {}}, False), - ({'result': 'testest123'}, False), - ('hello_world', False), - ({'status': 'canceled', 'amount': None, 'fee': None}, False), - ({'status': 'canceled', 'filled': None, 'amount': None, 'fee': None}, False), - -]) +@pytest.mark.parametrize( + "order,result", + [ + ({"status": "closed", "amount": 10, "fee": {}}, True), + ({"status": "closed", "amount": 0.0, "fee": {}}, True), + ({"status": "canceled", "amount": 0.0, "fee": {}}, True), + ({"status": "canceled", "amount": 10.0}, False), + ({"amount": 10.0, "fee": {}}, False), + ({"result": "testest123"}, False), + ("hello_world", False), + ({"status": "canceled", "amount": None, "fee": None}, False), + ({"status": "canceled", "filled": None, "amount": None, "fee": None}, False), + ], +) def test_is_cancel_order_result_suitable(mocker, default_conf, exchange_name, order, result): exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) assert exchange.is_cancel_order_result_suitable(order) == result @pytest.mark.parametrize("exchange_name", EXCHANGES) -@pytest.mark.parametrize("corder,call_corder,call_forder", [ - ({'status': 'closed', 'amount': 10, 'fee': {}}, 1, 0), - ({'amount': 10, 'fee': {}}, 1, 1), -]) -def test_cancel_order_with_result(default_conf, mocker, exchange_name, corder, - call_corder, call_forder): - default_conf['dry_run'] = False +@pytest.mark.parametrize( + "corder,call_corder,call_forder", + [ + ({"status": "closed", "amount": 10, "fee": {}}, 1, 0), + ({"amount": 10, "fee": {}}, 1, 1), + ], +) +def test_cancel_order_with_result( + default_conf, mocker, exchange_name, corder, call_corder, call_forder +): + default_conf["dry_run"] = False mocker.patch(f"{EXMS}.exchange_has", return_value=True) api_mock = MagicMock() api_mock.cancel_order = MagicMock(return_value=corder) api_mock.fetch_order = MagicMock(return_value={}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - res = exchange.cancel_order_with_result('1234', 'ETH/BTC', 1234) + res = exchange.cancel_order_with_result("1234", "ETH/BTC", 1234) assert isinstance(res, dict) assert api_mock.cancel_order.call_count == call_corder assert api_mock.fetch_order.call_count == call_forder @@ -3254,136 +3517,145 @@ def test_cancel_order_with_result(default_conf, mocker, exchange_name, corder, @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_cancel_order_with_result_error(default_conf, mocker, exchange_name, caplog): - default_conf['dry_run'] = False + default_conf["dry_run"] = False mocker.patch(f"{EXMS}.exchange_has", return_value=True) api_mock = MagicMock() api_mock.cancel_order = MagicMock(side_effect=ccxt.InvalidOrder("Did not find order")) api_mock.fetch_order = MagicMock(side_effect=ccxt.InvalidOrder("Did not find order")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - res = exchange.cancel_order_with_result('1234', 'ETH/BTC', 1541) + res = exchange.cancel_order_with_result("1234", "ETH/BTC", 1541) assert isinstance(res, dict) assert log_has("Could not cancel order 1234 for ETH/BTC.", caplog) assert log_has("Could not fetch cancelled order 1234.", caplog) - assert res['amount'] == 1541 + assert res["amount"] == 1541 # 
Ensure that if not dry_run, we should call API @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_cancel_order(default_conf, mocker, exchange_name): - default_conf['dry_run'] = False + default_conf["dry_run"] = False api_mock = MagicMock() - api_mock.cancel_order = MagicMock(return_value={'id': '123'}) + api_mock.cancel_order = MagicMock(return_value={"id": "123"}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert exchange.cancel_order(order_id='_', pair='TKN/BTC') == {'id': '123'} + assert exchange.cancel_order(order_id="_", pair="TKN/BTC") == {"id": "123"} with pytest.raises(InvalidOrderException): api_mock.cancel_order = MagicMock(side_effect=ccxt.InvalidOrder("Did not find order")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.cancel_order(order_id='_', pair='TKN/BTC') + exchange.cancel_order(order_id="_", pair="TKN/BTC") assert api_mock.cancel_order.call_count == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "cancel_order", "cancel_order", - order_id='_', pair='TKN/BTC') + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "cancel_order", + "cancel_order", + order_id="_", + pair="TKN/BTC", + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_cancel_stoploss_order(default_conf, mocker, exchange_name): - default_conf['dry_run'] = False + default_conf["dry_run"] = False api_mock = MagicMock() - api_mock.cancel_order = MagicMock(return_value={'id': '123'}) + api_mock.cancel_order = MagicMock(return_value={"id": "123"}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert exchange.cancel_stoploss_order(order_id='_', pair='TKN/BTC') == {'id': '123'} + assert exchange.cancel_stoploss_order(order_id="_", pair="TKN/BTC") == {"id": "123"} with pytest.raises(InvalidOrderException): api_mock.cancel_order = MagicMock(side_effect=ccxt.InvalidOrder("Did not find order")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.cancel_stoploss_order(order_id='_', pair='TKN/BTC') + exchange.cancel_stoploss_order(order_id="_", pair="TKN/BTC") assert api_mock.cancel_order.call_count == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - "cancel_stoploss_order", "cancel_order", - order_id='_', pair='TKN/BTC') + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "cancel_stoploss_order", + "cancel_order", + order_id="_", + pair="TKN/BTC", + ) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_cancel_stoploss_order_with_result(default_conf, mocker, exchange_name): - default_conf['dry_run'] = False - mock_prefix = 'freqtrade.exchange.gate.Gate' - if exchange_name == 'okx': - mock_prefix = 'freqtrade.exchange.okx.Okx' - mocker.patch(f'{EXMS}.fetch_stoploss_order', return_value={'for': 123}) - mocker.patch(f'{mock_prefix}.fetch_stoploss_order', return_value={'for': 123}) + default_conf["dry_run"] = False + mock_prefix = "freqtrade.exchange.gate.Gate" + if exchange_name == "okx": + mock_prefix = "freqtrade.exchange.okx.Okx" + mocker.patch(f"{EXMS}.fetch_stoploss_order", return_value={"for": 123}) + mocker.patch(f"{mock_prefix}.fetch_stoploss_order", return_value={"for": 123}) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - res = {'fee': {}, 'status': 'canceled', 'amount': 1234} - mocker.patch(f'{EXMS}.cancel_stoploss_order', return_value=res) - 
mocker.patch(f'{mock_prefix}.cancel_stoploss_order', return_value=res) - co = exchange.cancel_stoploss_order_with_result(order_id='_', pair='TKN/BTC', amount=555) + res = {"fee": {}, "status": "canceled", "amount": 1234} + mocker.patch(f"{EXMS}.cancel_stoploss_order", return_value=res) + mocker.patch(f"{mock_prefix}.cancel_stoploss_order", return_value=res) + co = exchange.cancel_stoploss_order_with_result(order_id="_", pair="TKN/BTC", amount=555) assert co == res - mocker.patch(f'{EXMS}.cancel_stoploss_order', return_value='canceled') - mocker.patch(f'{mock_prefix}.cancel_stoploss_order', return_value='canceled') + mocker.patch(f"{EXMS}.cancel_stoploss_order", return_value="canceled") + mocker.patch(f"{mock_prefix}.cancel_stoploss_order", return_value="canceled") # Fall back to fetch_stoploss_order - co = exchange.cancel_stoploss_order_with_result(order_id='_', pair='TKN/BTC', amount=555) - assert co == {'for': 123} + co = exchange.cancel_stoploss_order_with_result(order_id="_", pair="TKN/BTC", amount=555) + assert co == {"for": 123} exc = InvalidOrderException("") - mocker.patch(f'{EXMS}.fetch_stoploss_order', side_effect=exc) - mocker.patch(f'{mock_prefix}.fetch_stoploss_order', side_effect=exc) - co = exchange.cancel_stoploss_order_with_result(order_id='_', pair='TKN/BTC', amount=555) - assert co['amount'] == 555 - assert co == {'id': '_', 'fee': {}, 'status': 'canceled', 'amount': 555, 'info': {}} + mocker.patch(f"{EXMS}.fetch_stoploss_order", side_effect=exc) + mocker.patch(f"{mock_prefix}.fetch_stoploss_order", side_effect=exc) + co = exchange.cancel_stoploss_order_with_result(order_id="_", pair="TKN/BTC", amount=555) + assert co["amount"] == 555 + assert co == {"id": "_", "fee": {}, "status": "canceled", "amount": 555, "info": {}} with pytest.raises(InvalidOrderException): exc = InvalidOrderException("Did not find order") - mocker.patch(f'{EXMS}.cancel_stoploss_order', side_effect=exc) - mocker.patch(f'{mock_prefix}.cancel_stoploss_order', side_effect=exc) + mocker.patch(f"{EXMS}.cancel_stoploss_order", side_effect=exc) + mocker.patch(f"{mock_prefix}.cancel_stoploss_order", side_effect=exc) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - exchange.cancel_stoploss_order_with_result(order_id='_', pair='TKN/BTC', amount=123) + exchange.cancel_stoploss_order_with_result(order_id="_", pair="TKN/BTC", amount=123) @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_order(default_conf, mocker, exchange_name, caplog): - default_conf['dry_run'] = True - default_conf['exchange']['log_responses'] = True + default_conf["dry_run"] = True + default_conf["exchange"]["log_responses"] = True order = MagicMock() order.myid = 123 - order.symbol = 'TKN/BTC' + order.symbol = "TKN/BTC" mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - exchange._dry_run_open_orders['X'] = order - assert exchange.fetch_order('X', 'TKN/BTC').myid == 123 + exchange._dry_run_open_orders["X"] = order + assert exchange.fetch_order("X", "TKN/BTC").myid == 123 - with pytest.raises(InvalidOrderException, match=r'Tried to get an invalid dry-run-order.*'): - exchange.fetch_order('Y', 'TKN/BTC') + with pytest.raises(InvalidOrderException, match=r"Tried to get an invalid dry-run-order.*"): + exchange.fetch_order("Y", "TKN/BTC") - default_conf['dry_run'] = False + default_conf["dry_run"] = False api_mock = MagicMock() - api_mock.fetch_order = MagicMock(return_value={'id': 
'123', 'amount': 2, 'symbol': 'TKN/BTC'}) + api_mock.fetch_order = MagicMock(return_value={"id": "123", "amount": 2, "symbol": "TKN/BTC"}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert exchange.fetch_order( - 'X', 'TKN/BTC') == {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'} - assert log_has( - ("API fetch_order: {\'id\': \'123\', \'amount\': 2, \'symbol\': \'TKN/BTC\'}" - ), - caplog - ) + assert exchange.fetch_order("X", "TKN/BTC") == {"id": "123", "amount": 2, "symbol": "TKN/BTC"} + assert log_has(("API fetch_order: {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}"), caplog) with pytest.raises(InvalidOrderException): api_mock.fetch_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_order(order_id='_', pair='TKN/BTC') + exchange.fetch_order(order_id="_", pair="TKN/BTC") assert api_mock.fetch_order.call_count == 1 api_mock.fetch_order = MagicMock(side_effect=ccxt.OrderNotFound("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - with patch('freqtrade.exchange.common.time.sleep') as tm: + with patch("freqtrade.exchange.common.time.sleep") as tm: with pytest.raises(InvalidOrderException): - exchange.fetch_order(order_id='_', pair='TKN/BTC') + exchange.fetch_order(order_id="_", pair="TKN/BTC") # Ensure backoff is called assert tm.call_args_list[0][0][0] == 1 assert tm.call_args_list[1][0][0] == 2 @@ -3393,43 +3665,50 @@ def test_fetch_order(default_conf, mocker, exchange_name, caplog): assert tm.call_args_list[3][0][0] == 10 assert api_mock.fetch_order.call_count == API_FETCH_ORDER_RETRY_COUNT + 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - 'fetch_order', 'fetch_order', retries=API_FETCH_ORDER_RETRY_COUNT + 1, - order_id='_', pair='TKN/BTC') + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "fetch_order", + "fetch_order", + retries=API_FETCH_ORDER_RETRY_COUNT + 1, + order_id="_", + pair="TKN/BTC", + ) @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_order_emulated(default_conf, mocker, exchange_name, caplog): - default_conf['dry_run'] = True - default_conf['exchange']['log_responses'] = True + default_conf["dry_run"] = True + default_conf["exchange"]["log_responses"] = True order = MagicMock() order.myid = 123 - order.symbol = 'TKN/BTC' + order.symbol = "TKN/BTC" exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - mocker.patch(f'{EXMS}.exchange_has', return_value=False) - exchange._dry_run_open_orders['X'] = order + mocker.patch(f"{EXMS}.exchange_has", return_value=False) + exchange._dry_run_open_orders["X"] = order # Dry run - regular fetch_order behavior - assert exchange.fetch_order('X', 'TKN/BTC').myid == 123 + assert exchange.fetch_order("X", "TKN/BTC").myid == 123 - with pytest.raises(InvalidOrderException, match=r'Tried to get an invalid dry-run-order.*'): - exchange.fetch_order('Y', 'TKN/BTC') + with pytest.raises(InvalidOrderException, match=r"Tried to get an invalid dry-run-order.*"): + exchange.fetch_order("Y", "TKN/BTC") - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.exchange_has', return_value=False) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.exchange_has", return_value=False) api_mock = MagicMock() api_mock.fetch_open_order = MagicMock( - return_value={'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}) 
+ return_value={"id": "123", "amount": 2, "symbol": "TKN/BTC"} + ) api_mock.fetch_closed_order = MagicMock( - return_value={'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}) + return_value={"id": "123", "amount": 2, "symbol": "TKN/BTC"} + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert exchange.fetch_order( - 'X', 'TKN/BTC') == {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'} + assert exchange.fetch_order("X", "TKN/BTC") == {"id": "123", "amount": 2, "symbol": "TKN/BTC"} assert log_has( - ("API fetch_open_order: {\'id\': \'123\', \'amount\': 2, \'symbol\': \'TKN/BTC\'}" - ), - caplog + ("API fetch_open_order: {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}"), caplog ) assert api_mock.fetch_open_order.call_count == 1 assert api_mock.fetch_closed_order.call_count == 0 @@ -3438,14 +3717,12 @@ def test_fetch_order_emulated(default_conf, mocker, exchange_name, caplog): # open_order doesn't find order api_mock.fetch_open_order = MagicMock(side_effect=ccxt.OrderNotFound("Order not found")) api_mock.fetch_closed_order = MagicMock( - return_value={'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}) + return_value={"id": "123", "amount": 2, "symbol": "TKN/BTC"} + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - assert exchange.fetch_order( - 'X', 'TKN/BTC') == {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'} + assert exchange.fetch_order("X", "TKN/BTC") == {"id": "123", "amount": 2, "symbol": "TKN/BTC"} assert log_has( - ("API fetch_closed_order: {\'id\': \'123\', \'amount\': 2, \'symbol\': \'TKN/BTC\'}" - ), - caplog + ("API fetch_closed_order: {'id': '123', 'amount': 2, 'symbol': 'TKN/BTC'}"), caplog ) assert api_mock.fetch_open_order.call_count == 1 assert api_mock.fetch_closed_order.call_count == 1 @@ -3455,79 +3732,95 @@ def test_fetch_order_emulated(default_conf, mocker, exchange_name, caplog): api_mock.fetch_open_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) api_mock.fetch_closed_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_order(order_id='_', pair='TKN/BTC') + exchange.fetch_order(order_id="_", pair="TKN/BTC") assert api_mock.fetch_open_order.call_count == 1 api_mock.fetch_open_order = MagicMock(side_effect=ccxt.OrderNotFound("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - 'fetch_order_emulated', 'fetch_open_order', - retries=1, - order_id='_', pair='TKN/BTC', params={}) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "fetch_order_emulated", + "fetch_open_order", + retries=1, + order_id="_", + pair="TKN/BTC", + params={}, + ) @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_fetch_stoploss_order(default_conf, mocker, exchange_name): - default_conf['dry_run'] = True + default_conf["dry_run"] = True mocker.patch(f"{EXMS}.exchange_has", return_value=True) order = MagicMock() order.myid = 123 exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - exchange._dry_run_open_orders['X'] = order - assert exchange.fetch_stoploss_order('X', 'TKN/BTC').myid == 123 + exchange._dry_run_open_orders["X"] = order + assert exchange.fetch_stoploss_order("X", "TKN/BTC").myid == 123 - with pytest.raises(InvalidOrderException, match=r'Tried to get an invalid 
dry-run-order.*'): - exchange.fetch_stoploss_order('Y', 'TKN/BTC') + with pytest.raises(InvalidOrderException, match=r"Tried to get an invalid dry-run-order.*"): + exchange.fetch_stoploss_order("Y", "TKN/BTC") - default_conf['dry_run'] = False + default_conf["dry_run"] = False api_mock = MagicMock() - api_mock.fetch_order = MagicMock(return_value={'id': '123', 'symbol': 'TKN/BTC'}) + api_mock.fetch_order = MagicMock(return_value={"id": "123", "symbol": "TKN/BTC"}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - res = {'id': '123', 'symbol': 'TKN/BTC'} - if exchange_name == 'okx': - res = {'id': '123', 'symbol': 'TKN/BTC', 'type': 'stoploss'} - assert exchange.fetch_stoploss_order('X', 'TKN/BTC') == res + res = {"id": "123", "symbol": "TKN/BTC"} + if exchange_name == "okx": + res = {"id": "123", "symbol": "TKN/BTC", "type": "stoploss"} + assert exchange.fetch_stoploss_order("X", "TKN/BTC") == res - if exchange_name == 'okx': + if exchange_name == "okx": # Tested separately. return with pytest.raises(InvalidOrderException): api_mock.fetch_order = MagicMock(side_effect=ccxt.InvalidOrder("Order not found")) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange.fetch_stoploss_order(order_id='_', pair='TKN/BTC') + exchange.fetch_stoploss_order(order_id="_", pair="TKN/BTC") assert api_mock.fetch_order.call_count == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - 'fetch_stoploss_order', 'fetch_order', - retries=API_FETCH_ORDER_RETRY_COUNT + 1, - order_id='_', pair='TKN/BTC') + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "fetch_stoploss_order", + "fetch_order", + retries=API_FETCH_ORDER_RETRY_COUNT + 1, + order_id="_", + pair="TKN/BTC", + ) def test_fetch_order_or_stoploss_order(default_conf, mocker): - exchange = get_patched_exchange(mocker, default_conf, id='binance') + exchange = get_patched_exchange(mocker, default_conf, id="binance") fetch_order_mock = MagicMock() fetch_stoploss_order_mock = MagicMock() - mocker.patch.multiple(EXMS, - fetch_order=fetch_order_mock, - fetch_stoploss_order=fetch_stoploss_order_mock, - ) + mocker.patch.multiple( + EXMS, + fetch_order=fetch_order_mock, + fetch_stoploss_order=fetch_stoploss_order_mock, + ) - exchange.fetch_order_or_stoploss_order('1234', 'ETH/BTC', False) + exchange.fetch_order_or_stoploss_order("1234", "ETH/BTC", False) assert fetch_order_mock.call_count == 1 - assert fetch_order_mock.call_args_list[0][0][0] == '1234' - assert fetch_order_mock.call_args_list[0][0][1] == 'ETH/BTC' + assert fetch_order_mock.call_args_list[0][0][0] == "1234" + assert fetch_order_mock.call_args_list[0][0][1] == "ETH/BTC" assert fetch_stoploss_order_mock.call_count == 0 fetch_order_mock.reset_mock() fetch_stoploss_order_mock.reset_mock() - exchange.fetch_order_or_stoploss_order('1234', 'ETH/BTC', True) + exchange.fetch_order_or_stoploss_order("1234", "ETH/BTC", True) assert fetch_order_mock.call_count == 0 assert fetch_stoploss_order_mock.call_count == 1 - assert fetch_stoploss_order_mock.call_args_list[0][0][0] == '1234' - assert fetch_stoploss_order_mock.call_args_list[0][0][1] == 'ETH/BTC' + assert fetch_stoploss_order_mock.call_args_list[0][0][0] == "1234" + assert fetch_stoploss_order_mock.call_args_list[0][0][1] == "ETH/BTC" @pytest.mark.parametrize("exchange_name", EXCHANGES) @@ -3538,119 +3831,135 @@ def test_name(default_conf, mocker, exchange_name): assert exchange.id == exchange_name 
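The retry handling asserted in test_fetch_order above (sleeps of 1, 2, 5 and 10 seconds before giving up after API_FETCH_ORDER_RETRY_COUNT + 1 calls) and the test_calculate_backoff table further down in this diff are both consistent with a simple quadratic backoff. A worked check of that inference, using nothing beyond the parametrized values (a reading of the test data, not a quote of freqtrade's calculate_backoff):

# Every (retrycount, max_retries, expected) row below - copied from the
# test_calculate_backoff parametrize later in this diff - satisfies
# expected == (max_retries - retrycount) ** 2 + 1.
def backoff(retrycount: int, max_retries: int) -> int:
    return (max_retries - retrycount) ** 2 + 1

rows = [
    (0, 3, 10), (1, 3, 5), (2, 3, 2), (3, 3, 1),
    (0, 1, 2), (1, 1, 1),
    (0, 4, 17), (1, 4, 10), (2, 4, 5), (3, 4, 2), (4, 4, 1),
    (0, 5, 26), (1, 5, 17), (2, 5, 10), (3, 5, 5), (4, 5, 2), (5, 5, 1),
]
assert all(backoff(r, m) == e for r, m, e in rows)

# The 1, 2, 5, 10 progression asserted on time.sleep in test_fetch_order is what
# this formula yields when the remaining retry count is passed in and counts
# down towards 1, i.e. the wait grows quadratically as retries are used up.
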
-@pytest.mark.parametrize("trading_mode,amount", [ - ('spot', 0.2340606), - ('futures', 2.340606), -]) +@pytest.mark.parametrize( + "trading_mode,amount", + [ + ("spot", 0.2340606), + ("futures", 2.340606), + ], +) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_trades_for_order(default_conf, mocker, exchange_name, trading_mode, amount): - order_id = 'ABCD-ABCD' + order_id = "ABCD-ABCD" since = datetime(2018, 5, 5, 0, 0, 0) default_conf["dry_run"] = False default_conf["trading_mode"] = trading_mode - default_conf["margin_mode"] = 'isolated' - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + default_conf["margin_mode"] = "isolated" + mocker.patch(f"{EXMS}.exchange_has", return_value=True) api_mock = MagicMock() - api_mock.fetch_my_trades = MagicMock(return_value=[{'id': 'TTR67E-3PFBD-76IISV', - 'order': 'ABCD-ABCD', - 'info': {'pair': 'XLTCZBTC', - 'time': 1519860024.4388, - 'type': 'buy', - 'ordertype': 'limit', - 'price': '20.00000', - 'cost': '38.62000', - 'fee': '0.06179', - 'vol': '5', - 'id': 'ABCD-ABCD'}, - 'timestamp': 1519860024438, - 'datetime': '2018-02-28T23:20:24.438Z', - 'symbol': 'ETH/USDT:USDT', - 'type': 'limit', - 'side': 'buy', - 'price': 165.0, - 'amount': 0.2340606, - 'fee': {'cost': 0.06179, 'currency': 'BTC'} - }]) + api_mock.fetch_my_trades = MagicMock( + return_value=[ + { + "id": "TTR67E-3PFBD-76IISV", + "order": "ABCD-ABCD", + "info": { + "pair": "XLTCZBTC", + "time": 1519860024.4388, + "type": "buy", + "ordertype": "limit", + "price": "20.00000", + "cost": "38.62000", + "fee": "0.06179", + "vol": "5", + "id": "ABCD-ABCD", + }, + "timestamp": 1519860024438, + "datetime": "2018-02-28T23:20:24.438Z", + "symbol": "ETH/USDT:USDT", + "type": "limit", + "side": "buy", + "price": 165.0, + "amount": 0.2340606, + "fee": {"cost": 0.06179, "currency": "BTC"}, + } + ] + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - orders = exchange.get_trades_for_order(order_id, 'ETH/USDT:USDT', since) + orders = exchange.get_trades_for_order(order_id, "ETH/USDT:USDT", since) assert len(orders) == 1 - assert orders[0]['price'] == 165 - assert pytest.approx(orders[0]['amount']) == amount + assert orders[0]["price"] == 165 + assert pytest.approx(orders[0]["amount"]) == amount assert api_mock.fetch_my_trades.call_count == 1 # since argument should be assert isinstance(api_mock.fetch_my_trades.call_args[0][1], int) - assert api_mock.fetch_my_trades.call_args[0][0] == 'ETH/USDT:USDT' + assert api_mock.fetch_my_trades.call_args[0][0] == "ETH/USDT:USDT" # Same test twice, hardcoded number and doing the same calculation assert api_mock.fetch_my_trades.call_args[0][1] == 1525478395000 - assert api_mock.fetch_my_trades.call_args[0][1] == int(since.replace( - tzinfo=timezone.utc).timestamp() - 5) * 1000 + assert ( + api_mock.fetch_my_trades.call_args[0][1] + == int(since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000 + ) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - 'get_trades_for_order', 'fetch_my_trades', - order_id=order_id, pair='ETH/USDT:USDT', since=since) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + exchange_name, + "get_trades_for_order", + "fetch_my_trades", + order_id=order_id, + pair="ETH/USDT:USDT", + since=since, + ) - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=False)) - assert exchange.get_trades_for_order(order_id, 'ETH/USDT:USDT', since) == [] + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=False)) + assert 
exchange.get_trades_for_order(order_id, "ETH/USDT:USDT", since) == [] @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_fee(default_conf, mocker, exchange_name): api_mock = MagicMock() - api_mock.calculate_fee = MagicMock(return_value={ - 'type': 'taker', - 'currency': 'BTC', - 'rate': 0.025, - 'cost': 0.05 - }) + api_mock.calculate_fee = MagicMock( + return_value={"type": "taker", "currency": "BTC", "rate": 0.025, "cost": 0.05} + ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange._config.pop('fee', None) + exchange._config.pop("fee", None) - assert exchange.get_fee('ETH/BTC') == 0.025 + assert exchange.get_fee("ETH/BTC") == 0.025 assert api_mock.calculate_fee.call_count == 1 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name, - 'get_fee', 'calculate_fee', symbol="ETH/BTC") + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, exchange_name, "get_fee", "calculate_fee", symbol="ETH/BTC" + ) api_mock.calculate_fee.reset_mock() - exchange._config['fee'] = 0.001 + exchange._config["fee"] = 0.001 - assert exchange.get_fee('ETH/BTC') == 0.001 + assert exchange.get_fee("ETH/BTC") == 0.001 assert api_mock.calculate_fee.call_count == 0 def test_stoploss_order_unsupported_exchange(default_conf, mocker): - exchange = get_patched_exchange(mocker, default_conf, id='bitpanda') + exchange = get_patched_exchange(mocker, default_conf, id="bitpanda") with pytest.raises(OperationalException, match=r"stoploss is not implemented .*"): exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side="sell", - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side="sell", leverage=1.0 ) with pytest.raises(OperationalException, match=r"stoploss is not implemented .*"): exchange.stoploss_adjust(1, {}, side="sell") -@pytest.mark.parametrize('side,ratio,expected', [ - ('sell', 0.99, 99.0), # Default - ('sell', 0.999, 99.9), - ('sell', 1, 100), - ('sell', 1.1, InvalidOrderException), - ('buy', 0.99, 101.0), # Default - ('buy', 0.999, 100.1), - ('buy', 1, 100), - ('buy', 1.1, InvalidOrderException), - ]) +@pytest.mark.parametrize( + "side,ratio,expected", + [ + ("sell", 0.99, 99.0), # Default + ("sell", 0.999, 99.9), + ("sell", 1, 100), + ("sell", 1.1, InvalidOrderException), + ("buy", 0.99, 101.0), # Default + ("buy", 0.999, 100.1), + ("buy", 1, 100), + ("buy", 1.1, InvalidOrderException), + ], +) def test__get_stop_limit_rate(default_conf_usdt, mocker, side, ratio, expected): - exchange = get_patched_exchange(mocker, default_conf_usdt, id='binance') + exchange = get_patched_exchange(mocker, default_conf_usdt, id="binance") - order_types = {'stoploss_on_exchange_limit_ratio': ratio} + order_types = {"stoploss_on_exchange_limit_ratio": ratio} if isinstance(expected, type) and issubclass(expected, Exception): with pytest.raises(expected): exchange._get_stop_limit_rate(100, order_types, side) @@ -3659,48 +3968,50 @@ def test__get_stop_limit_rate(default_conf_usdt, mocker, side, ratio, expected): def test_merge_ft_has_dict(default_conf, mocker): - mocker.patch.multiple(EXMS, - _init_ccxt=MagicMock(return_value=MagicMock()), - _load_async_markets=MagicMock(), - validate_pairs=MagicMock(), - validate_timeframes=MagicMock(), - validate_stakecurrency=MagicMock(), - validate_pricing=MagicMock(), - ) + mocker.patch.multiple( + EXMS, + _init_ccxt=MagicMock(return_value=MagicMock()), + _load_async_markets=MagicMock(), + validate_pairs=MagicMock(), + validate_timeframes=MagicMock(), + 
validate_stakecurrency=MagicMock(), + validate_pricing=MagicMock(), + ) ex = Exchange(default_conf) assert ex._ft_has == Exchange._ft_has_default ex = Kraken(default_conf) assert ex._ft_has != Exchange._ft_has_default - assert ex.get_option('trades_pagination') == 'id' - assert ex.get_option('trades_pagination_arg') == 'since' + assert ex.get_option("trades_pagination") == "id" + assert ex.get_option("trades_pagination_arg") == "since" # Binance defines different values ex = Binance(default_conf) assert ex._ft_has != Exchange._ft_has_default - assert ex.get_option('stoploss_on_exchange') - assert ex.get_option('order_time_in_force') == ['GTC', 'FOK', 'IOC', 'PO'] - assert ex.get_option('trades_pagination') == 'id' - assert ex.get_option('trades_pagination_arg') == 'fromId' + assert ex.get_option("stoploss_on_exchange") + assert ex.get_option("order_time_in_force") == ["GTC", "FOK", "IOC", "PO"] + assert ex.get_option("trades_pagination") == "id" + assert ex.get_option("trades_pagination_arg") == "fromId" conf = copy.deepcopy(default_conf) - conf['exchange']['_ft_has_params'] = {"DeadBeef": 20, - "stoploss_on_exchange": False} + conf["exchange"]["_ft_has_params"] = {"DeadBeef": 20, "stoploss_on_exchange": False} # Use settings from configuration (overriding stoploss_on_exchange) ex = Binance(conf) assert ex._ft_has != Exchange._ft_has_default - assert not ex._ft_has['stoploss_on_exchange'] - assert ex._ft_has['DeadBeef'] == 20 + assert not ex._ft_has["stoploss_on_exchange"] + assert ex._ft_has["DeadBeef"] == 20 def test_get_valid_pair_combination(default_conf, mocker, markets): - mocker.patch.multiple(EXMS, - _init_ccxt=MagicMock(return_value=MagicMock()), - _load_async_markets=MagicMock(), - validate_pairs=MagicMock(), - validate_timeframes=MagicMock(), - validate_pricing=MagicMock(), - markets=PropertyMock(return_value=markets)) + mocker.patch.multiple( + EXMS, + _init_ccxt=MagicMock(return_value=MagicMock()), + _load_async_markets=MagicMock(), + validate_pairs=MagicMock(), + validate_timeframes=MagicMock(), + validate_pricing=MagicMock(), + markets=PropertyMock(return_value=markets), + ) ex = Exchange(default_conf) assert ex.get_valid_pair_combination("ETH", "BTC") == "ETH/BTC" @@ -3711,7 +4022,8 @@ def test_get_valid_pair_combination(default_conf, mocker, markets): @pytest.mark.parametrize( "base_currencies,quote_currencies,tradable_only,active_only,spot_only," - "futures_only,expected_keys,test_comment", [ + "futures_only,expected_keys,test_comment", + [ # Testing markets (in conftest.py): # 'BLK/BTC': 'active': True # 'BTT/BTC': 'active': True @@ -3725,98 +4037,287 @@ def test_get_valid_pair_combination(default_conf, mocker, markets): # 'TKN/BTC': 'active' not set # 'XLTCUSDT': 'active': True, not a pair # 'XRP/BTC': 'active': False - ([], [], False, False, False, False, - ['BLK/BTC', 'BTT/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', 'LTC/USDT', - 'NEO/BTC', 'TKN/BTC', 'XLTCUSDT', 'XRP/BTC', 'ADA/USDT:USDT', - 'ETH/USDT:USDT'], - 'all markets'), - ([], [], False, False, True, False, - ['BLK/BTC', 'BTT/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', - 'LTC/USDT', 'NEO/BTC', 'TKN/BTC', 'XRP/BTC'], - 'all markets, only spot pairs'), - ([], [], False, True, False, False, - ['BLK/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', 'NEO/BTC', - 'TKN/BTC', 'XLTCUSDT', 'XRP/BTC', 'ADA/USDT:USDT', 'ETH/USDT:USDT'], - 'active markets'), - ([], [], True, False, False, False, - ['BLK/BTC', 'BTT/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', 
- 'LTC/USDT', 'NEO/BTC', 'TKN/BTC', 'XRP/BTC'], - 'all pairs'), - ([], [], True, True, False, False, - ['BLK/BTC', 'ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', 'NEO/BTC', - 'TKN/BTC', 'XRP/BTC'], - 'active pairs'), - (['ETH', 'LTC'], [], False, False, False, False, - ['ETH/BTC', 'ETH/USDT', 'LTC/BTC', 'LTC/ETH', 'LTC/USD', 'LTC/USDT', 'XLTCUSDT', - 'ETH/USDT:USDT'], - 'all markets, base=ETH, LTC'), - (['LTC'], [], False, False, False, False, - ['LTC/BTC', 'LTC/ETH', 'LTC/USD', 'LTC/USDT', 'XLTCUSDT'], - 'all markets, base=LTC'), - (['LTC'], [], False, False, True, False, - ['LTC/BTC', 'LTC/ETH', 'LTC/USD', 'LTC/USDT'], - 'spot markets, base=LTC'), - ([], ['USDT'], False, False, False, False, - ['ETH/USDT', 'LTC/USDT', 'XLTCUSDT', 'ADA/USDT:USDT', 'ETH/USDT:USDT'], - 'all markets, quote=USDT'), - ([], ['USDT'], False, False, False, True, - ['ADA/USDT:USDT', 'ETH/USDT:USDT'], - 'Futures markets, quote=USDT'), - ([], ['USDT', 'USD'], False, False, False, False, - ['ETH/USDT', 'LTC/USD', 'LTC/USDT', 'XLTCUSDT', 'ADA/USDT:USDT', 'ETH/USDT:USDT'], - 'all markets, quote=USDT, USD'), - ([], ['USDT', 'USD'], False, False, True, False, - ['ETH/USDT', 'LTC/USD', 'LTC/USDT'], - 'spot markets, quote=USDT, USD'), - (['LTC'], ['USDT'], False, False, False, False, - ['LTC/USDT', 'XLTCUSDT'], - 'all markets, base=LTC, quote=USDT'), - (['LTC'], ['USDT'], True, False, False, False, - ['LTC/USDT'], - 'all pairs, base=LTC, quote=USDT'), - (['LTC'], ['USDT', 'NONEXISTENT'], False, False, False, False, - ['LTC/USDT', 'XLTCUSDT'], - 'all markets, base=LTC, quote=USDT, NONEXISTENT'), - (['LTC'], ['NONEXISTENT'], False, False, False, False, - [], - 'all markets, base=LTC, quote=NONEXISTENT'), - ]) -def test_get_markets(default_conf, mocker, markets_static, - base_currencies, quote_currencies, tradable_only, active_only, - spot_only, futures_only, expected_keys, - test_comment # Here for debugging purposes (Not used within method) - ): - mocker.patch.multiple(EXMS, - _init_ccxt=MagicMock(return_value=MagicMock()), - _load_async_markets=MagicMock(), - validate_pairs=MagicMock(), - validate_timeframes=MagicMock(), - validate_pricing=MagicMock(), - markets=PropertyMock(return_value=markets_static)) + ( + [], + [], + False, + False, + False, + False, + [ + "BLK/BTC", + "BTT/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "LTC/USDT", + "NEO/BTC", + "TKN/BTC", + "XLTCUSDT", + "XRP/BTC", + "ADA/USDT:USDT", + "ETH/USDT:USDT", + ], + "all markets", + ), + ( + [], + [], + False, + False, + True, + False, + [ + "BLK/BTC", + "BTT/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "LTC/USDT", + "NEO/BTC", + "TKN/BTC", + "XRP/BTC", + ], + "all markets, only spot pairs", + ), + ( + [], + [], + False, + True, + False, + False, + [ + "BLK/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "NEO/BTC", + "TKN/BTC", + "XLTCUSDT", + "XRP/BTC", + "ADA/USDT:USDT", + "ETH/USDT:USDT", + ], + "active markets", + ), + ( + [], + [], + True, + False, + False, + False, + [ + "BLK/BTC", + "BTT/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "LTC/USDT", + "NEO/BTC", + "TKN/BTC", + "XRP/BTC", + ], + "all pairs", + ), + ( + [], + [], + True, + True, + False, + False, + [ + "BLK/BTC", + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + "LTC/USD", + "NEO/BTC", + "TKN/BTC", + "XRP/BTC", + ], + "active pairs", + ), + ( + ["ETH", "LTC"], + [], + False, + False, + False, + False, + [ + "ETH/BTC", + "ETH/USDT", + "LTC/BTC", + "LTC/ETH", + 
"LTC/USD", + "LTC/USDT", + "XLTCUSDT", + "ETH/USDT:USDT", + ], + "all markets, base=ETH, LTC", + ), + ( + ["LTC"], + [], + False, + False, + False, + False, + ["LTC/BTC", "LTC/ETH", "LTC/USD", "LTC/USDT", "XLTCUSDT"], + "all markets, base=LTC", + ), + ( + ["LTC"], + [], + False, + False, + True, + False, + ["LTC/BTC", "LTC/ETH", "LTC/USD", "LTC/USDT"], + "spot markets, base=LTC", + ), + ( + [], + ["USDT"], + False, + False, + False, + False, + ["ETH/USDT", "LTC/USDT", "XLTCUSDT", "ADA/USDT:USDT", "ETH/USDT:USDT"], + "all markets, quote=USDT", + ), + ( + [], + ["USDT"], + False, + False, + False, + True, + ["ADA/USDT:USDT", "ETH/USDT:USDT"], + "Futures markets, quote=USDT", + ), + ( + [], + ["USDT", "USD"], + False, + False, + False, + False, + ["ETH/USDT", "LTC/USD", "LTC/USDT", "XLTCUSDT", "ADA/USDT:USDT", "ETH/USDT:USDT"], + "all markets, quote=USDT, USD", + ), + ( + [], + ["USDT", "USD"], + False, + False, + True, + False, + ["ETH/USDT", "LTC/USD", "LTC/USDT"], + "spot markets, quote=USDT, USD", + ), + ( + ["LTC"], + ["USDT"], + False, + False, + False, + False, + ["LTC/USDT", "XLTCUSDT"], + "all markets, base=LTC, quote=USDT", + ), + ( + ["LTC"], + ["USDT"], + True, + False, + False, + False, + ["LTC/USDT"], + "all pairs, base=LTC, quote=USDT", + ), + ( + ["LTC"], + ["USDT", "NONEXISTENT"], + False, + False, + False, + False, + ["LTC/USDT", "XLTCUSDT"], + "all markets, base=LTC, quote=USDT, NONEXISTENT", + ), + ( + ["LTC"], + ["NONEXISTENT"], + False, + False, + False, + False, + [], + "all markets, base=LTC, quote=NONEXISTENT", + ), + ], +) +def test_get_markets( + default_conf, + mocker, + markets_static, + base_currencies, + quote_currencies, + tradable_only, + active_only, + spot_only, + futures_only, + expected_keys, + test_comment, # Here for debugging purposes (Not used within method) +): + mocker.patch.multiple( + EXMS, + _init_ccxt=MagicMock(return_value=MagicMock()), + _load_async_markets=MagicMock(), + validate_pairs=MagicMock(), + validate_timeframes=MagicMock(), + validate_pricing=MagicMock(), + markets=PropertyMock(return_value=markets_static), + ) ex = Exchange(default_conf) - pairs = ex.get_markets(base_currencies, - quote_currencies, - tradable_only=tradable_only, - spot_only=spot_only, - futures_only=futures_only, - active_only=active_only) + pairs = ex.get_markets( + base_currencies, + quote_currencies, + tradable_only=tradable_only, + spot_only=spot_only, + futures_only=futures_only, + active_only=active_only, + ) assert sorted(pairs.keys()) == sorted(expected_keys) def test_get_markets_error(default_conf, mocker): ex = get_patched_exchange(mocker, default_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=None)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=None)) with pytest.raises(OperationalException, match="Markets were not loaded."): - ex.get_markets('LTC', 'USDT', True, False) + ex.get_markets("LTC", "USDT", True, False) @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_ohlcv_candle_limit(default_conf, mocker, exchange_name): - if exchange_name == 'okx': + if exchange_name == "okx": pytest.skip("Tested separately for okx") exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - timeframes = ('1m', '5m', '1h') - expected = exchange._ft_has['ohlcv_candle_limit'] + timeframes = ("1m", "5m", "1h") + expected = exchange._ft_has["ohlcv_candle_limit"] for timeframe in timeframes: # if 'ohlcv_candle_limit_per_timeframe' in exchange._ft_has: # expected = 
exchange._ft_has['ohlcv_candle_limit_per_timeframe'][timeframe] @@ -3828,159 +4329,323 @@ def test_ohlcv_candle_limit(default_conf, mocker, exchange_name): @pytest.mark.parametrize( "market_symbol,base,quote,exchange,spot,margin,futures,trademode,add_dict,expected_result", [ - ("BTC/USDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True), - ("USDT/BTC", 'USDT', 'BTC', "binance", True, False, False, 'spot', {}, True), + ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, "spot", {}, True), + ("USDT/BTC", "USDT", "BTC", "binance", True, False, False, "spot", {}, True), # No separating / - ("BTCUSDT", 'BTC', 'USDT', "binance", True, False, False, 'spot', {}, True), - ("BTCUSDT", None, "USDT", "binance", True, False, False, 'spot', {}, False), - ("USDT/BTC", "BTC", None, "binance", True, False, False, 'spot', {}, False), - ("BTCUSDT", "BTC", None, "binance", True, False, False, 'spot', {}, False), - ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'spot', {}, True), + ("BTCUSDT", "BTC", "USDT", "binance", True, False, False, "spot", {}, True), + ("BTCUSDT", None, "USDT", "binance", True, False, False, "spot", {}, False), + ("USDT/BTC", "BTC", None, "binance", True, False, False, "spot", {}, False), + ("BTCUSDT", "BTC", None, "binance", True, False, False, "spot", {}, False), + ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, "spot", {}, True), # Futures mode, spot pair - ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'futures', {}, False), - ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, 'margin', {}, False), - ("BTC/USDT", "BTC", "USDT", "binance", True, True, True, 'margin', {}, True), - ("BTC/USDT", "BTC", "USDT", "binance", False, True, False, 'margin', {}, True), + ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, "futures", {}, False), + ("BTC/USDT", "BTC", "USDT", "binance", True, False, False, "margin", {}, False), + ("BTC/USDT", "BTC", "USDT", "binance", True, True, True, "margin", {}, True), + ("BTC/USDT", "BTC", "USDT", "binance", False, True, False, "margin", {}, True), # Futures mode, futures pair - ("BTC/USDT", "BTC", "USDT", "binance", False, False, True, 'futures', {}, True), + ("BTC/USDT", "BTC", "USDT", "binance", False, False, True, "futures", {}, True), # Futures market - ("BTC/UNK", "BTC", 'UNK', "binance", False, False, True, 'spot', {}, False), - ("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot', {"darkpool": False}, True), - ("EUR/BTC", 'EUR', 'BTC', "kraken", True, False, False, 'spot', {"darkpool": False}, True), + ("BTC/UNK", "BTC", "UNK", "binance", False, False, True, "spot", {}, False), + ("BTC/EUR", "BTC", "EUR", "kraken", True, False, False, "spot", {"darkpool": False}, True), + ("EUR/BTC", "EUR", "BTC", "kraken", True, False, False, "spot", {"darkpool": False}, True), # no darkpools - ("BTC/EUR", 'BTC', 'EUR', "kraken", True, False, False, 'spot', - {"darkpool": True}, False), + ("BTC/EUR", "BTC", "EUR", "kraken", True, False, False, "spot", {"darkpool": True}, False), # no darkpools - ("BTC/EUR.d", 'BTC', 'EUR', "kraken", True, False, False, 'spot', - {"darkpool": True}, False), - ("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'spot', {}, False), - ("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'margin', {}, False), - ("BTC/USDT:USDT", 'BTC', 'USD', "okx", False, False, True, 'futures', {}, True), - ]) + ( + "BTC/EUR.d", + "BTC", + "EUR", + "kraken", + True, + False, + False, + "spot", + {"darkpool": True}, + False, + ), + ("BTC/USDT:USDT", "BTC", 
"USD", "okx", False, False, True, "spot", {}, False), + ("BTC/USDT:USDT", "BTC", "USD", "okx", False, False, True, "margin", {}, False), + ("BTC/USDT:USDT", "BTC", "USD", "okx", False, False, True, "futures", {}, True), + ], +) def test_market_is_tradable( - mocker, default_conf, market_symbol, base, - quote, spot, margin, futures, trademode, add_dict, exchange, expected_result + mocker, + default_conf, + market_symbol, + base, + quote, + spot, + margin, + futures, + trademode, + add_dict, + exchange, + expected_result, ) -> None: - default_conf['trading_mode'] = trademode - mocker.patch(f'{EXMS}.validate_trading_mode_and_margin_mode') + default_conf["trading_mode"] = trademode + mocker.patch(f"{EXMS}.validate_trading_mode_and_margin_mode") ex = get_patched_exchange(mocker, default_conf, id=exchange) market = { - 'symbol': market_symbol, - 'base': base, - 'quote': quote, - 'spot': spot, - 'future': futures, - 'swap': futures, - 'margin': margin, - 'linear': True, + "symbol": market_symbol, + "base": base, + "quote": quote, + "spot": spot, + "future": futures, + "swap": futures, + "margin": margin, + "linear": True, **(add_dict), } assert ex.market_is_tradable(market) == expected_result -@pytest.mark.parametrize("market,expected_result", [ - ({'symbol': 'ETH/BTC', 'active': True}, True), - ({'symbol': 'ETH/BTC', 'active': False}, False), - ({'symbol': 'ETH/BTC', }, True), -]) +@pytest.mark.parametrize( + "market,expected_result", + [ + ({"symbol": "ETH/BTC", "active": True}, True), + ({"symbol": "ETH/BTC", "active": False}, False), + ( + { + "symbol": "ETH/BTC", + }, + True, + ), + ], +) def test_market_is_active(market, expected_result) -> None: assert market_is_active(market) == expected_result -@pytest.mark.parametrize("order,expected", [ - ([{'fee'}], False), - ({'fee': None}, False), - ({'fee': {'currency': 'ETH/BTC'}}, False), - ({'fee': {'currency': 'ETH/BTC', 'cost': None}}, False), - ({'fee': {'currency': 'ETH/BTC', 'cost': 0.01}}, True), -]) +@pytest.mark.parametrize( + "order,expected", + [ + ([{"fee"}], False), + ({"fee": None}, False), + ({"fee": {"currency": "ETH/BTC"}}, False), + ({"fee": {"currency": "ETH/BTC", "cost": None}}, False), + ({"fee": {"currency": "ETH/BTC", "cost": 0.01}}, True), + ], +) def test_order_has_fee(order, expected) -> None: assert Exchange.order_has_fee(order) == expected -@pytest.mark.parametrize("order,expected", [ - ({'symbol': 'ETH/BTC', 'fee': {'currency': 'ETH', 'cost': 0.43}}, - (0.43, 'ETH', 0.01)), - ({'symbol': 'ETH/USDT', 'fee': {'currency': 'USDT', 'cost': 0.01}}, - (0.01, 'USDT', 0.01)), - ({'symbol': 'BTC/USDT', 'fee': {'currency': 'USDT', 'cost': 0.34, 'rate': 0.01}}, - (0.34, 'USDT', 0.01)), -]) +@pytest.mark.parametrize( + "order,expected", + [ + ({"symbol": "ETH/BTC", "fee": {"currency": "ETH", "cost": 0.43}}, (0.43, "ETH", 0.01)), + ({"symbol": "ETH/USDT", "fee": {"currency": "USDT", "cost": 0.01}}, (0.01, "USDT", 0.01)), + ( + {"symbol": "BTC/USDT", "fee": {"currency": "USDT", "cost": 0.34, "rate": 0.01}}, + (0.34, "USDT", 0.01), + ), + ], +) def test_extract_cost_curr_rate(mocker, default_conf, order, expected) -> None: - mocker.patch(f'{EXMS}.calculate_fee_rate', MagicMock(return_value=0.01)) + mocker.patch(f"{EXMS}.calculate_fee_rate", MagicMock(return_value=0.01)) ex = get_patched_exchange(mocker, default_conf) - assert ex.extract_cost_curr_rate(order['fee'], order['symbol'], cost=20, amount=1) == expected + assert ex.extract_cost_curr_rate(order["fee"], order["symbol"], cost=20, amount=1) == expected 
-@pytest.mark.parametrize("order,unknown_fee_rate,expected", [ - # Using base-currency - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'ETH', 'cost': 0.004, 'rate': None}}, None, 0.1), - ({'symbol': 'ETH/BTC', 'amount': 0.05, 'cost': 0.05, - 'fee': {'currency': 'ETH', 'cost': 0.004, 'rate': None}}, None, 0.08), - # Using quote currency - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'BTC', 'cost': 0.005}}, None, 0.1), - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'BTC', 'cost': 0.002, 'rate': None}}, None, 0.04), - # Using foreign currency - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'NEO', 'cost': 0.0012}}, None, 0.001944), - ({'symbol': 'ETH/BTC', 'amount': 2.21, 'cost': 0.02992561, - 'fee': {'currency': 'NEO', 'cost': 0.00027452}}, None, 0.00074305), - # Rate included in return - return as is - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'USDT', 'cost': 0.34, 'rate': 0.01}}, None, 0.01), - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': 'USDT', 'cost': 0.34, 'rate': 0.005}}, None, 0.005), - # 0.1% filled - no costs (kraken - #3431) - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.0, - 'fee': {'currency': 'BTC', 'cost': 0.0, 'rate': None}}, None, None), - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.0, - 'fee': {'currency': 'ETH', 'cost': 0.0, 'rate': None}}, None, 0.0), - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.0, - 'fee': {'currency': 'NEO', 'cost': 0.0, 'rate': None}}, None, None), - # Invalid pair combination - POINT/BTC is not a pair - ({'symbol': 'POINT/BTC', 'amount': 0.04, 'cost': 0.5, - 'fee': {'currency': 'POINT', 'cost': 2.0, 'rate': None}}, None, None), - ({'symbol': 'POINT/BTC', 'amount': 0.04, 'cost': 0.5, - 'fee': {'currency': 'POINT', 'cost': 2.0, 'rate': None}}, 1, 4.0), - ({'symbol': 'POINT/BTC', 'amount': 0.04, 'cost': 0.5, - 'fee': {'currency': 'POINT', 'cost': 2.0, 'rate': None}}, 2, 8.0), - # Missing currency - ({'symbol': 'ETH/BTC', 'amount': 0.04, 'cost': 0.05, - 'fee': {'currency': None, 'cost': 0.005}}, None, None), -]) +@pytest.mark.parametrize( + "order,unknown_fee_rate,expected", + [ + # Using base-currency + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "ETH", "cost": 0.004, "rate": None}, + }, + None, + 0.1, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 0.05, + "cost": 0.05, + "fee": {"currency": "ETH", "cost": 0.004, "rate": None}, + }, + None, + 0.08, + ), + # Using quote currency + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "BTC", "cost": 0.005}, + }, + None, + 0.1, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "BTC", "cost": 0.002, "rate": None}, + }, + None, + 0.04, + ), + # Using foreign currency + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "NEO", "cost": 0.0012}, + }, + None, + 0.001944, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 2.21, + "cost": 0.02992561, + "fee": {"currency": "NEO", "cost": 0.00027452}, + }, + None, + 0.00074305, + ), + # Rate included in return - return as is + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "USDT", "cost": 0.34, "rate": 0.01}, + }, + None, + 0.01, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": "USDT", "cost": 0.34, "rate": 0.005}, + }, + None, + 0.005, + ), + # 0.1% 
filled - no costs (kraken - #3431) + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.0, + "fee": {"currency": "BTC", "cost": 0.0, "rate": None}, + }, + None, + None, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.0, + "fee": {"currency": "ETH", "cost": 0.0, "rate": None}, + }, + None, + 0.0, + ), + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.0, + "fee": {"currency": "NEO", "cost": 0.0, "rate": None}, + }, + None, + None, + ), + # Invalid pair combination - POINT/BTC is not a pair + ( + { + "symbol": "POINT/BTC", + "amount": 0.04, + "cost": 0.5, + "fee": {"currency": "POINT", "cost": 2.0, "rate": None}, + }, + None, + None, + ), + ( + { + "symbol": "POINT/BTC", + "amount": 0.04, + "cost": 0.5, + "fee": {"currency": "POINT", "cost": 2.0, "rate": None}, + }, + 1, + 4.0, + ), + ( + { + "symbol": "POINT/BTC", + "amount": 0.04, + "cost": 0.5, + "fee": {"currency": "POINT", "cost": 2.0, "rate": None}, + }, + 2, + 8.0, + ), + # Missing currency + ( + { + "symbol": "ETH/BTC", + "amount": 0.04, + "cost": 0.05, + "fee": {"currency": None, "cost": 0.005}, + }, + None, + None, + ), + ], +) def test_calculate_fee_rate(mocker, default_conf, order, expected, unknown_fee_rate) -> None: - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'last': 0.081}) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"last": 0.081}) if unknown_fee_rate: - default_conf['exchange']['unknown_fee_rate'] = unknown_fee_rate + default_conf["exchange"]["unknown_fee_rate"] = unknown_fee_rate ex = get_patched_exchange(mocker, default_conf) - assert ex.calculate_fee_rate(order['fee'], order['symbol'], - cost=order['cost'], amount=order['amount']) == expected + assert ( + ex.calculate_fee_rate( + order["fee"], order["symbol"], cost=order["cost"], amount=order["amount"] + ) + == expected + ) -@pytest.mark.parametrize('retrycount,max_retries,expected', [ - (0, 3, 10), - (1, 3, 5), - (2, 3, 2), - (3, 3, 1), - (0, 1, 2), - (1, 1, 1), - (0, 4, 17), - (1, 4, 10), - (2, 4, 5), - (3, 4, 2), - (4, 4, 1), - (0, 5, 26), - (1, 5, 17), - (2, 5, 10), - (3, 5, 5), - (4, 5, 2), - (5, 5, 1), -]) +@pytest.mark.parametrize( + "retrycount,max_retries,expected", + [ + (0, 3, 10), + (1, 3, 5), + (2, 3, 2), + (3, 3, 1), + (0, 1, 2), + (1, 1, 1), + (0, 4, 17), + (1, 4, 10), + (2, 4, 5), + (3, 4, 2), + (4, 4, 1), + (0, 5, 26), + (1, 5, 17), + (2, 5, 10), + (3, 5, 5), + (4, 5, 2), + (5, 5, 1), + ], +) def test_calculate_backoff(retrycount, max_retries, expected): assert calculate_backoff(retrycount, max_retries) == expected @@ -3988,67 +4653,67 @@ def test_calculate_backoff(retrycount, max_retries, expected): @pytest.mark.parametrize("exchange_name", EXCHANGES) def test_get_funding_fees(default_conf_usdt, mocker, exchange_name, caplog): now = datetime.now(timezone.utc) - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf_usdt, id=exchange_name) exchange._fetch_and_calculate_funding_fees = MagicMock(side_effect=ExchangeError) - assert exchange.get_funding_fees('BTC/USDT:USDT', 1, False, now) == 0.0 + assert exchange.get_funding_fees("BTC/USDT:USDT", 1, False, now) == 0.0 assert exchange._fetch_and_calculate_funding_fees.call_count == 1 assert log_has("Could not update funding fees for BTC/USDT:USDT.", caplog) -@pytest.mark.parametrize("exchange_name", ['binance']) +@pytest.mark.parametrize("exchange_name", ["binance"]) def 
test__get_funding_fees_from_exchange(default_conf, mocker, exchange_name): api_mock = MagicMock() - api_mock.fetch_funding_history = MagicMock(return_value=[ - { - 'amount': 0.14542, - 'code': 'USDT', - 'datetime': '2021-09-01T08:00:01.000Z', - 'id': '485478', - 'info': {'asset': 'USDT', - 'income': '0.14542', - 'incomeType': 'FUNDING_FEE', - 'info': 'FUNDING_FEE', - 'symbol': 'XRPUSDT', - 'time': '1630382001000', - 'tradeId': '', - 'tranId': '993203'}, - 'symbol': 'XRP/USDT', - 'timestamp': 1630382001000 - }, - { - 'amount': -0.14642, - 'code': 'USDT', - 'datetime': '2021-09-01T16:00:01.000Z', - 'id': '485479', - 'info': {'asset': 'USDT', - 'income': '-0.14642', - 'incomeType': 'FUNDING_FEE', - 'info': 'FUNDING_FEE', - 'symbol': 'XRPUSDT', - 'time': '1630314001000', - 'tradeId': '', - 'tranId': '993204'}, - 'symbol': 'XRP/USDT', - 'timestamp': 1630314001000 - } - ]) - type(api_mock).has = PropertyMock(return_value={'fetchFundingHistory': True}) + api_mock.fetch_funding_history = MagicMock( + return_value=[ + { + "amount": 0.14542, + "code": "USDT", + "datetime": "2021-09-01T08:00:01.000Z", + "id": "485478", + "info": { + "asset": "USDT", + "income": "0.14542", + "incomeType": "FUNDING_FEE", + "info": "FUNDING_FEE", + "symbol": "XRPUSDT", + "time": "1630382001000", + "tradeId": "", + "tranId": "993203", + }, + "symbol": "XRP/USDT", + "timestamp": 1630382001000, + }, + { + "amount": -0.14642, + "code": "USDT", + "datetime": "2021-09-01T16:00:01.000Z", + "id": "485479", + "info": { + "asset": "USDT", + "income": "-0.14642", + "incomeType": "FUNDING_FEE", + "info": "FUNDING_FEE", + "symbol": "XRPUSDT", + "time": "1630314001000", + "tradeId": "", + "tranId": "993204", + }, + "symbol": "XRP/USDT", + "timestamp": 1630314001000, + }, + ] + ) + type(api_mock).has = PropertyMock(return_value={"fetchFundingHistory": True}) # mocker.patch(f'{EXMS}.get_funding_fees', lambda pair, since: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - date_time = datetime.strptime("2021-09-01T00:00:01.000Z", '%Y-%m-%dT%H:%M:%S.%fZ') + date_time = datetime.strptime("2021-09-01T00:00:01.000Z", "%Y-%m-%dT%H:%M:%S.%fZ") unix_time = int(date_time.timestamp()) expected_fees = -0.001 # 0.14542341 + -0.14642341 - fees_from_datetime = exchange._get_funding_fees_from_exchange( - pair='XRP/USDT', - since=date_time - ) - fees_from_unix_time = exchange._get_funding_fees_from_exchange( - pair='XRP/USDT', - since=unix_time - ) + fees_from_datetime = exchange._get_funding_fees_from_exchange(pair="XRP/USDT", since=date_time) + fees_from_unix_time = exchange._get_funding_fees_from_exchange(pair="XRP/USDT", since=unix_time) assert pytest.approx(expected_fees) == fees_from_datetime assert pytest.approx(expected_fees) == fees_from_unix_time @@ -4061,39 +4726,31 @@ def test__get_funding_fees_from_exchange(default_conf, mocker, exchange_name): "_get_funding_fees_from_exchange", "fetch_funding_history", pair="XRP/USDT", - since=unix_time + since=unix_time, ) -@pytest.mark.parametrize('exchange', ['binance', 'kraken']) -@pytest.mark.parametrize('stake_amount,leverage,min_stake_with_lev', [ - (9.0, 3.0, 3.0), - (20.0, 5.0, 4.0), - (100.0, 100.0, 1.0) -]) +@pytest.mark.parametrize("exchange", ["binance", "kraken"]) +@pytest.mark.parametrize( + "stake_amount,leverage,min_stake_with_lev", + [(9.0, 3.0, 3.0), (20.0, 5.0, 4.0), (100.0, 100.0, 1.0)], +) def test_get_stake_amount_considering_leverage( - exchange, - stake_amount, - leverage, - min_stake_with_lev, - mocker, - default_conf + exchange, stake_amount, 
leverage, min_stake_with_lev, mocker, default_conf ): exchange = get_patched_exchange(mocker, default_conf, id=exchange) - assert exchange._get_stake_amount_considering_leverage( - stake_amount, leverage) == min_stake_with_lev + assert ( + exchange._get_stake_amount_considering_leverage(stake_amount, leverage) + == min_stake_with_lev + ) -@pytest.mark.parametrize("margin_mode", [ - (MarginMode.CROSS), - (MarginMode.ISOLATED) -]) +@pytest.mark.parametrize("margin_mode", [(MarginMode.CROSS), (MarginMode.ISOLATED)]) def test_set_margin_mode(mocker, default_conf, margin_mode): - api_mock = MagicMock() api_mock.set_margin_mode = MagicMock() - type(api_mock).has = PropertyMock(return_value={'setMarginMode': True}) - default_conf['dry_run'] = False + type(api_mock).has = PropertyMock(return_value={"setMarginMode": True}) + default_conf["dry_run"] = False ccxt_exceptionhandlers( mocker, @@ -4103,146 +4760,144 @@ def test_set_margin_mode(mocker, default_conf, margin_mode): "set_margin_mode", "set_margin_mode", pair="XRP/USDT", - margin_mode=margin_mode + margin_mode=margin_mode, ) -@pytest.mark.parametrize("exchange_name, trading_mode, margin_mode, exception_thrown", [ - ("binance", TradingMode.SPOT, None, False), - ("binance", TradingMode.MARGIN, MarginMode.ISOLATED, True), - ("kraken", TradingMode.SPOT, None, False), - ("kraken", TradingMode.MARGIN, MarginMode.ISOLATED, True), - ("kraken", TradingMode.FUTURES, MarginMode.ISOLATED, True), - ("bitmart", TradingMode.SPOT, None, False), - ("bitmart", TradingMode.MARGIN, MarginMode.CROSS, True), - ("bitmart", TradingMode.MARGIN, MarginMode.ISOLATED, True), - ("bitmart", TradingMode.FUTURES, MarginMode.CROSS, True), - ("bitmart", TradingMode.FUTURES, MarginMode.ISOLATED, True), - ("gate", TradingMode.MARGIN, MarginMode.ISOLATED, True), - ("okx", TradingMode.SPOT, None, False), - ("okx", TradingMode.MARGIN, MarginMode.CROSS, True), - ("okx", TradingMode.MARGIN, MarginMode.ISOLATED, True), - ("okx", TradingMode.FUTURES, MarginMode.CROSS, True), - - ("binance", TradingMode.FUTURES, MarginMode.ISOLATED, False), - ("gate", TradingMode.FUTURES, MarginMode.ISOLATED, False), - ("okx", TradingMode.FUTURES, MarginMode.ISOLATED, False), - - # * Remove once implemented - ("binance", TradingMode.MARGIN, MarginMode.CROSS, True), - ("binance", TradingMode.FUTURES, MarginMode.CROSS, True), - ("kraken", TradingMode.MARGIN, MarginMode.CROSS, True), - ("kraken", TradingMode.FUTURES, MarginMode.CROSS, True), - ("gate", TradingMode.MARGIN, MarginMode.CROSS, True), - ("gate", TradingMode.FUTURES, MarginMode.CROSS, True), - - # * Uncomment once implemented - # ("binance", TradingMode.MARGIN, MarginMode.CROSS, False), - # ("binance", TradingMode.FUTURES, MarginMode.CROSS, False), - # ("kraken", TradingMode.MARGIN, MarginMode.CROSS, False), - # ("kraken", TradingMode.FUTURES, MarginMode.CROSS, False), - # ("gate", TradingMode.MARGIN, MarginMode.CROSS, False), - # ("gate", TradingMode.FUTURES, MarginMode.CROSS, False), -]) +@pytest.mark.parametrize( + "exchange_name, trading_mode, margin_mode, exception_thrown", + [ + ("binance", TradingMode.SPOT, None, False), + ("binance", TradingMode.MARGIN, MarginMode.ISOLATED, True), + ("kraken", TradingMode.SPOT, None, False), + ("kraken", TradingMode.MARGIN, MarginMode.ISOLATED, True), + ("kraken", TradingMode.FUTURES, MarginMode.ISOLATED, True), + ("bitmart", TradingMode.SPOT, None, False), + ("bitmart", TradingMode.MARGIN, MarginMode.CROSS, True), + ("bitmart", TradingMode.MARGIN, MarginMode.ISOLATED, True), + ("bitmart", 
TradingMode.FUTURES, MarginMode.CROSS, True), + ("bitmart", TradingMode.FUTURES, MarginMode.ISOLATED, True), + ("gate", TradingMode.MARGIN, MarginMode.ISOLATED, True), + ("okx", TradingMode.SPOT, None, False), + ("okx", TradingMode.MARGIN, MarginMode.CROSS, True), + ("okx", TradingMode.MARGIN, MarginMode.ISOLATED, True), + ("okx", TradingMode.FUTURES, MarginMode.CROSS, True), + ("binance", TradingMode.FUTURES, MarginMode.ISOLATED, False), + ("gate", TradingMode.FUTURES, MarginMode.ISOLATED, False), + ("okx", TradingMode.FUTURES, MarginMode.ISOLATED, False), + # * Remove once implemented + ("binance", TradingMode.MARGIN, MarginMode.CROSS, True), + ("binance", TradingMode.FUTURES, MarginMode.CROSS, True), + ("kraken", TradingMode.MARGIN, MarginMode.CROSS, True), + ("kraken", TradingMode.FUTURES, MarginMode.CROSS, True), + ("gate", TradingMode.MARGIN, MarginMode.CROSS, True), + ("gate", TradingMode.FUTURES, MarginMode.CROSS, True), + # * Uncomment once implemented + # ("binance", TradingMode.MARGIN, MarginMode.CROSS, False), + # ("binance", TradingMode.FUTURES, MarginMode.CROSS, False), + # ("kraken", TradingMode.MARGIN, MarginMode.CROSS, False), + # ("kraken", TradingMode.FUTURES, MarginMode.CROSS, False), + # ("gate", TradingMode.MARGIN, MarginMode.CROSS, False), + # ("gate", TradingMode.FUTURES, MarginMode.CROSS, False), + ], +) def test_validate_trading_mode_and_margin_mode( - default_conf, - mocker, - exchange_name, - trading_mode, - margin_mode, - exception_thrown + default_conf, mocker, exchange_name, trading_mode, margin_mode, exception_thrown ): exchange = get_patched_exchange( - mocker, default_conf, id=exchange_name, mock_supported_modes=False) - if (exception_thrown): + mocker, default_conf, id=exchange_name, mock_supported_modes=False + ) + if exception_thrown: with pytest.raises(OperationalException): exchange.validate_trading_mode_and_margin_mode(trading_mode, margin_mode) else: exchange.validate_trading_mode_and_margin_mode(trading_mode, margin_mode) -@pytest.mark.parametrize("exchange_name,trading_mode,ccxt_config", [ - ("binance", "spot", {}), - ("binance", "margin", {"options": {"defaultType": "margin"}}), - ("binance", "futures", {"options": {"defaultType": "swap"}}), - ("bybit", "spot", {"options": {"defaultType": "spot"}}), - ("bybit", "futures", {"options": {"defaultType": "swap"}}), - ("gate", "futures", {"options": {"defaultType": "swap"}}), - ("hitbtc", "futures", {"options": {"defaultType": "swap"}}), - ("kraken", "futures", {"options": {"defaultType": "swap"}}), - ("kucoin", "futures", {"options": {"defaultType": "swap"}}), - ("okx", "futures", {"options": {"defaultType": "swap"}}), -]) -def test__ccxt_config( - default_conf, - mocker, - exchange_name, - trading_mode, - ccxt_config -): - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = 'isolated' +@pytest.mark.parametrize( + "exchange_name,trading_mode,ccxt_config", + [ + ("binance", "spot", {}), + ("binance", "margin", {"options": {"defaultType": "margin"}}), + ("binance", "futures", {"options": {"defaultType": "swap"}}), + ("bybit", "spot", {"options": {"defaultType": "spot"}}), + ("bybit", "futures", {"options": {"defaultType": "swap"}}), + ("gate", "futures", {"options": {"defaultType": "swap"}}), + ("hitbtc", "futures", {"options": {"defaultType": "swap"}}), + ("kraken", "futures", {"options": {"defaultType": "swap"}}), + ("kucoin", "futures", {"options": {"defaultType": "swap"}}), + ("okx", "futures", {"options": {"defaultType": "swap"}}), + ], +) +def 
test__ccxt_config(default_conf, mocker, exchange_name, trading_mode, ccxt_config): + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) assert exchange._ccxt_config == ccxt_config -@pytest.mark.parametrize('pair,nominal_value,max_lev', [ - ("ETH/BTC", 0.0, 2.0), - ("TKN/BTC", 100.0, 5.0), - ("BLK/BTC", 173.31, 3.0), - ("LTC/BTC", 0.0, 1.0), - ("TKN/USDT", 210.30, 1.0), -]) +@pytest.mark.parametrize( + "pair,nominal_value,max_lev", + [ + ("ETH/BTC", 0.0, 2.0), + ("TKN/BTC", 100.0, 5.0), + ("BLK/BTC", 173.31, 3.0), + ("LTC/BTC", 0.0, 1.0), + ("TKN/USDT", 210.30, 1.0), + ], +) def test_get_max_leverage_from_margin(default_conf, mocker, pair, nominal_value, max_lev): - default_conf['trading_mode'] = 'margin' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "margin" + default_conf["margin_mode"] = "isolated" api_mock = MagicMock() - type(api_mock).has = PropertyMock(return_value={'fetchLeverageTiers': False}) + type(api_mock).has = PropertyMock(return_value={"fetchLeverageTiers": False}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id="gate") assert exchange.get_max_leverage(pair, nominal_value) == max_lev @pytest.mark.parametrize( - 'size,funding_rate,mark_price,time_in_ratio,funding_fee,kraken_fee', [ + "size,funding_rate,mark_price,time_in_ratio,funding_fee,kraken_fee", + [ (10, 0.0001, 2.0, 1.0, 0.002, 0.002), (10, 0.0002, 2.0, 0.01, 0.004, 0.00004), (10, 0.0002, 2.5, None, 0.005, None), (10, 0.0002, NaN, None, 0.0, None), - ]) + ], +) def test_calculate_funding_fees( - default_conf, - mocker, - size, - funding_rate, - mark_price, - funding_fee, - kraken_fee, - time_in_ratio + default_conf, mocker, size, funding_rate, mark_price, funding_fee, kraken_fee, time_in_ratio ): exchange = get_patched_exchange(mocker, default_conf) kraken = get_patched_exchange(mocker, default_conf, id="kraken") - prior_date = timeframe_to_prev_date('1h', datetime.now(timezone.utc) - timedelta(hours=1)) - trade_date = timeframe_to_prev_date('1h', datetime.now(timezone.utc)) - funding_rates = DataFrame([ - {'date': prior_date, 'open': funding_rate}, # Line not used. - {'date': trade_date, 'open': funding_rate}, - ]) - mark_rates = DataFrame([ - {'date': prior_date, 'open': mark_price}, - {'date': trade_date, 'open': mark_price}, - ]) + prior_date = timeframe_to_prev_date("1h", datetime.now(timezone.utc) - timedelta(hours=1)) + trade_date = timeframe_to_prev_date("1h", datetime.now(timezone.utc)) + funding_rates = DataFrame( + [ + {"date": prior_date, "open": funding_rate}, # Line not used. 
+ {"date": trade_date, "open": funding_rate}, + ] + ) + mark_rates = DataFrame( + [ + {"date": prior_date, "open": mark_price}, + {"date": trade_date, "open": mark_price}, + ] + ) df = exchange.combine_funding_and_mark(funding_rates, mark_rates) - assert exchange.calculate_funding_fees( - df, - amount=size, - is_short=True, - open_date=trade_date, - close_date=trade_date, - time_in_ratio=time_in_ratio, - ) == funding_fee + assert ( + exchange.calculate_funding_fees( + df, + amount=size, + is_short=True, + open_date=trade_date, + close_date=trade_date, + time_in_ratio=time_in_ratio, + ) + == funding_fee + ) - if (kraken_fee is None): + if kraken_fee is None: with pytest.raises(OperationalException): kraken.calculate_funding_fees( df, @@ -4254,23 +4909,28 @@ def test_calculate_funding_fees( ) else: - assert kraken.calculate_funding_fees( - df, - amount=size, - is_short=True, - open_date=trade_date, - close_date=trade_date, - time_in_ratio=time_in_ratio, - ) == kraken_fee + assert ( + kraken.calculate_funding_fees( + df, + amount=size, + is_short=True, + open_date=trade_date, + close_date=trade_date, + time_in_ratio=time_in_ratio, + ) + == kraken_fee + ) @pytest.mark.parametrize( - 'mark_price,funding_rate,futures_funding_rate', [ + "mark_price,funding_rate,futures_funding_rate", + [ (1000, 0.001, None), (1000, 0.001, 0.01), (1000, 0.001, 0.0), (1000, 0.001, -0.01), - ]) + ], +) def test_combine_funding_and_mark( default_conf, mocker, @@ -4279,88 +4939,99 @@ def test_combine_funding_and_mark( futures_funding_rate, ): exchange = get_patched_exchange(mocker, default_conf) - prior2_date = timeframe_to_prev_date('1h', datetime.now(timezone.utc) - timedelta(hours=2)) - prior_date = timeframe_to_prev_date('1h', datetime.now(timezone.utc) - timedelta(hours=1)) - trade_date = timeframe_to_prev_date('1h', datetime.now(timezone.utc)) - funding_rates = DataFrame([ - {'date': prior2_date, 'open': funding_rate}, - {'date': prior_date, 'open': funding_rate}, - {'date': trade_date, 'open': funding_rate}, - ]) - mark_rates = DataFrame([ - {'date': prior2_date, 'open': mark_price}, - {'date': prior_date, 'open': mark_price}, - {'date': trade_date, 'open': mark_price}, - ]) + prior2_date = timeframe_to_prev_date("1h", datetime.now(timezone.utc) - timedelta(hours=2)) + prior_date = timeframe_to_prev_date("1h", datetime.now(timezone.utc) - timedelta(hours=1)) + trade_date = timeframe_to_prev_date("1h", datetime.now(timezone.utc)) + funding_rates = DataFrame( + [ + {"date": prior2_date, "open": funding_rate}, + {"date": prior_date, "open": funding_rate}, + {"date": trade_date, "open": funding_rate}, + ] + ) + mark_rates = DataFrame( + [ + {"date": prior2_date, "open": mark_price}, + {"date": prior_date, "open": mark_price}, + {"date": trade_date, "open": mark_price}, + ] + ) df = exchange.combine_funding_and_mark(funding_rates, mark_rates, futures_funding_rate) - assert 'open_mark' in df.columns - assert 'open_fund' in df.columns + assert "open_mark" in df.columns + assert "open_fund" in df.columns assert len(df) == 3 - funding_rates = DataFrame([ - {'date': trade_date, 'open': funding_rate}, - ]) - mark_rates = DataFrame([ - {'date': prior2_date, 'open': mark_price}, - {'date': prior_date, 'open': mark_price}, - {'date': trade_date, 'open': mark_price}, - ]) + funding_rates = DataFrame( + [ + {"date": trade_date, "open": funding_rate}, + ] + ) + mark_rates = DataFrame( + [ + {"date": prior2_date, "open": mark_price}, + {"date": prior_date, "open": mark_price}, + {"date": trade_date, "open": mark_price}, + ] + ) 
df = exchange.combine_funding_and_mark(funding_rates, mark_rates, futures_funding_rate) if futures_funding_rate is not None: assert len(df) == 3 - assert df.iloc[0]['open_fund'] == futures_funding_rate - assert df.iloc[1]['open_fund'] == futures_funding_rate - assert df.iloc[2]['open_fund'] == funding_rate + assert df.iloc[0]["open_fund"] == futures_funding_rate + assert df.iloc[1]["open_fund"] == futures_funding_rate + assert df.iloc[2]["open_fund"] == funding_rate else: assert len(df) == 1 # Empty funding rates - funding_rates2 = DataFrame([], columns=['date', 'open']) + funding_rates2 = DataFrame([], columns=["date", "open"]) df = exchange.combine_funding_and_mark(funding_rates2, mark_rates, futures_funding_rate) if futures_funding_rate is not None: assert len(df) == 3 - assert df.iloc[0]['open_fund'] == futures_funding_rate - assert df.iloc[1]['open_fund'] == futures_funding_rate - assert df.iloc[2]['open_fund'] == futures_funding_rate + assert df.iloc[0]["open_fund"] == futures_funding_rate + assert df.iloc[1]["open_fund"] == futures_funding_rate + assert df.iloc[2]["open_fund"] == futures_funding_rate else: assert len(df) == 0 # Empty mark candles - mark_candles = DataFrame([], columns=['date', 'open']) + mark_candles = DataFrame([], columns=["date", "open"]) df = exchange.combine_funding_and_mark(funding_rates, mark_candles, futures_funding_rate) assert len(df) == 0 -@pytest.mark.parametrize('exchange,rate_start,rate_end,d1,d2,amount,expected_fees', [ - ('binance', 0, 2, "2021-09-01 01:00:00", "2021-09-01 04:00:00", 30.0, 0.0), - ('binance', 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.00091409999), - ('binance', 0, 2, "2021-09-01 00:00:15", "2021-09-01 08:00:00", 30.0, -0.0002493), - ('binance', 1, 2, "2021-09-01 01:00:14", "2021-09-01 08:00:00", 30.0, -0.0002493), - ('binance', 1, 2, "2021-09-01 00:00:16", "2021-09-01 08:00:00", 30.0, -0.0002493), - ('binance', 0, 1, "2021-09-01 00:00:00", "2021-09-01 07:59:59", 30.0, -0.00066479999), - ('binance', 0, 2, "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, -0.00091409999), - # :01 must be rounded down. - ('binance', 0, 2, "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.00091409999), - ('binance', 0, 2, "2021-08-31 23:58:00", "2021-09-01 08:00:00", 30.0, -0.00091409999), - ('binance', 0, 2, "2021-09-01 00:10:01", "2021-09-01 08:00:00", 30.0, -0.0002493), - # TODO: Uncomment once _calculate_funding_fees can pass time_in_ratio to exchange. - # ('kraken', "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.0014937), - # ('kraken', "2021-09-01 00:00:15", "2021-09-01 08:00:00", 30.0, -0.0008289), - # ('kraken', "2021-09-01 01:00:14", "2021-09-01 08:00:00", 30.0, -0.0008289), - # ('kraken', "2021-09-01 00:00:00", "2021-09-01 07:59:59", 30.0, -0.0012443999999999999), - # ('kraken', "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, 0.0045759), - # ('kraken', "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.0008289), - ('gate', 0, 2, "2021-09-01 00:10:00", "2021-09-01 04:00:00", 30.0, 0.0), - ('gate', 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.0009140999), - ('gate', 0, 2, "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, -0.0009140999), - ('gate', 1, 2, "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.0002493), - ('binance', 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 50.0, -0.0015235), - # TODO: Uncomment once _calculate_funding_fees can pass time_in_ratio to exchange. 
- # ('kraken', "2021-09-01 00:00:00", "2021-09-01 08:00:00", 50.0, -0.0024895), -]) +@pytest.mark.parametrize( + "exchange,rate_start,rate_end,d1,d2,amount,expected_fees", + [ + ("binance", 0, 2, "2021-09-01 01:00:00", "2021-09-01 04:00:00", 30.0, 0.0), + ("binance", 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.00091409999), + ("binance", 0, 2, "2021-09-01 00:00:15", "2021-09-01 08:00:00", 30.0, -0.0002493), + ("binance", 1, 2, "2021-09-01 01:00:14", "2021-09-01 08:00:00", 30.0, -0.0002493), + ("binance", 1, 2, "2021-09-01 00:00:16", "2021-09-01 08:00:00", 30.0, -0.0002493), + ("binance", 0, 1, "2021-09-01 00:00:00", "2021-09-01 07:59:59", 30.0, -0.00066479999), + ("binance", 0, 2, "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, -0.00091409999), + # :01 must be rounded down. + ("binance", 0, 2, "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.00091409999), + ("binance", 0, 2, "2021-08-31 23:58:00", "2021-09-01 08:00:00", 30.0, -0.00091409999), + ("binance", 0, 2, "2021-09-01 00:10:01", "2021-09-01 08:00:00", 30.0, -0.0002493), + # TODO: Uncomment once _calculate_funding_fees can pass time_in_ratio to exchange. + # ('kraken', "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.0014937), + # ('kraken', "2021-09-01 00:00:15", "2021-09-01 08:00:00", 30.0, -0.0008289), + # ('kraken', "2021-09-01 01:00:14", "2021-09-01 08:00:00", 30.0, -0.0008289), + # ('kraken', "2021-09-01 00:00:00", "2021-09-01 07:59:59", 30.0, -0.0012443999999999999), + # ('kraken', "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, 0.0045759), + # ('kraken', "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.0008289), + ("gate", 0, 2, "2021-09-01 00:10:00", "2021-09-01 04:00:00", 30.0, 0.0), + ("gate", 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 30.0, -0.0009140999), + ("gate", 0, 2, "2021-09-01 00:00:00", "2021-09-01 12:00:00", 30.0, -0.0009140999), + ("gate", 1, 2, "2021-09-01 00:00:01", "2021-09-01 08:00:00", 30.0, -0.0002493), + ("binance", 0, 2, "2021-09-01 00:00:00", "2021-09-01 08:00:00", 50.0, -0.0015235), + # TODO: Uncomment once _calculate_funding_fees can pass time_in_ratio to exchange. 
+ # ('kraken', "2021-09-01 00:00:00", "2021-09-01 08:00:00", 50.0, -0.0024895), + ], +) def test__fetch_and_calculate_funding_fees( mocker, default_conf, @@ -4373,7 +5044,7 @@ def test__fetch_and_calculate_funding_fees( d1, d2, amount, - expected_fees + expected_fees, ): """ nominal_value = mark_price * size @@ -4410,26 +5081,28 @@ def test__fetch_and_calculate_funding_fees( time: 12, mark: 2.81, nominal_value: 140.5, fundRate: 0.000072, fundFee: 0.010116 time: 13, mark: 2.82, nominal_value: 141.0, fundRate: 0.000097, fundFee: 0.013677 """ - d1 = datetime.strptime(f"{d1} +0000", '%Y-%m-%d %H:%M:%S %z') - d2 = datetime.strptime(f"{d2} +0000", '%Y-%m-%d %H:%M:%S %z') + d1 = datetime.strptime(f"{d1} +0000", "%Y-%m-%d %H:%M:%S %z") + d2 = datetime.strptime(f"{d2} +0000", "%Y-%m-%d %H:%M:%S %z") funding_rate_history = { - 'binance': funding_rate_history_octohourly, - 'gate': funding_rate_history_octohourly, + "binance": funding_rate_history_octohourly, + "gate": funding_rate_history_octohourly, }[exchange][rate_start:rate_end] api_mock = MagicMock() api_mock.fetch_funding_rate_history = get_mock_coro(return_value=funding_rate_history) api_mock.fetch_ohlcv = get_mock_coro(return_value=mark_ohlcv) - type(api_mock).has = PropertyMock(return_value={'fetchOHLCV': True}) - type(api_mock).has = PropertyMock(return_value={'fetchFundingRateHistory': True}) + type(api_mock).has = PropertyMock(return_value={"fetchOHLCV": True}) + type(api_mock).has = PropertyMock(return_value={"fetchFundingRateHistory": True}) ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) - mocker.patch(f'{EXMS}.timeframes', PropertyMock(return_value=['1h', '4h', '8h'])) + mocker.patch(f"{EXMS}.timeframes", PropertyMock(return_value=["1h", "4h", "8h"])) funding_fees = ex._fetch_and_calculate_funding_fees( - pair='ADA/USDT:USDT', amount=amount, is_short=True, open_date=d1, close_date=d2) + pair="ADA/USDT:USDT", amount=amount, is_short=True, open_date=d1, close_date=d2 + ) assert pytest.approx(funding_fees) == expected_fees # Fees for Longs are inverted funding_fees = ex._fetch_and_calculate_funding_fees( - pair='ADA/USDT:USDT', amount=amount, is_short=False, open_date=d1, close_date=d2) + pair="ADA/USDT:USDT", amount=amount, is_short=False, open_date=d1, close_date=d2 + ) assert pytest.approx(funding_fees) == -expected_fees # Return empty "refresh_latest" @@ -4437,13 +5110,17 @@ def test__fetch_and_calculate_funding_fees( ex = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) with pytest.raises(ExchangeError, match="Could not find funding rates."): ex._fetch_and_calculate_funding_fees( - pair='ADA/USDT:USDT', amount=amount, is_short=False, open_date=d1, close_date=d2) + pair="ADA/USDT:USDT", amount=amount, is_short=False, open_date=d1, close_date=d2 + ) -@pytest.mark.parametrize('exchange,expected_fees', [ - ('binance', -0.0009140999999999999), - ('gate', -0.0009140999999999999), -]) +@pytest.mark.parametrize( + "exchange,expected_fees", + [ + ("binance", -0.0009140999999999999), + ("gate", -0.0009140999999999999), + ], +) def test__fetch_and_calculate_funding_fees_datetime_called( mocker, default_conf, @@ -4451,62 +5128,72 @@ def test__fetch_and_calculate_funding_fees_datetime_called( mark_ohlcv, exchange, time_machine, - expected_fees + expected_fees, ): api_mock = MagicMock() api_mock.fetch_ohlcv = get_mock_coro(return_value=mark_ohlcv) api_mock.fetch_funding_rate_history = get_mock_coro( - return_value=funding_rate_history_octohourly) - type(api_mock).has = 
PropertyMock(return_value={'fetchOHLCV': True}) - type(api_mock).has = PropertyMock(return_value={'fetchFundingRateHistory': True}) - mocker.patch(f'{EXMS}.timeframes', PropertyMock(return_value=['4h', '8h'])) + return_value=funding_rate_history_octohourly + ) + type(api_mock).has = PropertyMock(return_value={"fetchOHLCV": True}) + type(api_mock).has = PropertyMock(return_value={"fetchFundingRateHistory": True}) + mocker.patch(f"{EXMS}.timeframes", PropertyMock(return_value=["4h", "8h"])) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange) - d1 = datetime.strptime("2021-08-31 23:00:01 +0000", '%Y-%m-%d %H:%M:%S %z') + d1 = datetime.strptime("2021-08-31 23:00:01 +0000", "%Y-%m-%d %H:%M:%S %z") time_machine.move_to("2021-09-01 08:00:00 +00:00") - funding_fees = exchange._fetch_and_calculate_funding_fees('ADA/USDT', 30.0, True, d1) + funding_fees = exchange._fetch_and_calculate_funding_fees("ADA/USDT", 30.0, True, d1) assert funding_fees == expected_fees - funding_fees = exchange._fetch_and_calculate_funding_fees('ADA/USDT', 30.0, False, d1) + funding_fees = exchange._fetch_and_calculate_funding_fees("ADA/USDT", 30.0, False, d1) assert funding_fees == 0 - expected_fees -@pytest.mark.parametrize('pair,expected_size,trading_mode', [ - ('XLTCUSDT', 1, 'spot'), - ('LTC/USD', 1, 'futures'), - ('XLTCUSDT', 0.01, 'futures'), - ('ETH/USDT:USDT', 10, 'futures'), - ('TORN/USDT:USDT', None, 'futures'), # Don't fail for unavailable pairs. -]) +@pytest.mark.parametrize( + "pair,expected_size,trading_mode", + [ + ("XLTCUSDT", 1, "spot"), + ("LTC/USD", 1, "futures"), + ("XLTCUSDT", 0.01, "futures"), + ("ETH/USDT:USDT", 10, "futures"), + ("TORN/USDT:USDT", None, "futures"), # Don't fail for unavailable pairs. + ], +) def test__get_contract_size(mocker, default_conf, pair, expected_size, trading_mode): api_mock = MagicMock() - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, api_mock) - mocker.patch(f'{EXMS}.markets', { - 'LTC/USD': { - 'symbol': 'LTC/USD', - 'contractSize': None, + mocker.patch( + f"{EXMS}.markets", + { + "LTC/USD": { + "symbol": "LTC/USD", + "contractSize": None, + }, + "XLTCUSDT": { + "symbol": "XLTCUSDT", + "contractSize": "0.01", + }, + "ETH/USDT:USDT": { + "symbol": "ETH/USDT:USDT", + "contractSize": "10", + }, }, - 'XLTCUSDT': { - 'symbol': 'XLTCUSDT', - 'contractSize': '0.01', - }, - 'ETH/USDT:USDT': { - 'symbol': 'ETH/USDT:USDT', - 'contractSize': '10', - } - }) + ) size = exchange.get_contract_size(pair) assert expected_size == size -@pytest.mark.parametrize('pair,contract_size,trading_mode', [ - ('XLTCUSDT', 1, 'spot'), - ('LTC/USD', 1, 'futures'), - ('ADA/USDT:USDT', 0.01, 'futures'), - ('LTC/ETH', 1, 'futures'), - ('ETH/USDT:USDT', 10, 'futures'), -]) +@pytest.mark.parametrize( + "pair,contract_size,trading_mode", + [ + ("XLTCUSDT", 1, "spot"), + ("LTC/USD", 1, "futures"), + ("ADA/USDT:USDT", 0.01, "futures"), + ("LTC/ETH", 1, "futures"), + ("ETH/USDT:USDT", 10, "futures"), + ], +) def test__order_contracts_to_amount( mocker, default_conf, @@ -4516,119 +5203,126 @@ def test__order_contracts_to_amount( trading_mode, ): api_mock = MagicMock() - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = 'isolated' - mocker.patch(f'{EXMS}.markets', markets) + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = "isolated" + 
mocker.patch(f"{EXMS}.markets", markets) exchange = get_patched_exchange(mocker, default_conf, api_mock) orders = [ { - 'id': '123456320', - 'clientOrderId': '12345632018', - 'timestamp': 1640124992000, - 'datetime': 'Tue 21 Dec 2021 22:16:32 UTC', - 'lastTradeTimestamp': 1640124911000, - 'status': 'active', - 'symbol': pair, - 'type': 'limit', - 'timeInForce': 'gtc', - 'postOnly': None, - 'side': 'buy', - 'price': 2.0, - 'stopPrice': None, - 'average': None, - 'amount': 30.0, - 'cost': 60.0, - 'filled': None, - 'remaining': 30.0, - 'fee': { - 'currency': 'USDT', - 'cost': 0.06, + "id": "123456320", + "clientOrderId": "12345632018", + "timestamp": 1640124992000, + "datetime": "Tue 21 Dec 2021 22:16:32 UTC", + "lastTradeTimestamp": 1640124911000, + "status": "active", + "symbol": pair, + "type": "limit", + "timeInForce": "gtc", + "postOnly": None, + "side": "buy", + "price": 2.0, + "stopPrice": None, + "average": None, + "amount": 30.0, + "cost": 60.0, + "filled": None, + "remaining": 30.0, + "fee": { + "currency": "USDT", + "cost": 0.06, }, - 'fees': [{ - 'currency': 'USDT', - 'cost': 0.06, - }], - 'trades': None, - 'info': {}, + "fees": [ + { + "currency": "USDT", + "cost": 0.06, + } + ], + "trades": None, + "info": {}, }, { - 'id': '123456380', - 'clientOrderId': '12345638203', - 'timestamp': 1640124992000, - 'datetime': 'Tue 21 Dec 2021 22:16:32 UTC', - 'lastTradeTimestamp': 1640124911000, - 'status': 'active', - 'symbol': pair, - 'type': 'limit', - 'timeInForce': 'gtc', - 'postOnly': None, - 'side': 'sell', - 'price': 2.2, - 'stopPrice': None, - 'average': None, - 'amount': 40.0, - 'cost': 80.0, - 'filled': None, - 'remaining': 40.0, - 'fee': { - 'currency': 'USDT', - 'cost': 0.08, + "id": "123456380", + "clientOrderId": "12345638203", + "timestamp": 1640124992000, + "datetime": "Tue 21 Dec 2021 22:16:32 UTC", + "lastTradeTimestamp": 1640124911000, + "status": "active", + "symbol": pair, + "type": "limit", + "timeInForce": "gtc", + "postOnly": None, + "side": "sell", + "price": 2.2, + "stopPrice": None, + "average": None, + "amount": 40.0, + "cost": 80.0, + "filled": None, + "remaining": 40.0, + "fee": { + "currency": "USDT", + "cost": 0.08, }, - 'fees': [{ - 'currency': 'USDT', - 'cost': 0.08, - }], - 'trades': None, - 'info': {}, + "fees": [ + { + "currency": "USDT", + "cost": 0.08, + } + ], + "trades": None, + "info": {}, }, { # Realistic stoploss order on gate. 
- 'id': '123456380', - 'clientOrderId': '12345638203', - 'timestamp': None, - 'datetime': None, - 'lastTradeTimestamp': None, - 'status': None, - 'symbol': None, - 'type': None, - 'timeInForce': None, - 'postOnly': None, - 'side': None, - 'price': None, - 'stopPrice': None, - 'average': None, - 'amount': None, - 'cost': None, - 'filled': None, - 'remaining': None, - 'fee': None, - 'fees': [], - 'trades': None, - 'info': {}, + "id": "123456380", + "clientOrderId": "12345638203", + "timestamp": None, + "datetime": None, + "lastTradeTimestamp": None, + "status": None, + "symbol": None, + "type": None, + "timeInForce": None, + "postOnly": None, + "side": None, + "price": None, + "stopPrice": None, + "average": None, + "amount": None, + "cost": None, + "filled": None, + "remaining": None, + "fee": None, + "fees": [], + "trades": None, + "info": {}, }, ] order1_bef = orders[0] order2_bef = orders[1] order1 = exchange._order_contracts_to_amount(deepcopy(order1_bef)) order2 = exchange._order_contracts_to_amount(deepcopy(order2_bef)) - assert order1['amount'] == order1_bef['amount'] * contract_size - assert order1['cost'] == order1_bef['cost'] * contract_size + assert order1["amount"] == order1_bef["amount"] * contract_size + assert order1["cost"] == order1_bef["cost"] * contract_size - assert order2['amount'] == order2_bef['amount'] * contract_size - assert order2['cost'] == order2_bef['cost'] * contract_size + assert order2["amount"] == order2_bef["amount"] * contract_size + assert order2["cost"] == order2_bef["cost"] * contract_size # Don't fail exchange._order_contracts_to_amount(orders[2]) -@pytest.mark.parametrize('pair,contract_size,trading_mode', [ - ('XLTCUSDT', 1, 'spot'), - ('LTC/USD', 1, 'futures'), - ('ADA/USDT:USDT', 0.01, 'futures'), - ('LTC/ETH', 1, 'futures'), - ('ETH/USDT:USDT', 10, 'futures'), -]) +@pytest.mark.parametrize( + "pair,contract_size,trading_mode", + [ + ("XLTCUSDT", 1, "spot"), + ("LTC/USD", 1, "futures"), + ("ADA/USDT:USDT", 0.01, "futures"), + ("LTC/ETH", 1, "futures"), + ("ETH/USDT:USDT", 10, "futures"), + ], +) def test__trades_contracts_to_amount( mocker, default_conf, @@ -4638,67 +5332,67 @@ def test__trades_contracts_to_amount( trading_mode, ): api_mock = MagicMock() - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = 'isolated' - mocker.patch(f'{EXMS}.markets', markets) + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = "isolated" + mocker.patch(f"{EXMS}.markets", markets) exchange = get_patched_exchange(mocker, default_conf, api_mock) trades = [ { - 'symbol': pair, - 'amount': 30.0, + "symbol": pair, + "amount": 30.0, }, { - 'symbol': pair, - 'amount': 40.0, - } + "symbol": pair, + "amount": 40.0, + }, ] new_amount_trades = exchange._trades_contracts_to_amount(trades) - assert new_amount_trades[0]['amount'] == 30.0 * contract_size - assert new_amount_trades[1]['amount'] == 40.0 * contract_size + assert new_amount_trades[0]["amount"] == 30.0 * contract_size + assert new_amount_trades[1]["amount"] == 40.0 * contract_size -@pytest.mark.parametrize('pair,param_amount,param_size', [ - ('ADA/USDT:USDT', 40, 4000), - ('LTC/ETH', 30, 30), - ('LTC/USD', 30, 30), - ('ETH/USDT:USDT', 10, 1), -]) -def test__amount_to_contracts( - mocker, - default_conf, - pair, - param_amount, - param_size -): +@pytest.mark.parametrize( + "pair,param_amount,param_size", + [ + ("ADA/USDT:USDT", 40, 4000), + ("LTC/ETH", 30, 30), + ("LTC/USD", 30, 30), + ("ETH/USDT:USDT", 10, 1), + ], +) +def test__amount_to_contracts(mocker, 
default_conf, pair, param_amount, param_size): api_mock = MagicMock() - default_conf['trading_mode'] = 'spot' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "spot" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, api_mock) - mocker.patch(f'{EXMS}.markets', { - 'LTC/USD': { - 'symbol': 'LTC/USD', - 'contractSize': None, + mocker.patch( + f"{EXMS}.markets", + { + "LTC/USD": { + "symbol": "LTC/USD", + "contractSize": None, + }, + "XLTCUSDT": { + "symbol": "XLTCUSDT", + "contractSize": "0.01", + }, + "LTC/ETH": { + "symbol": "LTC/ETH", + }, + "ETH/USDT:USDT": { + "symbol": "ETH/USDT:USDT", + "contractSize": "10", + }, }, - 'XLTCUSDT': { - 'symbol': 'XLTCUSDT', - 'contractSize': '0.01', - }, - 'LTC/ETH': { - 'symbol': 'LTC/ETH', - }, - 'ETH/USDT:USDT': { - 'symbol': 'ETH/USDT:USDT', - 'contractSize': '10', - } - }) + ) result_size = exchange._amount_to_contracts(pair, param_amount) assert result_size == param_amount result_amount = exchange._contracts_to_amount(pair, param_size) assert result_amount == param_size - default_conf['trading_mode'] = 'futures' + default_conf["trading_mode"] = "futures" exchange = get_patched_exchange(mocker, default_conf, api_mock) result_size = exchange._amount_to_contracts(pair, param_amount) assert result_size == param_size @@ -4706,18 +5400,21 @@ def test__amount_to_contracts( assert result_amount == param_amount -@pytest.mark.parametrize('pair,amount,expected_spot,expected_fut', [ - # Contract size of 0.01 - ('ADA/USDT:USDT', 40, 40, 40), - ('ADA/USDT:USDT', 10.4445555, 10.4, 10.444), - ('LTC/ETH', 30, 30, 30), - ('LTC/USD', 30, 30, 30), - ('ADA/USDT:USDT', 1.17, 1.1, 1.17), - # contract size of 10 - ('ETH/USDT:USDT', 10.111, 10.1, 10), - ('ETH/USDT:USDT', 10.188, 10.1, 10), - ('ETH/USDT:USDT', 10.988, 10.9, 10), -]) +@pytest.mark.parametrize( + "pair,amount,expected_spot,expected_fut", + [ + # Contract size of 0.01 + ("ADA/USDT:USDT", 40, 40, 40), + ("ADA/USDT:USDT", 10.4445555, 10.4, 10.444), + ("LTC/ETH", 30, 30, 30), + ("LTC/USD", 30, 30, 30), + ("ADA/USDT:USDT", 1.17, 1.1, 1.17), + # contract size of 10 + ("ETH/USDT:USDT", 10.111, 10.1, 10), + ("ETH/USDT:USDT", 10.188, 10.1, 10), + ("ETH/USDT:USDT", 10.988, 10.9, 10), + ], +) def test_amount_to_contract_precision( mocker, default_conf, @@ -4727,88 +5424,157 @@ def test_amount_to_contract_precision( expected_fut, ): api_mock = MagicMock() - default_conf['trading_mode'] = 'spot' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "spot" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, api_mock) result_size = exchange.amount_to_contract_precision(pair, amount) assert result_size == expected_spot - default_conf['trading_mode'] = 'futures' + default_conf["trading_mode"] = "futures" exchange = get_patched_exchange(mocker, default_conf, api_mock) result_size = exchange.amount_to_contract_precision(pair, amount) assert result_size == expected_fut -@pytest.mark.parametrize('exchange_name,open_rate,is_short,trading_mode,margin_mode', [ - # Bybit - ('bybit', 2.0, False, 'spot', None), - ('bybit', 2.0, False, 'spot', 'cross'), - ('bybit', 2.0, True, 'spot', 'isolated'), - # Binance - ('binance', 2.0, False, 'spot', None), - ('binance', 2.0, False, 'spot', 'cross'), - ('binance', 2.0, True, 'spot', 'isolated'), -]) +@pytest.mark.parametrize( + "exchange_name,open_rate,is_short,trading_mode,margin_mode", + [ + # Bybit + ("bybit", 2.0, False, "spot", None), + ("bybit", 2.0, 
False, "spot", "cross"), + ("bybit", 2.0, True, "spot", "isolated"), + # Binance + ("binance", 2.0, False, "spot", None), + ("binance", 2.0, False, "spot", "cross"), + ("binance", 2.0, True, "spot", "isolated"), + ], +) def test_liquidation_price_is_none( + mocker, default_conf, exchange_name, open_rate, is_short, trading_mode, margin_mode +): + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = margin_mode + exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) + assert ( + exchange.get_liquidation_price( + pair="DOGE/USDT", + open_rate=open_rate, + is_short=is_short, + amount=71200.81144, + stake_amount=open_rate * 71200.81144, + leverage=5, + wallet_balance=-56354.57, + mm_ex_1=0.10, + upnl_ex_1=0.0, + ) + is None + ) + + +@pytest.mark.parametrize( + "exchange_name, is_short, trading_mode, margin_mode, wallet_balance, " + "mm_ex_1, upnl_ex_1, maintenance_amt, amount, open_rate, " + "mm_ratio, expected", + [ + ( + "binance", + False, + "futures", + "isolated", + 1535443.01, + 0.0, + 0.0, + 135365.00, + 3683.979, + 1456.84, + 0.10, + 1114.78, + ), + ( + "binance", + False, + "futures", + "isolated", + 1535443.01, + 0.0, + 0.0, + 16300.000, + 109.488, + 32481.980, + 0.025, + 18778.73, + ), + ( + "binance", + False, + "futures", + "cross", + 1535443.01, + 71200.81144, + -56354.57, + 135365.00, + 3683.979, + 1456.84, + 0.10, + 1153.26, + ), + ( + "binance", + False, + "futures", + "cross", + 1535443.01, + 356512.508, + -448192.89, + 16300.000, + 109.488, + 32481.980, + 0.025, + 26316.89, + ), + ], +) +def test_liquidation_price_binance( mocker, default_conf, exchange_name, open_rate, is_short, trading_mode, - margin_mode + margin_mode, + wallet_balance, + mm_ex_1, + upnl_ex_1, + maintenance_amt, + amount, + mm_ratio, + expected, ): - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = margin_mode - exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) - assert exchange.get_liquidation_price( - pair='DOGE/USDT', - open_rate=open_rate, - is_short=is_short, - amount=71200.81144, - stake_amount=open_rate * 71200.81144, - leverage=5, - wallet_balance=-56354.57, - mm_ex_1=0.10, - upnl_ex_1=0.0 - ) is None - - -@pytest.mark.parametrize( - 'exchange_name, is_short, trading_mode, margin_mode, wallet_balance, ' - 'mm_ex_1, upnl_ex_1, maintenance_amt, amount, open_rate, ' - 'mm_ratio, expected', - [ - ("binance", False, 'futures', 'isolated', 1535443.01, 0.0, - 0.0, 135365.00, 3683.979, 1456.84, 0.10, 1114.78), - ("binance", False, 'futures', 'isolated', 1535443.01, 0.0, - 0.0, 16300.000, 109.488, 32481.980, 0.025, 18778.73), - ("binance", False, 'futures', 'cross', 1535443.01, 71200.81144, - -56354.57, 135365.00, 3683.979, 1456.84, 0.10, 1153.26), - ("binance", False, 'futures', 'cross', 1535443.01, 356512.508, - -448192.89, 16300.000, 109.488, 32481.980, 0.025, 26316.89) - ]) -def test_liquidation_price_binance( - mocker, default_conf, exchange_name, open_rate, is_short, trading_mode, - margin_mode, wallet_balance, mm_ex_1, upnl_ex_1, maintenance_amt, amount, mm_ratio, expected -): - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = margin_mode - default_conf['liquidation_buffer'] = 0.0 + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = margin_mode + default_conf["liquidation_buffer"] = 0.0 exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) exchange.get_maintenance_ratio_and_amt = MagicMock(return_value=(mm_ratio, maintenance_amt)) - assert 
pytest.approx(round(exchange.get_liquidation_price( - pair='DOGE/USDT', - open_rate=open_rate, - is_short=is_short, - wallet_balance=wallet_balance, - mm_ex_1=mm_ex_1, - upnl_ex_1=upnl_ex_1, - amount=amount, - stake_amount=open_rate * amount, - leverage=5, - ), 2)) == expected + assert ( + pytest.approx( + round( + exchange.get_liquidation_price( + pair="DOGE/USDT", + open_rate=open_rate, + is_short=is_short, + wallet_balance=wallet_balance, + mm_ex_1=mm_ex_1, + upnl_ex_1=upnl_ex_1, + amount=amount, + stake_amount=open_rate * amount, + leverage=5, + ), + 2, + ) + ) + == expected + ) def test_get_max_pair_stake_amount( @@ -4816,195 +5582,164 @@ def test_get_max_pair_stake_amount( default_conf, ): api_mock = MagicMock() - default_conf['margin_mode'] = 'isolated' - default_conf['trading_mode'] = 'futures' + default_conf["margin_mode"] = "isolated" + default_conf["trading_mode"] = "futures" exchange = get_patched_exchange(mocker, default_conf, api_mock) markets = { - 'XRP/USDT:USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': None - }, + "XRP/USDT:USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": {"min": 5, "max": None}, }, - 'contractSize': None, - 'spot': False, + "contractSize": None, + "spot": False, }, - 'LTC/USDT:USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': None - }, - 'cost': { - 'min': 5, - 'max': None - }, + "LTC/USDT:USDT": { + "limits": { + "amount": {"min": 0.001, "max": None}, + "cost": {"min": 5, "max": None}, }, - 'contractSize': 0.01, - 'spot': False, + "contractSize": 0.01, + "spot": False, }, - 'ETH/USDT:USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': 30000, + "ETH/USDT:USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": { + "min": 5, + "max": 30000, }, }, - 'contractSize': 0.01, - 'spot': False, + "contractSize": 0.01, + "spot": False, }, - 'BTC/USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': None - }, + "BTC/USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": {"min": 5, "max": None}, }, - 'contractSize': 0.01, - 'spot': True, + "contractSize": 0.01, + "spot": True, }, - 'ADA/USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': 500, + "ADA/USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": { + "min": 5, + "max": 500, }, }, - 'contractSize': 0.01, - 'spot': True, + "contractSize": 0.01, + "spot": True, }, - 'DOGE/USDT:USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': 500 - }, + "DOGE/USDT:USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": {"min": 5, "max": 500}, }, - 'contractSize': None, - 'spot': False, + "contractSize": None, + "spot": False, }, - 'LUNA/USDT:USDT': { - 'limits': { - 'amount': { - 'min': 0.001, - 'max': 10000 - }, - 'cost': { - 'min': 5, - 'max': 500 - }, + "LUNA/USDT:USDT": { + "limits": { + "amount": {"min": 0.001, "max": 10000}, + "cost": {"min": 5, "max": 500}, }, - 'contractSize': 0.01, - 'spot': False, + "contractSize": 0.01, + "spot": False, }, } - mocker.patch(f'{EXMS}.markets', markets) - assert exchange.get_max_pair_stake_amount('XRP/USDT:USDT', 2.0) == 20000 - assert exchange.get_max_pair_stake_amount('XRP/USDT:USDT', 2.0, 5) == 4000 - assert exchange.get_max_pair_stake_amount('LTC/USDT:USDT', 2.0) == float('inf') - assert 
exchange.get_max_pair_stake_amount('ETH/USDT:USDT', 2.0) == 200 - assert exchange.get_max_pair_stake_amount('DOGE/USDT:USDT', 2.0) == 500 - assert exchange.get_max_pair_stake_amount('LUNA/USDT:USDT', 2.0) == 5.0 + mocker.patch(f"{EXMS}.markets", markets) + assert exchange.get_max_pair_stake_amount("XRP/USDT:USDT", 2.0) == 20000 + assert exchange.get_max_pair_stake_amount("XRP/USDT:USDT", 2.0, 5) == 4000 + assert exchange.get_max_pair_stake_amount("LTC/USDT:USDT", 2.0) == float("inf") + assert exchange.get_max_pair_stake_amount("ETH/USDT:USDT", 2.0) == 200 + assert exchange.get_max_pair_stake_amount("DOGE/USDT:USDT", 2.0) == 500 + assert exchange.get_max_pair_stake_amount("LUNA/USDT:USDT", 2.0) == 5.0 - default_conf['trading_mode'] = 'spot' + default_conf["trading_mode"] = "spot" exchange = get_patched_exchange(mocker, default_conf, api_mock) - mocker.patch(f'{EXMS}.markets', markets) - assert exchange.get_max_pair_stake_amount('BTC/USDT', 2.0) == 20000 - assert exchange.get_max_pair_stake_amount('ADA/USDT', 2.0) == 500 + mocker.patch(f"{EXMS}.markets", markets) + assert exchange.get_max_pair_stake_amount("BTC/USDT", 2.0) == 20000 + assert exchange.get_max_pair_stake_amount("ADA/USDT", 2.0) == 500 -@pytest.mark.parametrize('exchange_name', EXCHANGES) -def test_load_leverage_tiers(mocker, default_conf, leverage_tiers, exchange_name): +@pytest.mark.parametrize("exchange_name", EXCHANGES) +def test_load_leverage_tiers(mocker, default_conf, exchange_name): + if exchange_name == "bybit": + # TODO: remove once get_leverage_tiers workaround has been removed. + pytest.skip("Currently skipping") api_mock = MagicMock() api_mock.fetch_leverage_tiers = MagicMock() - type(api_mock).has = PropertyMock(return_value={'fetchLeverageTiers': True}) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.validate_trading_mode_and_margin_mode') + type(api_mock).has = PropertyMock(return_value={"fetchLeverageTiers": True}) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.validate_trading_mode_and_margin_mode") - api_mock.fetch_leverage_tiers = MagicMock(return_value={ - 'ADA/USDT:USDT': [ - { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 500, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '500', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ADA-USDT' - } - }, - ] - }) + api_mock.fetch_leverage_tiers = MagicMock( + return_value={ + "ADA/USDT:USDT": [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 500, + "maintenanceMarginRate": 0.02, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "500", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ADA-USDT", + }, + }, + ] + } + ) # SPOT exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) assert exchange.load_leverage_tiers() == {} - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" - if exchange_name != 'binance': + if exchange_name != "binance": # FUTURES has.fetchLeverageTiers == False - type(api_mock).has = PropertyMock(return_value={'fetchLeverageTiers': False}) + type(api_mock).has = PropertyMock(return_value={"fetchLeverageTiers": False}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) 
assert exchange.load_leverage_tiers() == {} # FUTURES regular - type(api_mock).has = PropertyMock(return_value={'fetchLeverageTiers': True}) + type(api_mock).has = PropertyMock(return_value={"fetchLeverageTiers": True}) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) assert exchange.load_leverage_tiers() == { - 'ADA/USDT:USDT': [ + "ADA/USDT:USDT": [ { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 500, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '500', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ADA-USDT' - } + "tier": 1, + "minNotional": 0, + "maxNotional": 500, + "maintenanceMarginRate": 0.02, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "500", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ADA-USDT", + }, }, ] } @@ -5019,16 +5754,16 @@ def test_load_leverage_tiers(mocker, default_conf, leverage_tiers, exchange_name ) -@pytest.mark.parametrize('exchange_name', EXCHANGES) +@pytest.mark.parametrize("exchange_name", EXCHANGES) async def test_get_market_leverage_tiers(mocker, default_conf, exchange_name): - default_conf['exchange']['name'] = exchange_name + default_conf["exchange"]["name"] = exchange_name await async_ccxt_exception( mocker, default_conf, MagicMock(), "get_market_leverage_tiers", "fetch_market_leverage_tiers", - symbol='BTC/USDT:USDT' + symbol="BTC/USDT:USDT", ) @@ -5047,8 +5782,8 @@ def test_parse_leverage_tier(mocker, default_conf): "maxNotional": "100000", "minNotional": "0", "maintMarginRatio": "0.025", - "cum": "0.0" - } + "cum": "0.0", + }, } assert exchange.parse_leverage_tier(tier) == { @@ -5060,48 +5795,48 @@ def test_parse_leverage_tier(mocker, default_conf): } tier2 = { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '2000', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'SHIB-USDT' - } + "tier": 1, + "minNotional": 0, + "maxNotional": 2000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "2000", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "SHIB-USDT", + }, } assert exchange.parse_leverage_tier(tier2) == { - 'minNotional': 0, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 75, + "minNotional": 0, + "maxNotional": 2000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 75, "maintAmt": None, } def test_get_maintenance_ratio_and_amt_exceptions(mocker, default_conf, leverage_tiers): api_mock = MagicMock() - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock) exchange._leverage_tiers = leverage_tiers with pytest.raises( DependencyException, - match='nominal value can not be lower than 0', + match="nominal value can not be lower than 0", ): - 
exchange.get_maintenance_ratio_and_amt('1000SHIB/USDT:USDT', -1) + exchange.get_maintenance_ratio_and_amt("1000SHIB/USDT:USDT", -1) exchange._leverage_tiers = {} @@ -5109,42 +5844,38 @@ def test_get_maintenance_ratio_and_amt_exceptions(mocker, default_conf, leverage InvalidOrderException, match="Maintenance margin rate for 1000SHIB/USDT:USDT is unavailable for", ): - exchange.get_maintenance_ratio_and_amt('1000SHIB/USDT:USDT', 10000) + exchange.get_maintenance_ratio_and_amt("1000SHIB/USDT:USDT", 10000) -@pytest.mark.parametrize('pair,value,mmr,maintAmt', [ - ('ADA/USDT:USDT', 500, 0.025, 0.0), - ('ADA/USDT:USDT', 20000000, 0.5, 1527500.0), - ('ZEC/USDT:USDT', 500, 0.01, 0.0), - ('ZEC/USDT:USDT', 20000000, 0.5, 654500.0), -]) +@pytest.mark.parametrize( + "pair,value,mmr,maintAmt", + [ + ("ADA/USDT:USDT", 500, 0.025, 0.0), + ("ADA/USDT:USDT", 20000000, 0.5, 1527500.0), + ("ZEC/USDT:USDT", 500, 0.01, 0.0), + ("ZEC/USDT:USDT", 20000000, 0.5, 654500.0), + ], +) def test_get_maintenance_ratio_and_amt( - mocker, - default_conf, - leverage_tiers, - pair, - value, - mmr, - maintAmt + mocker, default_conf, leverage_tiers, pair, value, mmr, maintAmt ): api_mock = MagicMock() - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock) exchange._leverage_tiers = leverage_tiers assert exchange.get_maintenance_ratio_and_amt(pair, value) == (mmr, maintAmt) def test_get_max_leverage_futures(default_conf, mocker, leverage_tiers): - # Test Spot exchange = get_patched_exchange(mocker, default_conf, id="binance") assert exchange.get_max_leverage("BNB/USDT", 100.0) == 1.0 # Test Futures - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, id="binance") exchange._leverage_tiers = leverage_tiers @@ -5157,104 +5888,114 @@ def test_get_max_leverage_futures(default_conf, mocker, leverage_tiers): assert exchange.get_max_leverage("BTC/USDT:USDT", 300000000) == 2.0 assert exchange.get_max_leverage("BTC/USDT:USDT", 600000000) == 1.0 # Last tier - assert exchange.get_max_leverage("SPONGE/USDT:USDT", 200) == 1.0 # Pair not in leverage_tiers + assert exchange.get_max_leverage("SPONGE/USDT:USDT", 200) == 1.0 # Pair not in leverage_tiers assert exchange.get_max_leverage("BTC/USDT:USDT", 0.0) == 125.0 # No stake amount with pytest.raises( - InvalidOrderException, - match=r'Amount 1000000000.01 too high for BTC/USDT:USDT' + InvalidOrderException, match=r"Amount 1000000000.01 too high for BTC/USDT:USDT" ): exchange.get_max_leverage("BTC/USDT:USDT", 1000000000.01) -@pytest.mark.parametrize("exchange_name", ['binance', 'kraken', 'gate', 'okx', 'bybit']) +@pytest.mark.parametrize("exchange_name", ["binance", "kraken", "gate", "okx", "bybit"]) def test__get_params(mocker, default_conf, exchange_name): api_mock = MagicMock() - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange._params = {'test': True} + exchange._params = {"test": True} - params1 = {'test': True} + params1 = {"test": True} params2 = { - 'test': True, - 
'timeInForce': 'IOC', - 'reduceOnly': True, + "test": True, + "timeInForce": "IOC", + "reduceOnly": True, } - if exchange_name == 'kraken': - params2['leverage'] = 3.0 + if exchange_name == "kraken": + params2["leverage"] = 3.0 - if exchange_name == 'okx': - params2['tdMode'] = 'isolated' - params2['posSide'] = 'net' + if exchange_name == "okx": + params2["tdMode"] = "isolated" + params2["posSide"] = "net" - if exchange_name == 'bybit': - params2['position_idx'] = 0 + if exchange_name == "bybit": + params2["position_idx"] = 0 - assert exchange._get_params( - side="buy", - ordertype='market', - reduceOnly=False, - time_in_force='GTC', - leverage=1.0, - ) == params1 + assert ( + exchange._get_params( + side="buy", + ordertype="market", + reduceOnly=False, + time_in_force="GTC", + leverage=1.0, + ) + == params1 + ) - assert exchange._get_params( - side="buy", - ordertype='market', - reduceOnly=False, - time_in_force='IOC', - leverage=1.0, - ) == params1 + assert ( + exchange._get_params( + side="buy", + ordertype="market", + reduceOnly=False, + time_in_force="IOC", + leverage=1.0, + ) + == params1 + ) - assert exchange._get_params( - side="buy", - ordertype='limit', - reduceOnly=False, - time_in_force='GTC', - leverage=1.0, - ) == params1 + assert ( + exchange._get_params( + side="buy", + ordertype="limit", + reduceOnly=False, + time_in_force="GTC", + leverage=1.0, + ) + == params1 + ) - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) - exchange._params = {'test': True} + exchange._params = {"test": True} - assert exchange._get_params( - side="buy", - ordertype='limit', - reduceOnly=True, - time_in_force='IOC', - leverage=3.0, - ) == params2 + assert ( + exchange._get_params( + side="buy", + ordertype="limit", + reduceOnly=True, + time_in_force="IOC", + leverage=3.0, + ) + == params2 + ) def test_get_liquidation_price1(mocker, default_conf): - api_mock = MagicMock() leverage = 9.97 positions = [ { - 'info': {}, - 'symbol': 'NEAR/USDT:USDT', - 'timestamp': 1642164737148, - 'datetime': '2022-01-14T12:52:17.148Z', - 'initialMargin': 1.51072, - 'initialMarginPercentage': 0.1, - 'maintenanceMargin': 0.38916147, - 'maintenanceMarginPercentage': 0.025, - 'entryPrice': 18.884, - 'notional': 15.1072, - 'leverage': leverage, - 'unrealizedPnl': 0.0048, - 'contracts': 8, - 'contractSize': 0.1, - 'marginRatio': None, - 'liquidationPrice': 17.47, - 'markPrice': 18.89, - 'margin_mode': 1.52549075, - 'marginType': 'isolated', - 'side': 'buy', - 'percentage': 0.003177292946409658 + "info": {}, + "symbol": "NEAR/USDT:USDT", + "timestamp": 1642164737148, + "datetime": "2022-01-14T12:52:17.148Z", + "initialMargin": 1.51072, + "initialMarginPercentage": 0.1, + "maintenanceMargin": 0.38916147, + "maintenanceMarginPercentage": 0.025, + "entryPrice": 18.884, + "notional": 15.1072, + "leverage": leverage, + "unrealizedPnl": 0.0048, + "contracts": 8, + "contractSize": 0.1, + "marginRatio": None, + "liquidationPrice": 17.47, + "markPrice": 18.89, + "margin_mode": 1.52549075, + "marginType": "isolated", + "side": "buy", + "percentage": 0.003177292946409658, } ] api_mock.fetch_positions = MagicMock(return_value=positions) @@ -5262,14 +6003,14 @@ def test_get_liquidation_price1(mocker, default_conf): EXMS, exchange_has=MagicMock(return_value=True), ) - default_conf['dry_run'] = False - default_conf['trading_mode'] = 
'futures' - default_conf['margin_mode'] = 'isolated' - default_conf['liquidation_buffer'] = 0.0 + default_conf["dry_run"] = False + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + default_conf["liquidation_buffer"] = 0.0 exchange = get_patched_exchange(mocker, default_conf, api_mock) liq_price = exchange.get_liquidation_price( - pair='NEAR/USDT:USDT', + pair="NEAR/USDT:USDT", open_rate=18.884, is_short=False, amount=0.8, @@ -5279,10 +6020,10 @@ def test_get_liquidation_price1(mocker, default_conf): ) assert liq_price == 17.47 - default_conf['liquidation_buffer'] = 0.05 + default_conf["liquidation_buffer"] = 0.05 exchange = get_patched_exchange(mocker, default_conf, api_mock) liq_price = exchange.get_liquidation_price( - pair='NEAR/USDT:USDT', + pair="NEAR/USDT:USDT", open_rate=18.884, is_short=False, amount=0.8, @@ -5295,7 +6036,7 @@ def test_get_liquidation_price1(mocker, default_conf): api_mock.fetch_positions = MagicMock(return_value=[]) exchange = get_patched_exchange(mocker, default_conf, api_mock) liq_price = exchange.get_liquidation_price( - pair='NEAR/USDT:USDT', + pair="NEAR/USDT:USDT", open_rate=18.884, is_short=False, amount=0.8, @@ -5304,12 +6045,12 @@ def test_get_liquidation_price1(mocker, default_conf): wallet_balance=18.884 * 0.8, ) assert liq_price is None - default_conf['trading_mode'] = 'margin' + default_conf["trading_mode"] = "margin" exchange = get_patched_exchange(mocker, default_conf, api_mock) - with pytest.raises(OperationalException, match=r'.*does not support .* margin'): + with pytest.raises(OperationalException, match=r".*does not support .* margin"): exchange.get_liquidation_price( - pair='NEAR/USDT:USDT', + pair="NEAR/USDT:USDT", open_rate=18.884, is_short=False, amount=0.8, @@ -5319,46 +6060,47 @@ def test_get_liquidation_price1(mocker, default_conf): ) -@pytest.mark.parametrize('liquidation_buffer', [0.0]) +@pytest.mark.parametrize("liquidation_buffer", [0.0]) @pytest.mark.parametrize( - "is_short,trading_mode,exchange_name,margin_mode,leverage,open_rate,amount,expected_liq", [ - (False, 'spot', 'binance', '', 5.0, 10.0, 1.0, None), - (True, 'spot', 'binance', '', 5.0, 10.0, 1.0, None), - (False, 'spot', 'gate', '', 5.0, 10.0, 1.0, None), - (True, 'spot', 'gate', '', 5.0, 10.0, 1.0, None), - (False, 'spot', 'okx', '', 5.0, 10.0, 1.0, None), - (True, 'spot', 'okx', '', 5.0, 10.0, 1.0, None), + "is_short,trading_mode,exchange_name,margin_mode,leverage,open_rate,amount,expected_liq", + [ + (False, "spot", "binance", "", 5.0, 10.0, 1.0, None), + (True, "spot", "binance", "", 5.0, 10.0, 1.0, None), + (False, "spot", "gate", "", 5.0, 10.0, 1.0, None), + (True, "spot", "gate", "", 5.0, 10.0, 1.0, None), + (False, "spot", "okx", "", 5.0, 10.0, 1.0, None), + (True, "spot", "okx", "", 5.0, 10.0, 1.0, None), # Binance, short - (True, 'futures', 'binance', 'isolated', 5.0, 10.0, 1.0, 11.89108910891089), - (True, 'futures', 'binance', 'isolated', 3.0, 10.0, 1.0, 13.211221122079207), - (True, 'futures', 'binance', 'isolated', 5.0, 8.0, 1.0, 9.514851485148514), - (True, 'futures', 'binance', 'isolated', 5.0, 10.0, 0.6, 11.897689768976898), + (True, "futures", "binance", "isolated", 5.0, 10.0, 1.0, 11.89108910891089), + (True, "futures", "binance", "isolated", 3.0, 10.0, 1.0, 13.211221122079207), + (True, "futures", "binance", "isolated", 5.0, 8.0, 1.0, 9.514851485148514), + (True, "futures", "binance", "isolated", 5.0, 10.0, 0.6, 11.897689768976898), # Binance, long - (False, 'futures', 'binance', 'isolated', 5, 10, 1.0, 
8.070707070707071), - (False, 'futures', 'binance', 'isolated', 5, 8, 1.0, 6.454545454545454), - (False, 'futures', 'binance', 'isolated', 3, 10, 1.0, 6.723905723905723), - (False, 'futures', 'binance', 'isolated', 5, 10, 0.6, 8.063973063973064), + (False, "futures", "binance", "isolated", 5, 10, 1.0, 8.070707070707071), + (False, "futures", "binance", "isolated", 5, 8, 1.0, 6.454545454545454), + (False, "futures", "binance", "isolated", 3, 10, 1.0, 6.723905723905723), + (False, "futures", "binance", "isolated", 5, 10, 0.6, 8.063973063973064), # Gate/okx, short - (True, 'futures', 'gate', 'isolated', 5, 10, 1.0, 11.87413417771621), - (True, 'futures', 'gate', 'isolated', 5, 10, 2.0, 11.87413417771621), - (True, 'futures', 'gate', 'isolated', 3, 10, 1.0, 13.193482419684678), - (True, 'futures', 'gate', 'isolated', 5, 8, 1.0, 9.499307342172967), - (True, 'futures', 'okx', 'isolated', 3, 10, 1.0, 13.193482419684678), + (True, "futures", "gate", "isolated", 5, 10, 1.0, 11.87413417771621), + (True, "futures", "gate", "isolated", 5, 10, 2.0, 11.87413417771621), + (True, "futures", "gate", "isolated", 3, 10, 1.0, 13.193482419684678), + (True, "futures", "gate", "isolated", 5, 8, 1.0, 9.499307342172967), + (True, "futures", "okx", "isolated", 3, 10, 1.0, 13.193482419684678), # Gate/okx, long - (False, 'futures', 'gate', 'isolated', 5.0, 10.0, 1.0, 8.085708510208207), - (False, 'futures', 'gate', 'isolated', 3.0, 10.0, 1.0, 6.738090425173506), - (False, 'futures', 'okx', 'isolated', 3.0, 10.0, 1.0, 6.738090425173506), + (False, "futures", "gate", "isolated", 5.0, 10.0, 1.0, 8.085708510208207), + (False, "futures", "gate", "isolated", 3.0, 10.0, 1.0, 6.738090425173506), + (False, "futures", "okx", "isolated", 3.0, 10.0, 1.0, 6.738090425173506), # bybit, long - (False, 'futures', 'bybit', 'isolated', 1.0, 10.0, 1.0, 0.1), - (False, 'futures', 'bybit', 'isolated', 3.0, 10.0, 1.0, 6.7666666), - (False, 'futures', 'bybit', 'isolated', 5.0, 10.0, 1.0, 8.1), - (False, 'futures', 'bybit', 'isolated', 10.0, 10.0, 1.0, 9.1), + (False, "futures", "bybit", "isolated", 1.0, 10.0, 1.0, 0.1), + (False, "futures", "bybit", "isolated", 3.0, 10.0, 1.0, 6.7666666), + (False, "futures", "bybit", "isolated", 5.0, 10.0, 1.0, 8.1), + (False, "futures", "bybit", "isolated", 10.0, 10.0, 1.0, 9.1), # bybit, short - (True, 'futures', 'bybit', 'isolated', 1.0, 10.0, 1.0, 19.9), - (True, 'futures', 'bybit', 'isolated', 3.0, 10.0, 1.0, 13.233333), - (True, 'futures', 'bybit', 'isolated', 5.0, 10.0, 1.0, 11.9), - (True, 'futures', 'bybit', 'isolated', 10.0, 10.0, 1.0, 10.9), - ] + (True, "futures", "bybit", "isolated", 1.0, 10.0, 1.0, 19.9), + (True, "futures", "bybit", "isolated", 3.0, 10.0, 1.0, 13.233333), + (True, "futures", "bybit", "isolated", 5.0, 10.0, 1.0, 11.9), + (True, "futures", "bybit", "isolated", 10.0, 10.0, 1.0, 10.9), + ], ) def test_get_liquidation_price( mocker, @@ -5425,11 +6167,11 @@ def test_get_liquidation_price( leverage = 5, open_rate = 8, amount = 1.0 (8 - (1.6 / 1.0)) / (1 + (0.01 + 0.0006)) = 6.332871561448645 """ - default_conf_usdt['liquidation_buffer'] = liquidation_buffer - default_conf_usdt['trading_mode'] = trading_mode - default_conf_usdt['exchange']['name'] = exchange_name - default_conf_usdt['margin_mode'] = margin_mode - mocker.patch('freqtrade.exchange.gate.Gate.validate_ordertypes') + default_conf_usdt["liquidation_buffer"] = liquidation_buffer + default_conf_usdt["trading_mode"] = trading_mode + default_conf_usdt["exchange"]["name"] = exchange_name + default_conf_usdt["margin_mode"] = 
margin_mode + mocker.patch("freqtrade.exchange.gate.Gate.validate_ordertypes") exchange = get_patched_exchange(mocker, default_conf_usdt, id=exchange_name) exchange.get_maintenance_ratio_and_amt = MagicMock(return_value=(0.01, 0.01)) @@ -5438,7 +6180,7 @@ def test_get_liquidation_price( # "dry_run": False, # }) liq = exchange.get_liquidation_price( - pair='ETH/USDT:USDT', + pair="ETH/USDT:USDT", open_rate=open_rate, amount=amount, stake_amount=amount * open_rate / leverage, @@ -5454,47 +6196,45 @@ def test_get_liquidation_price( assert pytest.approx(expected_liq) == liq -@pytest.mark.parametrize('contract_size,order_amount', [ - (10, 10), - (0.01, 10000), -]) +@pytest.mark.parametrize( + "contract_size,order_amount", + [ + (10, 10), + (0.01, 10000), + ], +) def test_stoploss_contract_size(mocker, default_conf, contract_size, order_amount): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - }, - 'amount': order_amount, - 'cost': order_amount, - 'filled': order_amount, - 'remaining': order_amount, - 'symbol': 'ETH/BTC', - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + api_mock.create_order = MagicMock( + return_value={ + "id": order_id, + "info": {"foo": "bar"}, + "amount": order_amount, + "cost": order_amount, + "filled": order_amount, + "remaining": order_amount, + "symbol": "ETH/BTC", + } + ) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) exchange = get_patched_exchange(mocker, default_conf, api_mock) exchange.get_contract_size = MagicMock(return_value=contract_size) api_mock.create_order.reset_mock() order = exchange.create_stoploss( - pair='ETH/BTC', - amount=100, - stop_price=220, - order_types={}, - side='buy', - leverage=1.0 + pair="ETH/BTC", amount=100, stop_price=220, order_types={}, side="buy", leverage=1.0 ) - assert api_mock.create_order.call_args_list[0][1]['amount'] == order_amount - assert order['amount'] == 100 - assert order['cost'] == order_amount - assert order['filled'] == 100 - assert order['remaining'] == 100 + assert api_mock.create_order.call_args_list[0][1]["amount"] == order_amount + assert order["amount"] == 100 + assert order["cost"] == order_amount + assert order["filled"] == 100 + assert order["remaining"] == 100 def test_price_to_precision_with_default_conf(default_conf, mocker): diff --git a/tests/exchange/test_exchange_utils.py b/tests/exchange/test_exchange_utils.py index dd79bf083..7fe8cb707 100644 --- a/tests/exchange/test_exchange_utils.py +++ b/tests/exchange/test_exchange_utils.py @@ -2,105 +2,127 @@ from datetime import datetime, timedelta, timezone import pytest -from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGITS, TICK_SIZE, - TRUNCATE) +from ccxt import ( + DECIMAL_PLACES, + ROUND, + ROUND_DOWN, + ROUND_UP, + SIGNIFICANT_DIGITS, + TICK_SIZE, + TRUNCATE, +) from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException -from freqtrade.exchange import (amount_to_contract_precision, amount_to_precision, - date_minus_candles, price_to_precision, timeframe_to_minutes, - timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date, - timeframe_to_resample_freq, 
timeframe_to_seconds) +from freqtrade.exchange import ( + amount_to_contract_precision, + amount_to_precision, + date_minus_candles, + price_to_precision, + timeframe_to_minutes, + timeframe_to_msecs, + timeframe_to_next_date, + timeframe_to_prev_date, + timeframe_to_resample_freq, + timeframe_to_seconds, +) from freqtrade.exchange.check_exchange import check_exchange from tests.conftest import log_has_re def test_check_exchange(default_conf, caplog) -> None: # Test an officially supported by Freqtrade team exchange - default_conf['runmode'] = RunMode.DRY_RUN - default_conf.get('exchange').update({'name': 'BINANCE'}) - assert check_exchange(default_conf) - assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.", - caplog) - caplog.clear() - - # Test an officially supported by Freqtrade team exchange - default_conf.get('exchange').update({'name': 'binance'}) + default_conf["runmode"] = RunMode.DRY_RUN + default_conf.get("exchange").update({"name": "BINANCE"}) assert check_exchange(default_conf) assert log_has_re( - r"Exchange \"binance\" is officially supported by the Freqtrade development team\.", - caplog) + r"Exchange .* is officially supported by the Freqtrade development team\.", caplog + ) caplog.clear() # Test an officially supported by Freqtrade team exchange - default_conf.get('exchange').update({'name': 'binanceus'}) + default_conf.get("exchange").update({"name": "binance"}) + assert check_exchange(default_conf) + assert log_has_re( + r"Exchange \"binance\" is officially supported by the Freqtrade development team\.", caplog + ) + caplog.clear() + + # Test an officially supported by Freqtrade team exchange + default_conf.get("exchange").update({"name": "binanceus"}) assert check_exchange(default_conf) assert log_has_re( r"Exchange \"binanceus\" is officially supported by the Freqtrade development team\.", - caplog) + caplog, + ) caplog.clear() # Test an officially supported by Freqtrade team exchange - with remapping - default_conf.get('exchange').update({'name': 'okx'}) + default_conf.get("exchange").update({"name": "okx"}) assert check_exchange(default_conf) assert log_has_re( - r"Exchange \"okx\" is officially supported by the Freqtrade development team\.", - caplog) + r"Exchange \"okx\" is officially supported by the Freqtrade development team\.", caplog + ) caplog.clear() # Test an available exchange, supported by ccxt - default_conf.get('exchange').update({'name': 'huobijp'}) + default_conf.get("exchange").update({"name": "huobijp"}) assert check_exchange(default_conf) - assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, " - r"but not officially supported " - r"by the Freqtrade development team\. .*", caplog) + assert log_has_re( + r"Exchange .* is known to the ccxt library, available for the bot, " + r"but not officially supported " + r"by the Freqtrade development team\. 
.*", + caplog, + ) caplog.clear() # Test a 'bad' exchange, which known to have serious problems - default_conf.get('exchange').update({'name': 'bitmex'}) - with pytest.raises(OperationalException, - match=r"Exchange .* will not work with Freqtrade\..*"): + default_conf.get("exchange").update({"name": "bitmex"}) + with pytest.raises(OperationalException, match=r"Exchange .* will not work with Freqtrade\..*"): check_exchange(default_conf) caplog.clear() # Test a 'bad' exchange with check_for_bad=False - default_conf.get('exchange').update({'name': 'bitmex'}) + default_conf.get("exchange").update({"name": "bitmex"}) assert check_exchange(default_conf, False) - assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, " - r"but not officially supported " - r"by the Freqtrade development team\. .*", caplog) + assert log_has_re( + r"Exchange .* is known to the ccxt library, available for the bot, " + r"but not officially supported " + r"by the Freqtrade development team\. .*", + caplog, + ) caplog.clear() # Test an invalid exchange - default_conf.get('exchange').update({'name': 'unknown_exchange'}) + default_conf.get("exchange").update({"name": "unknown_exchange"}) with pytest.raises( OperationalException, match=r'Exchange "unknown_exchange" is not known to the ccxt library ' - r'and therefore not available for the bot.*' + r"and therefore not available for the bot.*", ): check_exchange(default_conf) # Test no exchange... - default_conf.get('exchange').update({'name': ''}) - default_conf['runmode'] = RunMode.PLOT + default_conf.get("exchange").update({"name": ""}) + default_conf["runmode"] = RunMode.PLOT assert check_exchange(default_conf) # Test no exchange... - default_conf.get('exchange').update({'name': ''}) - default_conf['runmode'] = RunMode.UTIL_EXCHANGE - with pytest.raises(OperationalException, - match=r'This command requires a configured exchange.*'): + default_conf.get("exchange").update({"name": ""}) + default_conf["runmode"] = RunMode.UTIL_EXCHANGE + with pytest.raises( + OperationalException, match=r"This command requires a configured exchange.*" + ): check_exchange(default_conf) def test_date_minus_candles(): - date = datetime(2019, 8, 12, 13, 25, 0, tzinfo=timezone.utc) assert date_minus_candles("5m", 3, date) == date - timedelta(minutes=15) assert date_minus_candles("5m", 5, date) == date - timedelta(minutes=25) assert date_minus_candles("1m", 6, date) == date - timedelta(minutes=6) assert date_minus_candles("1h", 3, date) == date - timedelta(hours=3, minutes=25) - assert date_minus_candles("1h", 3) == timeframe_to_prev_date('1h') - timedelta(hours=3) + assert date_minus_candles("1h", 3) == timeframe_to_prev_date("1h") - timedelta(hours=3) def test_timeframe_to_minutes(): @@ -124,17 +146,20 @@ def test_timeframe_to_msecs(): assert timeframe_to_msecs("1d") == 86400000 -@pytest.mark.parametrize("timeframe,expected", [ - ("1s", '1s'), - ("15s", '15s'), - ("5m", '300s'), - ("10m", '600s'), - ("1h", '3600s'), - ("1d", '86400s'), - ("1w", '1W-MON'), - ("1M", '1MS'), - ("1y", '1YS'), -]) +@pytest.mark.parametrize( + "timeframe,expected", + [ + ("1s", "1s"), + ("15s", "15s"), + ("5m", "300s"), + ("10m", "600s"), + ("1h", "3600s"), + ("1d", "86400s"), + ("1w", "1W-MON"), + ("1M", "1MS"), + ("1y", "1YS"), + ], +) def test_timeframe_to_resample_freq(timeframe, expected): assert timeframe_to_resample_freq(timeframe) == expected @@ -164,9 +189,9 @@ def test_timeframe_to_prev_date(): assert timeframe_to_prev_date("5m") < date # Does not round time = 
datetime(2019, 8, 12, 13, 20, 0, tzinfo=timezone.utc) - assert timeframe_to_prev_date('5m', time) == time + assert timeframe_to_prev_date("5m", time) == time time = datetime(2019, 8, 12, 13, 0, 0, tzinfo=timezone.utc) - assert timeframe_to_prev_date('1h', time) == time + assert timeframe_to_prev_date("1h", time) == time def test_timeframe_to_next_date(): @@ -197,35 +222,43 @@ def test_timeframe_to_next_date(): assert timeframe_to_next_date("5m", date) == date + timedelta(minutes=5) -@pytest.mark.parametrize("amount,precision_mode,precision,expected", [ - (2.34559, DECIMAL_PLACES, 4, 2.3455), - (2.34559, DECIMAL_PLACES, 5, 2.34559), - (2.34559, DECIMAL_PLACES, 3, 2.345), - (2.9999, DECIMAL_PLACES, 3, 2.999), - (2.9909, DECIMAL_PLACES, 3, 2.990), - (2.9909, DECIMAL_PLACES, 0, 2), - (29991.5555, DECIMAL_PLACES, 0, 29991), - (29991.5555, DECIMAL_PLACES, -1, 29990), - (29991.5555, DECIMAL_PLACES, -2, 29900), - # Tests for - (2.34559, SIGNIFICANT_DIGITS, 4, 2.345), - (2.34559, SIGNIFICANT_DIGITS, 5, 2.3455), - (2.34559, SIGNIFICANT_DIGITS, 3, 2.34), - (2.9999, SIGNIFICANT_DIGITS, 3, 2.99), - (2.9909, SIGNIFICANT_DIGITS, 3, 2.99), - (0.0000077723, SIGNIFICANT_DIGITS, 5, 0.0000077723), - (0.0000077723, SIGNIFICANT_DIGITS, 3, 0.00000777), - (0.0000077723, SIGNIFICANT_DIGITS, 1, 0.000007), - # Tests for Tick-size - (2.34559, TICK_SIZE, 0.0001, 2.3455), - (2.34559, TICK_SIZE, 0.00001, 2.34559), - (2.34559, TICK_SIZE, 0.001, 2.345), - (2.9999, TICK_SIZE, 0.001, 2.999), - (2.9909, TICK_SIZE, 0.001, 2.990), - (2.9909, TICK_SIZE, 0.005, 2.99), - (2.9999, TICK_SIZE, 0.005, 2.995), -]) -def test_amount_to_precision(amount, precision_mode, precision, expected,): +@pytest.mark.parametrize( + "amount,precision_mode,precision,expected", + [ + (2.34559, DECIMAL_PLACES, 4, 2.3455), + (2.34559, DECIMAL_PLACES, 5, 2.34559), + (2.34559, DECIMAL_PLACES, 3, 2.345), + (2.9999, DECIMAL_PLACES, 3, 2.999), + (2.9909, DECIMAL_PLACES, 3, 2.990), + (2.9909, DECIMAL_PLACES, 0, 2), + (29991.5555, DECIMAL_PLACES, 0, 29991), + (29991.5555, DECIMAL_PLACES, -1, 29990), + (29991.5555, DECIMAL_PLACES, -2, 29900), + # Tests for + (2.34559, SIGNIFICANT_DIGITS, 4, 2.345), + (2.34559, SIGNIFICANT_DIGITS, 5, 2.3455), + (2.34559, SIGNIFICANT_DIGITS, 3, 2.34), + (2.9999, SIGNIFICANT_DIGITS, 3, 2.99), + (2.9909, SIGNIFICANT_DIGITS, 3, 2.99), + (0.0000077723, SIGNIFICANT_DIGITS, 5, 0.0000077723), + (0.0000077723, SIGNIFICANT_DIGITS, 3, 0.00000777), + (0.0000077723, SIGNIFICANT_DIGITS, 1, 0.000007), + # Tests for Tick-size + (2.34559, TICK_SIZE, 0.0001, 2.3455), + (2.34559, TICK_SIZE, 0.00001, 2.34559), + (2.34559, TICK_SIZE, 0.001, 2.345), + (2.9999, TICK_SIZE, 0.001, 2.999), + (2.9909, TICK_SIZE, 0.001, 2.990), + (2.9909, TICK_SIZE, 0.005, 2.99), + (2.9999, TICK_SIZE, 0.005, 2.995), + ], +) +def test_amount_to_precision( + amount, + precision_mode, + precision, + expected, +): """ Test rounds down """ @@ -237,107 +270,115 @@ def test_amount_to_precision(amount, precision_mode, precision, expected,): assert amount_to_precision(amount, precision, precision_mode) == expected -@pytest.mark.parametrize("price,precision_mode,precision,expected,rounding_mode", [ - # Tests for DECIMAL_PLACES, ROUND_UP - (2.34559, DECIMAL_PLACES, 4, 2.3456, ROUND_UP), - (2.34559, DECIMAL_PLACES, 5, 2.34559, ROUND_UP), - (2.34559, DECIMAL_PLACES, 3, 2.346, ROUND_UP), - (2.9999, DECIMAL_PLACES, 3, 3.000, ROUND_UP), - (2.9909, DECIMAL_PLACES, 3, 2.991, ROUND_UP), - (2.9901, DECIMAL_PLACES, 3, 2.991, ROUND_UP), - (2.34559, DECIMAL_PLACES, 5, 2.34559, ROUND_DOWN), - 
(2.34559, DECIMAL_PLACES, 4, 2.3455, ROUND_DOWN), - (2.9901, DECIMAL_PLACES, 3, 2.990, ROUND_DOWN), - (0.00299, DECIMAL_PLACES, 3, 0.002, ROUND_DOWN), - # Tests for DECIMAL_PLACES, ROUND - (2.345600000000001, DECIMAL_PLACES, 4, 2.3456, ROUND), - (2.345551, DECIMAL_PLACES, 4, 2.3456, ROUND), - (2.49, DECIMAL_PLACES, 0, 2., ROUND), - (2.51, DECIMAL_PLACES, 0, 3., ROUND), - (5.1, DECIMAL_PLACES, -1, 10., ROUND), - (4.9, DECIMAL_PLACES, -1, 0., ROUND), - (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000007, ROUND), - (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000072, ROUND), - (0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000078, ROUND), - # Tests for TICK_SIZE, ROUND_UP - (2.34559, TICK_SIZE, 0.0001, 2.3456, ROUND_UP), - (2.34559, TICK_SIZE, 0.00001, 2.34559, ROUND_UP), - (2.34559, TICK_SIZE, 0.001, 2.346, ROUND_UP), - (2.9999, TICK_SIZE, 0.001, 3.000, ROUND_UP), - (2.9909, TICK_SIZE, 0.001, 2.991, ROUND_UP), - (2.9909, TICK_SIZE, 0.001, 2.990, ROUND_DOWN), - (2.9909, TICK_SIZE, 0.005, 2.995, ROUND_UP), - (2.9973, TICK_SIZE, 0.005, 3.0, ROUND_UP), - (2.9977, TICK_SIZE, 0.005, 3.0, ROUND_UP), - (234.43, TICK_SIZE, 0.5, 234.5, ROUND_UP), - (234.43, TICK_SIZE, 0.5, 234.0, ROUND_DOWN), - (234.53, TICK_SIZE, 0.5, 235.0, ROUND_UP), - (234.53, TICK_SIZE, 0.5, 234.5, ROUND_DOWN), - (0.891534, TICK_SIZE, 0.0001, 0.8916, ROUND_UP), - (64968.89, TICK_SIZE, 0.01, 64968.89, ROUND_UP), - (0.000000003483, TICK_SIZE, 1e-12, 0.000000003483, ROUND_UP), - # Tests for TICK_SIZE, ROUND - (2.49, TICK_SIZE, 1., 2., ROUND), - (2.51, TICK_SIZE, 1., 3., ROUND), - (2.000000051, TICK_SIZE, 0.0000001, 2.0000001, ROUND), - (2.000000049, TICK_SIZE, 0.0000001, 2., ROUND), - (2.9909, TICK_SIZE, 0.005, 2.990, ROUND), - (2.9973, TICK_SIZE, 0.005, 2.995, ROUND), - (2.9977, TICK_SIZE, 0.005, 3.0, ROUND), - (234.24, TICK_SIZE, 0.5, 234., ROUND), - (234.26, TICK_SIZE, 0.5, 234.5, ROUND), - # Tests for TRUNCATTE - (2.34559, DECIMAL_PLACES, 4, 2.3455, TRUNCATE), - (2.34559, DECIMAL_PLACES, 5, 2.34559, TRUNCATE), - (2.34559, DECIMAL_PLACES, 3, 2.345, TRUNCATE), - (2.9999, DECIMAL_PLACES, 3, 2.999, TRUNCATE), - (2.9909, DECIMAL_PLACES, 3, 2.990, TRUNCATE), - (2.9909, TICK_SIZE, 0.001, 2.990, TRUNCATE), - (2.9909, TICK_SIZE, 0.01, 2.99, TRUNCATE), - (2.9909, TICK_SIZE, 0.1, 2.9, TRUNCATE), - # Tests for Significant - (2.34559, SIGNIFICANT_DIGITS, 4, 2.345, TRUNCATE), - (2.34559, SIGNIFICANT_DIGITS, 5, 2.3455, TRUNCATE), - (2.34559, SIGNIFICANT_DIGITS, 3, 2.34, TRUNCATE), - (2.9999, SIGNIFICANT_DIGITS, 3, 2.99, TRUNCATE), - (2.9909, SIGNIFICANT_DIGITS, 2, 2.9, TRUNCATE), - (0.00000777, SIGNIFICANT_DIGITS, 2, 0.0000077, TRUNCATE), - (0.00000729, SIGNIFICANT_DIGITS, 2, 0.0000072, TRUNCATE), - # ROUND - (722.2, SIGNIFICANT_DIGITS, 1, 700.0, ROUND), - (790.2, SIGNIFICANT_DIGITS, 1, 800.0, ROUND), - (722.2, SIGNIFICANT_DIGITS, 2, 720.0, ROUND), - (722.2, SIGNIFICANT_DIGITS, 1, 800.0, ROUND_UP), - (722.2, SIGNIFICANT_DIGITS, 2, 730.0, ROUND_UP), - (777.7, SIGNIFICANT_DIGITS, 2, 780.0, ROUND_UP), - (777.7, SIGNIFICANT_DIGITS, 3, 778.0, ROUND_UP), - (722.2, SIGNIFICANT_DIGITS, 1, 700.0, ROUND_DOWN), - (722.2, SIGNIFICANT_DIGITS, 2, 720.0, ROUND_DOWN), - (777.7, SIGNIFICANT_DIGITS, 2, 770.0, ROUND_DOWN), - (777.7, SIGNIFICANT_DIGITS, 3, 777.0, ROUND_DOWN), - - (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000008, ROUND_UP), - (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000073, ROUND_UP), - (0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000078, ROUND_UP), - (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000007, ROUND_DOWN), - (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000072, ROUND_DOWN), - 
(0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000077, ROUND_DOWN), -]) +@pytest.mark.parametrize( + "price,precision_mode,precision,expected,rounding_mode", + [ + # Tests for DECIMAL_PLACES, ROUND_UP + (2.34559, DECIMAL_PLACES, 4, 2.3456, ROUND_UP), + (2.34559, DECIMAL_PLACES, 5, 2.34559, ROUND_UP), + (2.34559, DECIMAL_PLACES, 3, 2.346, ROUND_UP), + (2.9999, DECIMAL_PLACES, 3, 3.000, ROUND_UP), + (2.9909, DECIMAL_PLACES, 3, 2.991, ROUND_UP), + (2.9901, DECIMAL_PLACES, 3, 2.991, ROUND_UP), + (2.34559, DECIMAL_PLACES, 5, 2.34559, ROUND_DOWN), + (2.34559, DECIMAL_PLACES, 4, 2.3455, ROUND_DOWN), + (2.9901, DECIMAL_PLACES, 3, 2.990, ROUND_DOWN), + (0.00299, DECIMAL_PLACES, 3, 0.002, ROUND_DOWN), + # Tests for DECIMAL_PLACES, ROUND + (2.345600000000001, DECIMAL_PLACES, 4, 2.3456, ROUND), + (2.345551, DECIMAL_PLACES, 4, 2.3456, ROUND), + (2.49, DECIMAL_PLACES, 0, 2.0, ROUND), + (2.51, DECIMAL_PLACES, 0, 3.0, ROUND), + (5.1, DECIMAL_PLACES, -1, 10.0, ROUND), + (4.9, DECIMAL_PLACES, -1, 0.0, ROUND), + (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000007, ROUND), + (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000072, ROUND), + (0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000078, ROUND), + # Tests for TICK_SIZE, ROUND_UP + (2.34559, TICK_SIZE, 0.0001, 2.3456, ROUND_UP), + (2.34559, TICK_SIZE, 0.00001, 2.34559, ROUND_UP), + (2.34559, TICK_SIZE, 0.001, 2.346, ROUND_UP), + (2.9999, TICK_SIZE, 0.001, 3.000, ROUND_UP), + (2.9909, TICK_SIZE, 0.001, 2.991, ROUND_UP), + (2.9909, TICK_SIZE, 0.001, 2.990, ROUND_DOWN), + (2.9909, TICK_SIZE, 0.005, 2.995, ROUND_UP), + (2.9973, TICK_SIZE, 0.005, 3.0, ROUND_UP), + (2.9977, TICK_SIZE, 0.005, 3.0, ROUND_UP), + (234.43, TICK_SIZE, 0.5, 234.5, ROUND_UP), + (234.43, TICK_SIZE, 0.5, 234.0, ROUND_DOWN), + (234.53, TICK_SIZE, 0.5, 235.0, ROUND_UP), + (234.53, TICK_SIZE, 0.5, 234.5, ROUND_DOWN), + (0.891534, TICK_SIZE, 0.0001, 0.8916, ROUND_UP), + (64968.89, TICK_SIZE, 0.01, 64968.89, ROUND_UP), + (0.000000003483, TICK_SIZE, 1e-12, 0.000000003483, ROUND_UP), + # Tests for TICK_SIZE, ROUND + (2.49, TICK_SIZE, 1.0, 2.0, ROUND), + (2.51, TICK_SIZE, 1.0, 3.0, ROUND), + (2.000000051, TICK_SIZE, 0.0000001, 2.0000001, ROUND), + (2.000000049, TICK_SIZE, 0.0000001, 2.0, ROUND), + (2.9909, TICK_SIZE, 0.005, 2.990, ROUND), + (2.9973, TICK_SIZE, 0.005, 2.995, ROUND), + (2.9977, TICK_SIZE, 0.005, 3.0, ROUND), + (234.24, TICK_SIZE, 0.5, 234.0, ROUND), + (234.26, TICK_SIZE, 0.5, 234.5, ROUND), + # Tests for TRUNCATE + (2.34559, DECIMAL_PLACES, 4, 2.3455, TRUNCATE), + (2.34559, DECIMAL_PLACES, 5, 2.34559, TRUNCATE), + (2.34559, DECIMAL_PLACES, 3, 2.345, TRUNCATE), + (2.9999, DECIMAL_PLACES, 3, 2.999, TRUNCATE), + (2.9909, DECIMAL_PLACES, 3, 2.990, TRUNCATE), + (2.9909, TICK_SIZE, 0.001, 2.990, TRUNCATE), + (2.9909, TICK_SIZE, 0.01, 2.99, TRUNCATE), + (2.9909, TICK_SIZE, 0.1, 2.9, TRUNCATE), + # Tests for Significant + (2.34559, SIGNIFICANT_DIGITS, 4, 2.345, TRUNCATE), + (2.34559, SIGNIFICANT_DIGITS, 5, 2.3455, TRUNCATE), + (2.34559, SIGNIFICANT_DIGITS, 3, 2.34, TRUNCATE), + (2.9999, SIGNIFICANT_DIGITS, 3, 2.99, TRUNCATE), + (2.9909, SIGNIFICANT_DIGITS, 2, 2.9, TRUNCATE), + (0.00000777, SIGNIFICANT_DIGITS, 2, 0.0000077, TRUNCATE), + (0.00000729, SIGNIFICANT_DIGITS, 2, 0.0000072, TRUNCATE), + # ROUND + (722.2, SIGNIFICANT_DIGITS, 1, 700.0, ROUND), + (790.2, SIGNIFICANT_DIGITS, 1, 800.0, ROUND), + (722.2, SIGNIFICANT_DIGITS, 2, 720.0, ROUND), + (722.2, SIGNIFICANT_DIGITS, 1, 800.0, ROUND_UP), + (722.2, SIGNIFICANT_DIGITS, 2, 730.0, ROUND_UP), + (777.7, SIGNIFICANT_DIGITS, 2, 780.0, ROUND_UP), + (777.7, 
SIGNIFICANT_DIGITS, 3, 778.0, ROUND_UP), + (722.2, SIGNIFICANT_DIGITS, 1, 700.0, ROUND_DOWN), + (722.2, SIGNIFICANT_DIGITS, 2, 720.0, ROUND_DOWN), + (777.7, SIGNIFICANT_DIGITS, 2, 770.0, ROUND_DOWN), + (777.7, SIGNIFICANT_DIGITS, 3, 777.0, ROUND_DOWN), + (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000008, ROUND_UP), + (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000073, ROUND_UP), + (0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000078, ROUND_UP), + (0.000007222, SIGNIFICANT_DIGITS, 1, 0.000007, ROUND_DOWN), + (0.000007222, SIGNIFICANT_DIGITS, 2, 0.0000072, ROUND_DOWN), + (0.000007777, SIGNIFICANT_DIGITS, 2, 0.0000077, ROUND_DOWN), + ], +) def test_price_to_precision(price, precision_mode, precision, expected, rounding_mode): - assert price_to_precision( - price, precision, precision_mode, rounding_mode=rounding_mode) == expected + assert ( + price_to_precision(price, precision, precision_mode, rounding_mode=rounding_mode) + == expected + ) -@pytest.mark.parametrize('amount,precision,precision_mode,contract_size,expected', [ - (1.17, 1.0, 4, 0.01, 1.17), # Tick size - (1.17, 1.0, 2, 0.01, 1.17), # - (1.16, 1.0, 4, 0.01, 1.16), # - (1.16, 1.0, 2, 0.01, 1.16), # - (1.13, 1.0, 2, 0.01, 1.13), # - (10.988, 1.0, 2, 10, 10), - (10.988, 1.0, 4, 10, 10), -]) -def test_amount_to_contract_precision_standalone(amount, precision, precision_mode, contract_size, - expected): +@pytest.mark.parametrize( + "amount,precision,precision_mode,contract_size,expected", + [ + (1.17, 1.0, 4, 0.01, 1.17), # Tick size + (1.17, 1.0, 2, 0.01, 1.17), # + (1.16, 1.0, 4, 0.01, 1.16), # + (1.16, 1.0, 2, 0.01, 1.16), # + (1.13, 1.0, 2, 0.01, 1.13), # + (10.988, 1.0, 2, 10, 10), + (10.988, 1.0, 4, 10, 10), + ], +) +def test_amount_to_contract_precision_standalone( + amount, precision, precision_mode, contract_size, expected +): res = amount_to_contract_precision(amount, precision, precision_mode, contract_size) assert pytest.approx(res) == expected diff --git a/tests/exchange/test_gate.py b/tests/exchange/test_gate.py index 3cb5a9a3e..b4e021a5d 100644 --- a/tests/exchange/test_gate.py +++ b/tests/exchange/test_gate.py @@ -9,103 +9,113 @@ from tests.conftest import EXMS, get_patched_exchange @pytest.mark.usefixtures("init_persistence") def test_fetch_stoploss_order_gate(default_conf, mocker): - exchange = get_patched_exchange(mocker, default_conf, id='gate') + exchange = get_patched_exchange(mocker, default_conf, id="gate") fetch_order_mock = MagicMock() exchange.fetch_order = fetch_order_mock - exchange.fetch_stoploss_order('1234', 'ETH/BTC') + exchange.fetch_stoploss_order("1234", "ETH/BTC") assert fetch_order_mock.call_count == 1 - assert fetch_order_mock.call_args_list[0][1]['order_id'] == '1234' - assert fetch_order_mock.call_args_list[0][1]['pair'] == 'ETH/BTC' - assert fetch_order_mock.call_args_list[0][1]['params'] == {'stop': True} + assert fetch_order_mock.call_args_list[0][1]["order_id"] == "1234" + assert fetch_order_mock.call_args_list[0][1]["pair"] == "ETH/BTC" + assert fetch_order_mock.call_args_list[0][1]["params"] == {"stop": True} - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" - exchange = get_patched_exchange(mocker, default_conf, id='gate') + exchange = get_patched_exchange(mocker, default_conf, id="gate") - exchange.fetch_order = MagicMock(return_value={ - 'status': 'closed', - 'id': '1234', - 'stopPrice': 5.62, - 'info': { - 'trade_id': '222555' + exchange.fetch_order = MagicMock( + return_value={ 
+ "status": "closed", + "id": "1234", + "stopPrice": 5.62, + "info": {"trade_id": "222555"}, } - }) + ) - exchange.fetch_stoploss_order('1234', 'ETH/BTC') + exchange.fetch_stoploss_order("1234", "ETH/BTC") assert exchange.fetch_order.call_count == 2 - assert exchange.fetch_order.call_args_list[0][1]['order_id'] == '1234' - assert exchange.fetch_order.call_args_list[1][1]['order_id'] == '222555' + assert exchange.fetch_order.call_args_list[0][1]["order_id"] == "1234" + assert exchange.fetch_order.call_args_list[1][1]["order_id"] == "222555" def test_cancel_stoploss_order_gate(default_conf, mocker): - exchange = get_patched_exchange(mocker, default_conf, id='gate') + exchange = get_patched_exchange(mocker, default_conf, id="gate") cancel_order_mock = MagicMock() exchange.cancel_order = cancel_order_mock - exchange.cancel_stoploss_order('1234', 'ETH/BTC') + exchange.cancel_stoploss_order("1234", "ETH/BTC") assert cancel_order_mock.call_count == 1 - assert cancel_order_mock.call_args_list[0][1]['order_id'] == '1234' - assert cancel_order_mock.call_args_list[0][1]['pair'] == 'ETH/BTC' - assert cancel_order_mock.call_args_list[0][1]['params'] == {'stop': True} + assert cancel_order_mock.call_args_list[0][1]["order_id"] == "1234" + assert cancel_order_mock.call_args_list[0][1]["pair"] == "ETH/BTC" + assert cancel_order_mock.call_args_list[0][1]["params"] == {"stop": True} -@pytest.mark.parametrize('sl1,sl2,sl3,side', [ - (1501, 1499, 1501, "sell"), - (1499, 1501, 1499, "buy") -]) +@pytest.mark.parametrize( + "sl1,sl2,sl3,side", [(1501, 1499, 1501, "sell"), (1499, 1501, 1499, "buy")] +) def test_stoploss_adjust_gate(mocker, default_conf, sl1, sl2, sl3, side): - exchange = get_patched_exchange(mocker, default_conf, id='gate') + exchange = get_patched_exchange(mocker, default_conf, id="gate") order = { - 'price': 1500, - 'stopPrice': 1500, + "price": 1500, + "stopPrice": 1500, } assert exchange.stoploss_adjust(sl1, order, side) assert not exchange.stoploss_adjust(sl2, order, side) -@pytest.mark.parametrize('takerormaker,rate,cost', [ - ('taker', 0.0005, 0.0001554325), - ('maker', 0.0, 0.0), -]) +@pytest.mark.parametrize( + "takerormaker,rate,cost", + [ + ("taker", 0.0005, 0.0001554325), + ("maker", 0.0, 0.0), + ], +) def test_fetch_my_trades_gate(mocker, default_conf, takerormaker, rate, cost): - mocker.patch(f'{EXMS}.exchange_has', return_value=True) - tick = {'ETH/USDT:USDT': { - 'info': {'user_id': '', - 'taker_fee': '0.0018', - 'maker_fee': '0.0018', - 'gt_discount': False, - 'gt_taker_fee': '0', - 'gt_maker_fee': '0', - 'loan_fee': '0.18', - 'point_type': '1', - 'futures_taker_fee': '0.0005', - 'futures_maker_fee': '0'}, - 'symbol': 'ETH/USDT:USDT', - 'maker': 0.0, - 'taker': 0.0005} - } - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + mocker.patch(f"{EXMS}.exchange_has", return_value=True) + tick = { + "ETH/USDT:USDT": { + "info": { + "user_id": "", + "taker_fee": "0.0018", + "maker_fee": "0.0018", + "gt_discount": False, + "gt_taker_fee": "0", + "gt_maker_fee": "0", + "loan_fee": "0.18", + "point_type": "1", + "futures_taker_fee": "0.0005", + "futures_maker_fee": "0", + }, + "symbol": "ETH/USDT:USDT", + "maker": 0.0, + "taker": 0.0005, + } + } + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED api_mock = MagicMock() - api_mock.fetch_my_trades = MagicMock(return_value=[{ - 'fee': {'cost': None}, - 'price': 3108.65, - 
'cost': 0.310865, - 'order': '22255', - 'takerOrMaker': takerormaker, - 'amount': 1, # 1 contract - }]) - exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock, id='gate') + api_mock.fetch_my_trades = MagicMock( + return_value=[ + { + "fee": {"cost": None}, + "price": 3108.65, + "cost": 0.310865, + "order": "22255", + "takerOrMaker": takerormaker, + "amount": 1, # 1 contract + } + ] + ) + exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock, id="gate") exchange._trading_fees = tick - trades = exchange.get_trades_for_order('22255', 'ETH/USDT:USDT', datetime.now(timezone.utc)) + trades = exchange.get_trades_for_order("22255", "ETH/USDT:USDT", datetime.now(timezone.utc)) trade = trades[0] - assert trade['fee'] - assert trade['fee']['rate'] == rate - assert trade['fee']['currency'] == 'USDT' - assert trade['fee']['cost'] == cost + assert trade["fee"] + assert trade["fee"]["rate"] == rate + assert trade["fee"]["currency"] == "USDT" + assert trade["fee"]["cost"] == cost diff --git a/tests/exchange/test_htx.py b/tests/exchange/test_htx.py index ac136618f..807d9b28f 100644 --- a/tests/exchange/test_htx.py +++ b/tests/exchange/test_htx.py @@ -9,108 +9,132 @@ from tests.conftest import EXMS, get_patched_exchange from tests.exchange.test_exchange import ccxt_exceptionhandlers -@pytest.mark.parametrize('limitratio,expected,side', [ - (None, 220 * 0.99, "sell"), - (0.99, 220 * 0.99, "sell"), - (0.98, 220 * 0.98, "sell"), -]) +@pytest.mark.parametrize( + "limitratio,expected,side", + [ + (None, 220 * 0.99, "sell"), + (0.99, 220 * 0.99, "sell"), + (0.98, 220 * 0.98, "sell"), + ], +) def test_create_stoploss_order_htx(default_conf, mocker, limitratio, expected, side): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' - order_type = 'stop-limit' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" + order_type = "stop-limit" - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}}) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "htx") with pytest.raises(InvalidOrderException): - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, - order_types={'stoploss_on_exchange_limit_ratio': 1.05}, - side=side, - leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", + amount=1, + stop_price=190, + order_types={"stoploss_on_exchange_limit_ratio": 1.05}, + side=side, + leverage=1.0, + ) api_mock.create_order.reset_mock() - order_types = {} if limitratio is None else {'stoploss_on_exchange_limit_ratio': limitratio} + order_types = {} if limitratio is None else {"stoploss_on_exchange_limit_ratio": limitratio} order = exchange.create_stoploss( - pair='ETH/BTC', amount=1, stop_price=220, order_types=order_types, side=side, leverage=1.0) + pair="ETH/BTC", amount=1, stop_price=220, order_types=order_types, side=side, leverage=1.0 + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert 
api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC' - assert api_mock.create_order.call_args_list[0][1]['type'] == order_type - assert api_mock.create_order.call_args_list[0][1]['side'] == 'sell' - assert api_mock.create_order.call_args_list[0][1]['amount'] == 1 + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args_list[0][1]["symbol"] == "ETH/BTC" + assert api_mock.create_order.call_args_list[0][1]["type"] == order_type + assert api_mock.create_order.call_args_list[0][1]["side"] == "sell" + assert api_mock.create_order.call_args_list[0][1]["amount"] == 1 # Price should be 1% below stopprice - assert api_mock.create_order.call_args_list[0][1]['price'] == expected - assert api_mock.create_order.call_args_list[0][1]['params'] == {"stopPrice": 220, - "operator": "lte", - } + assert api_mock.create_order.call_args_list[0][1]["price"] == expected + assert api_mock.create_order.call_args_list[0][1]["params"] == { + "stopPrice": 220, + "operator": "lte", + } # test exception handling with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx') - exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side=side, leverage=1.0) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "htx") + exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) with pytest.raises(InvalidOrderException): api_mock.create_order = MagicMock( - side_effect=ccxt.InvalidOrder("binance Order would trigger immediately.")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'binance') - exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side=side, leverage=1.0) + side_effect=ccxt.InvalidOrder("binance Order would trigger immediately.") + ) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "binance") + exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, "htx", - "create_stoploss", "create_order", retries=1, - pair='ETH/BTC', amount=1, stop_price=220, order_types={}, - side=side, leverage=1.0) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + "htx", + "create_stoploss", + "create_order", + retries=1, + pair="ETH/BTC", + amount=1, + stop_price=220, + order_types={}, + side=side, + leverage=1.0, + ) def test_create_stoploss_order_dry_run_htx(default_conf, mocker): api_mock = MagicMock() - order_type = 'stop-limit' - default_conf['dry_run'] = True - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + order_type = "stop-limit" + default_conf["dry_run"] = True + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "htx") with pytest.raises(InvalidOrderException): - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, - order_types={'stoploss_on_exchange_limit_ratio': 1.05}, - side='sell', leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", + amount=1, + stop_price=190, + 
order_types={"stoploss_on_exchange_limit_ratio": 1.05}, + side="sell", + leverage=1.0, + ) api_mock.create_order.reset_mock() - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side='sell', leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side="sell", leverage=1.0 + ) - assert 'id' in order - assert 'info' in order - assert 'type' in order + assert "id" in order + assert "info" in order + assert "type" in order - assert order['type'] == order_type - assert order['price'] == 220 - assert order['amount'] == 1 + assert order["type"] == order_type + assert order["price"] == 220 + assert order["amount"] == 1 def test_stoploss_adjust_htx(mocker, default_conf): - exchange = get_patched_exchange(mocker, default_conf, id='htx') + exchange = get_patched_exchange(mocker, default_conf, id="htx") order = { - 'type': 'stop', - 'price': 1500, - 'stopPrice': '1500', + "type": "stop", + "price": 1500, + "stopPrice": "1500", } - assert exchange.stoploss_adjust(1501, order, 'sell') - assert not exchange.stoploss_adjust(1499, order, 'sell') + assert exchange.stoploss_adjust(1501, order, "sell") + assert not exchange.stoploss_adjust(1499, order, "sell") # Test with invalid order case - assert exchange.stoploss_adjust(1501, order, 'sell') + assert exchange.stoploss_adjust(1501, order, "sell") diff --git a/tests/exchange/test_kraken.py b/tests/exchange/test_kraken.py index 760e18982..932677c68 100644 --- a/tests/exchange/test_kraken.py +++ b/tests/exchange/test_kraken.py @@ -9,276 +9,274 @@ from tests.conftest import EXMS, get_patched_exchange from tests.exchange.test_exchange import ccxt_exceptionhandlers -STOPLOSS_ORDERTYPE = 'stop-loss' -STOPLOSS_LIMIT_ORDERTYPE = 'stop-loss-limit' +STOPLOSS_ORDERTYPE = "stop-loss" +STOPLOSS_LIMIT_ORDERTYPE = "stop-loss-limit" -@pytest.mark.parametrize("order_type,time_in_force,expected_params", [ - ('limit', 'ioc', {'timeInForce': 'IOC', 'trading_agreement': 'agree'}), - ('limit', 'PO', {'postOnly': True, 'trading_agreement': 'agree'}), - ('market', None, {'trading_agreement': 'agree'}) -]) +@pytest.mark.parametrize( + "order_type,time_in_force,expected_params", + [ + ("limit", "ioc", {"timeInForce": "IOC", "trading_agreement": "agree"}), + ("limit", "PO", {"postOnly": True, "trading_agreement": "agree"}), + ("market", None, {"trading_agreement": "agree"}), + ], +) def test_kraken_trading_agreement(default_conf, mocker, order_type, time_in_force, expected_params): api_mock = MagicMock() - order_id = f'test_prod_{order_type}_{randint(0, 10 ** 6)}' + order_id = f"test_prod_{order_type}_{randint(0, 10 ** 6)}" api_mock.options = {} - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'symbol': 'ETH/BTC', - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False + api_mock.create_order = MagicMock( + return_value={"id": order_id, "symbol": "ETH/BTC", "info": {"foo": "bar"}} + ) + default_conf["dry_run"] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kraken") order = exchange.create_order( - pair='ETH/BTC', + pair="ETH/BTC", ordertype=order_type, side="buy", amount=1, rate=200, leverage=1.0, - time_in_force=time_in_force + 
time_in_force=time_in_force, ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args[0][0] == 'ETH/BTC' + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args[0][0] == "ETH/BTC" assert api_mock.create_order.call_args[0][1] == order_type - assert api_mock.create_order.call_args[0][2] == 'buy' + assert api_mock.create_order.call_args[0][2] == "buy" assert api_mock.create_order.call_args[0][3] == 1 - assert api_mock.create_order.call_args[0][4] == (200 if order_type == 'limit' else None) + assert api_mock.create_order.call_args[0][4] == (200 if order_type == "limit" else None) assert api_mock.create_order.call_args[0][5] == expected_params def test_get_balances_prod(default_conf, mocker): - balance_item = { - 'free': None, - 'total': 10.0, - 'used': 0.0 - } + balance_item = {"free": None, "total": 10.0, "used": 0.0} api_mock = MagicMock() - api_mock.fetch_balance = MagicMock(return_value={ - '1ST': balance_item.copy(), - '2ST': balance_item.copy(), - '3ST': balance_item.copy(), - '4ST': balance_item.copy(), - 'EUR': balance_item.copy(), - 'timestamp': 123123 - }) - kraken_open_orders = [{'symbol': '1ST/EUR', - 'type': 'limit', - 'side': 'sell', - 'price': 20, - 'cost': 0.0, - 'amount': 1.0, - 'filled': 0.0, - 'average': 0.0, - 'remaining': 1.0, - }, - {'status': 'open', - 'symbol': '2ST/EUR', - 'type': 'limit', - 'side': 'sell', - 'price': 20.0, - 'cost': 0.0, - 'amount': 2.0, - 'filled': 0.0, - 'average': 0.0, - 'remaining': 2.0, - }, - {'status': 'open', - 'symbol': '2ST/USD', - 'type': 'limit', - 'side': 'sell', - 'price': 20.0, - 'cost': 0.0, - 'amount': 2.0, - 'filled': 0.0, - 'average': 0.0, - 'remaining': 2.0, - }, - {'status': 'open', - 'symbol': '3ST/EUR', - 'type': 'limit', - 'side': 'buy', - 'price': 0.02, - 'cost': 0.0, - 'amount': 100.0, - 'filled': 0.0, - 'average': 0.0, - 'remaining': 100.0, - }] + api_mock.fetch_balance = MagicMock( + return_value={ + "1ST": balance_item.copy(), + "2ND": balance_item.copy(), + "3RD": balance_item.copy(), + "4TH": balance_item.copy(), + "EUR": balance_item.copy(), + "timestamp": 123123, + } + ) + kraken_open_orders = [ + { + "symbol": "1ST/EUR", + "type": "limit", + "side": "sell", + "price": 20, + "cost": 0.0, + "amount": 1.0, + "filled": 0.0, + "average": 0.0, + "remaining": 1.0, + }, + { + "status": "open", + "symbol": "2ND/EUR", + "type": "limit", + "side": "sell", + "price": 20.0, + "cost": 0.0, + "amount": 2.0, + "filled": 0.0, + "average": 0.0, + "remaining": 2.0, + }, + { + "status": "open", + "symbol": "2ND/USD", + "type": "limit", + "side": "sell", + "price": 20.0, + "cost": 0.0, + "amount": 2.0, + "filled": 0.0, + "average": 0.0, + "remaining": 2.0, + }, + { + "status": "open", + "symbol": "3RD/EUR", + "type": "limit", + "side": "buy", + "price": 0.02, + "cost": 0.0, + "amount": 100.0, + "filled": 0.0, + "average": 0.0, + "remaining": 100.0, + }, + ] api_mock.fetch_open_orders = MagicMock(return_value=kraken_open_orders) - default_conf['dry_run'] = False + default_conf["dry_run"] = False exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kraken") balances = exchange.get_balances() assert len(balances) == 6 - assert balances['1ST']['free'] == 9.0 - assert balances['1ST']['total'] == 10.0 - assert balances['1ST']['used'] == 1.0 + assert balances["1ST"]["free"] == 9.0 + assert balances["1ST"]["total"] == 10.0 + assert balances["1ST"]["used"] == 1.0 - assert balances['2ST']['free'] == 6.0 
- assert balances['2ST']['total'] == 10.0 - assert balances['2ST']['used'] == 4.0 + assert balances["2ND"]["free"] == 6.0 + assert balances["2ND"]["total"] == 10.0 + assert balances["2ND"]["used"] == 4.0 - assert balances['3ST']['free'] == 10.0 - assert balances['3ST']['total'] == 10.0 - assert balances['3ST']['used'] == 0.0 + assert balances["3RD"]["free"] == 10.0 + assert balances["3RD"]["total"] == 10.0 + assert balances["3RD"]["used"] == 0.0 - assert balances['4ST']['free'] == 10.0 - assert balances['4ST']['total'] == 10.0 - assert balances['4ST']['used'] == 0.0 + assert balances["4TH"]["free"] == 10.0 + assert balances["4TH"]["total"] == 10.0 + assert balances["4TH"]["used"] == 0.0 - assert balances['EUR']['free'] == 8.0 - assert balances['EUR']['total'] == 10.0 - assert balances['EUR']['used'] == 2.0 - ccxt_exceptionhandlers(mocker, default_conf, api_mock, "kraken", - "get_balances", "fetch_balance") + assert balances["EUR"]["free"] == 8.0 + assert balances["EUR"]["total"] == 10.0 + assert balances["EUR"]["used"] == 2.0 + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, "kraken", "get_balances", "fetch_balance" + ) -@pytest.mark.parametrize('ordertype', ['market', 'limit']) -@pytest.mark.parametrize('side,adjustedprice', [ - ("sell", 217.8), - ("buy", 222.2), -]) +@pytest.mark.parametrize("ordertype", ["market", "limit"]) +@pytest.mark.parametrize( + "side,adjustedprice", + [ + ("sell", 217.8), + ("buy", 222.2), + ], +) def test_create_stoploss_order_kraken(default_conf, mocker, ordertype, side, adjustedprice): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - } - }) + api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}}) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kraken') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kraken") order = exchange.create_stoploss( - pair='ETH/BTC', + pair="ETH/BTC", amount=1, stop_price=220, side=side, - order_types={ - 'stoploss': ordertype, - 'stoploss_on_exchange_limit_ratio': 0.99 - }, - leverage=1.0 + order_types={"stoploss": ordertype, "stoploss_on_exchange_limit_ratio": 0.99}, + leverage=1.0, ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC' - assert api_mock.create_order.call_args_list[0][1]['type'] == ordertype - assert api_mock.create_order.call_args_list[0][1]['params'] == { - 'trading_agreement': 'agree', - 'stopLossPrice': 220 + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args_list[0][1]["symbol"] == "ETH/BTC" + assert api_mock.create_order.call_args_list[0][1]["type"] == ordertype + assert api_mock.create_order.call_args_list[0][1]["params"] == { + "trading_agreement": "agree", + "stopLossPrice": 220, } - assert api_mock.create_order.call_args_list[0][1]['side'] == side - assert api_mock.create_order.call_args_list[0][1]['amount'] == 1 - if ordertype == 'limit': - 
assert api_mock.create_order.call_args_list[0][1]['price'] == adjustedprice + assert api_mock.create_order.call_args_list[0][1]["side"] == side + assert api_mock.create_order.call_args_list[0][1]["amount"] == 1 + if ordertype == "limit": + assert api_mock.create_order.call_args_list[0][1]["price"] == adjustedprice else: - assert api_mock.create_order.call_args_list[0][1]['price'] is None + assert api_mock.create_order.call_args_list[0][1]["price"] is None # test exception handling with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kraken') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kraken") exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side=side, - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 ) with pytest.raises(InvalidOrderException): api_mock.create_order = MagicMock( - side_effect=ccxt.InvalidOrder("kraken Order would trigger immediately.")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kraken') + side_effect=ccxt.InvalidOrder("kraken Order would trigger immediately.") + ) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kraken") exchange.create_stoploss( - pair='ETH/BTC', - amount=1, - stop_price=220, - order_types={}, - side=side, - leverage=1.0 + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 ) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, "kraken", - "create_stoploss", "create_order", retries=1, - pair='ETH/BTC', amount=1, stop_price=220, order_types={}, - side=side, leverage=1.0) - - -@pytest.mark.parametrize('side', ['buy', 'sell']) -def test_create_stoploss_order_dry_run_kraken(default_conf, mocker, side): - api_mock = MagicMock() - default_conf['dry_run'] = True - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) - - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kraken') - - api_mock.create_order.reset_mock() - - order = exchange.create_stoploss( - pair='ETH/BTC', + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + "kraken", + "create_stoploss", + "create_order", + retries=1, + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, - leverage=1.0 + leverage=1.0, ) - assert 'id' in order - assert 'info' in order - assert 'type' in order - assert order['type'] == 'market' - assert order['price'] == 220 - assert order['amount'] == 1 +@pytest.mark.parametrize("side", ["buy", "sell"]) +def test_create_stoploss_order_dry_run_kraken(default_conf, mocker, side): + api_mock = MagicMock() + default_conf["dry_run"] = True + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) + + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kraken") + + api_mock.create_order.reset_mock() + + order = exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) + + assert "id" in order + assert "info" in order + assert "type" in order + + assert order["type"] == "market" + assert order["price"] == 220 + assert order["amount"] == 1 -@pytest.mark.parametrize('sl1,sl2,sl3,side', [ - (1501, 1499, 1501, "sell"), - (1499, 1501, 1499, "buy") -]) +@pytest.mark.parametrize( + 
"sl1,sl2,sl3,side", [(1501, 1499, 1501, "sell"), (1499, 1501, 1499, "buy")] +) def test_stoploss_adjust_kraken(mocker, default_conf, sl1, sl2, sl3, side): - exchange = get_patched_exchange(mocker, default_conf, id='kraken') + exchange = get_patched_exchange(mocker, default_conf, id="kraken") order = { - 'type': 'market', - 'stopLossPrice': 1500, + "type": "market", + "stopLossPrice": 1500, } assert exchange.stoploss_adjust(sl1, order, side=side) assert not exchange.stoploss_adjust(sl2, order, side=side) # diff. order type ... - order['type'] = 'limit' + order["type"] = "limit" assert exchange.stoploss_adjust(sl3, order, side=side) -@pytest.mark.parametrize('trade_id, expected', [ - ('1234', False), - ('170544369512007228', False), - ('1705443695120072285', True), - ('170544369512007228555', True), -]) +@pytest.mark.parametrize( + "trade_id, expected", + [ + ("1234", False), + ("170544369512007228", False), + ("1705443695120072285", True), + ("170544369512007228555", True), + ], +) def test__valid_trade_pagination_id_kraken(mocker, default_conf_usdt, trade_id, expected): - exchange = get_patched_exchange(mocker, default_conf_usdt, id='kraken') - assert exchange._valid_trade_pagination_id('XRP/USDT', trade_id) == expected + exchange = get_patched_exchange(mocker, default_conf_usdt, id="kraken") + assert exchange._valid_trade_pagination_id("XRP/USDT", trade_id) == expected diff --git a/tests/exchange/test_kucoin.py b/tests/exchange/test_kucoin.py index a74b77859..1d297505c 100644 --- a/tests/exchange/test_kucoin.py +++ b/tests/exchange/test_kucoin.py @@ -9,161 +9,169 @@ from tests.conftest import EXMS, get_patched_exchange from tests.exchange.test_exchange import ccxt_exceptionhandlers -@pytest.mark.parametrize('order_type', ['market', 'limit']) -@pytest.mark.parametrize('limitratio,expected,side', [ - (None, 220 * 0.99, "sell"), - (0.99, 220 * 0.99, "sell"), - (0.98, 220 * 0.98, "sell"), -]) +@pytest.mark.parametrize("order_type", ["market", "limit"]) +@pytest.mark.parametrize( + "limitratio,expected,side", + [ + (None, 220 * 0.99, "sell"), + (0.99, 220 * 0.99, "sell"), + (0.98, 220 * 0.98, "sell"), + ], +) def test_create_stoploss_order_kucoin(default_conf, mocker, limitratio, expected, side, order_type): api_mock = MagicMock() - order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' + order_id = f"test_prod_buy_{randint(0, 10 ** 6)}" - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - } - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + api_mock.create_order = MagicMock(return_value={"id": order_id, "info": {"foo": "bar"}}) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kucoin') - if order_type == 'limit': + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kucoin") + if order_type == "limit": with pytest.raises(InvalidOrderException): - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, - order_types={ - 'stoploss': order_type, - 'stoploss_on_exchange_limit_ratio': 1.05}, - side=side, leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", + amount=1, + stop_price=190, + order_types={"stoploss": order_type, "stoploss_on_exchange_limit_ratio": 1.05}, + side=side, + leverage=1.0, + ) 
api_mock.create_order.reset_mock() - order_types = {'stoploss': order_type} + order_types = {"stoploss": order_type} if limitratio is not None: - order_types.update({'stoploss_on_exchange_limit_ratio': limitratio}) - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types=order_types, side=side, leverage=1.0) + order_types.update({"stoploss_on_exchange_limit_ratio": limitratio}) + order = exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types=order_types, side=side, leverage=1.0 + ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC' - assert api_mock.create_order.call_args_list[0][1]['type'] == order_type - assert api_mock.create_order.call_args_list[0][1]['side'] == 'sell' - assert api_mock.create_order.call_args_list[0][1]['amount'] == 1 + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert api_mock.create_order.call_args_list[0][1]["symbol"] == "ETH/BTC" + assert api_mock.create_order.call_args_list[0][1]["type"] == order_type + assert api_mock.create_order.call_args_list[0][1]["side"] == "sell" + assert api_mock.create_order.call_args_list[0][1]["amount"] == 1 # Price should be 1% below stopprice - if order_type == 'limit': - assert api_mock.create_order.call_args_list[0][1]['price'] == expected + if order_type == "limit": + assert api_mock.create_order.call_args_list[0][1]["price"] == expected else: - assert api_mock.create_order.call_args_list[0][1]['price'] is None + assert api_mock.create_order.call_args_list[0][1]["price"] is None - assert api_mock.create_order.call_args_list[0][1]['params'] == { - 'stopPrice': 220, - 'stop': 'loss' + assert api_mock.create_order.call_args_list[0][1]["params"] == { + "stopPrice": 220, + "stop": "loss", } # test exception handling with pytest.raises(DependencyException): api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kucoin') - exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side=side, leverage=1.0) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kucoin") + exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) with pytest.raises(InvalidOrderException): api_mock.create_order = MagicMock( - side_effect=ccxt.InvalidOrder("kucoin Order would trigger immediately.")) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kucoin') - exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side=side, leverage=1.0) + side_effect=ccxt.InvalidOrder("kucoin Order would trigger immediately.") + ) + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kucoin") + exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side=side, leverage=1.0 + ) - ccxt_exceptionhandlers(mocker, default_conf, api_mock, "kucoin", - "create_stoploss", "create_order", retries=1, - pair='ETH/BTC', amount=1, stop_price=220, order_types={}, - side=side, leverage=1.0) + ccxt_exceptionhandlers( + mocker, + default_conf, + api_mock, + "kucoin", + "create_stoploss", + "create_order", + retries=1, + pair="ETH/BTC", + amount=1, + stop_price=220, + order_types={}, + side=side, + leverage=1.0, + ) def test_stoploss_order_dry_run_kucoin(default_conf, mocker): api_mock = MagicMock() - order_type = 
'market' - default_conf['dry_run'] = True - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) + order_type = "market" + default_conf["dry_run"] = True + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y, **kwargs: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, 'kucoin') + exchange = get_patched_exchange(mocker, default_conf, api_mock, "kucoin") with pytest.raises(InvalidOrderException): - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, - order_types={'stoploss': 'limit', - 'stoploss_on_exchange_limit_ratio': 1.05}, - side='sell', leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", + amount=1, + stop_price=190, + order_types={"stoploss": "limit", "stoploss_on_exchange_limit_ratio": 1.05}, + side="sell", + leverage=1.0, + ) api_mock.create_order.reset_mock() - order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, - order_types={}, side='sell', leverage=1.0) + order = exchange.create_stoploss( + pair="ETH/BTC", amount=1, stop_price=220, order_types={}, side="sell", leverage=1.0 + ) - assert 'id' in order - assert 'info' in order - assert 'type' in order + assert "id" in order + assert "info" in order + assert "type" in order - assert order['type'] == order_type - assert order['price'] == 220 - assert order['amount'] == 1 + assert order["type"] == order_type + assert order["price"] == 220 + assert order["amount"] == 1 def test_stoploss_adjust_kucoin(mocker, default_conf): - exchange = get_patched_exchange(mocker, default_conf, id='kucoin') + exchange = get_patched_exchange(mocker, default_conf, id="kucoin") order = { - 'type': 'limit', - 'price': 1500, - 'stopPrice': 1500, - 'info': {'stopPrice': 1500, 'stop': "limit"}, + "type": "limit", + "price": 1500, + "stopPrice": 1500, + "info": {"stopPrice": 1500, "stop": "limit"}, } - assert exchange.stoploss_adjust(1501, order, 'sell') - assert not exchange.stoploss_adjust(1499, order, 'sell') + assert exchange.stoploss_adjust(1501, order, "sell") + assert not exchange.stoploss_adjust(1499, order, "sell") # Test with invalid order case - order['stopPrice'] = None - assert exchange.stoploss_adjust(1501, order, 'sell') + order["stopPrice"] = None + assert exchange.stoploss_adjust(1501, order, "sell") @pytest.mark.parametrize("side", ["buy", "sell"]) -@pytest.mark.parametrize("ordertype,rate", [ - ("market", None), - ("market", 200), - ("limit", 200), - ("stop_loss_limit", 200) -]) +@pytest.mark.parametrize( + "ordertype,rate", [("market", None), ("market", 200), ("limit", 200), ("stop_loss_limit", 200)] +) def test_kucoin_create_order(default_conf, mocker, side, ordertype, rate): api_mock = MagicMock() - order_id = f'test_prod_{side}_{randint(0, 10 ** 6)}' - api_mock.create_order = MagicMock(return_value={ - 'id': order_id, - 'info': { - 'foo': 'bar' - }, - 'symbol': 'XRP/USDT', - 'amount': 1 - }) - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='kucoin') + order_id = f"test_prod_{side}_{randint(0, 10 ** 6)}" + api_mock.create_order = MagicMock( + return_value={"id": order_id, "info": {"foo": "bar"}, "symbol": "XRP/USDT", "amount": 1} + ) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.amount_to_precision", lambda s, x, 
y: y) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="kucoin") exchange._set_leverage = MagicMock() exchange.set_margin_mode = MagicMock() order = exchange.create_order( - pair='XRP/USDT', - ordertype=ordertype, - side=side, - amount=1, - rate=rate, - leverage=1.0 + pair="XRP/USDT", ordertype=ordertype, side=side, amount=1, rate=rate, leverage=1.0 ) - assert 'id' in order - assert 'info' in order - assert order['id'] == order_id - assert order['amount'] == 1 + assert "id" in order + assert "info" in order + assert order["id"] == order_id + assert order["amount"] == 1 # Status must be faked to open for kucoin. - assert order['status'] == 'open' + assert order["status"] == "open" diff --git a/tests/exchange/test_okx.py b/tests/exchange/test_okx.py index 69e7e498b..305b16ea2 100644 --- a/tests/exchange/test_okx.py +++ b/tests/exchange/test_okx.py @@ -12,8 +12,8 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers def test_okx_ohlcv_candle_limit(default_conf, mocker): - exchange = get_patched_exchange(mocker, default_conf, id='okx') - timeframes = ('1m', '5m', '1h') + exchange = get_patched_exchange(mocker, default_conf, id="okx") + timeframes = ("1m", "5m", "1h") start_time = int(datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp() * 1000) for timeframe in timeframes: @@ -26,14 +26,24 @@ def test_okx_ohlcv_candle_limit(default_conf, mocker): assert exchange.ohlcv_candle_limit(timeframe, CandleType.FUTURES, start_time) == 100 assert exchange.ohlcv_candle_limit(timeframe, CandleType.MARK, start_time) == 100 assert exchange.ohlcv_candle_limit(timeframe, CandleType.FUNDING_RATE, start_time) == 100 - one_call = int((datetime.now(timezone.utc) - timedelta( - minutes=290 * timeframe_to_minutes(timeframe))).timestamp() * 1000) + one_call = int( + ( + datetime.now(timezone.utc) + - timedelta(minutes=290 * timeframe_to_minutes(timeframe)) + ).timestamp() + * 1000 + ) assert exchange.ohlcv_candle_limit(timeframe, CandleType.SPOT, one_call) == 300 assert exchange.ohlcv_candle_limit(timeframe, CandleType.FUTURES, one_call) == 300 - one_call = int((datetime.now(timezone.utc) - timedelta( - minutes=320 * timeframe_to_minutes(timeframe))).timestamp() * 1000) + one_call = int( + ( + datetime.now(timezone.utc) + - timedelta(minutes=320 * timeframe_to_minutes(timeframe)) + ).timestamp() + * 1000 + ) assert exchange.ohlcv_candle_limit(timeframe, CandleType.SPOT, one_call) == 100 assert exchange.ohlcv_candle_limit(timeframe, CandleType.FUTURES, one_call) == 100 @@ -43,200 +53,210 @@ def test_get_maintenance_ratio_and_amt_okx( mocker, ): api_mock = MagicMock() - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - default_conf['dry_run'] = False + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + default_conf["dry_run"] = False mocker.patch.multiple( - 'freqtrade.exchange.okx.Okx', + "freqtrade.exchange.okx.Okx", exchange_has=MagicMock(return_value=True), - load_leverage_tiers=MagicMock(return_value={ - 'ETH/USDT:USDT': [ - { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '2000', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ETH-USDT' - } - }, - { - 'tier': 2, - 'minNotional': 2001, - 'maxNotional': 4000, - 'maintenanceMarginRate': 
0.015, - 'maxLeverage': 50, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.02', - 'instId': '', - 'maxLever': '50', - 'maxSz': '4000', - 'minSz': '2001', - 'mmr': '0.015', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '2', - 'uly': 'ETH-USDT' - } - }, - { - 'tier': 3, - 'minNotional': 4001, - 'maxNotional': 8000, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 20, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.05', - 'instId': '', - 'maxLever': '20', - 'maxSz': '8000', - 'minSz': '4001', - 'mmr': '0.02', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '3', - 'uly': 'ETH-USDT' - } - }, - ], - 'ADA/USDT:USDT': [ - { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 500, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '500', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ADA-USDT' - } - }, - { - 'tier': 2, - 'minNotional': 501, - 'maxNotional': 1000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 50, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.02', - 'instId': '', - 'maxLever': '50', - 'maxSz': '1000', - 'minSz': '501', - 'mmr': '0.015', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '2', - 'uly': 'ADA-USDT' - } - }, - { - 'tier': 3, - 'minNotional': 1001, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.03, - 'maxLeverage': 20, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.05', - 'instId': '', - 'maxLever': '20', - 'maxSz': '2000', - 'minSz': '1001', - 'mmr': '0.02', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '3', - 'uly': 'ADA-USDT' - } - }, - ] - }) + load_leverage_tiers=MagicMock( + return_value={ + "ETH/USDT:USDT": [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 2000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "2000", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ETH-USDT", + }, + }, + { + "tier": 2, + "minNotional": 2001, + "maxNotional": 4000, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50, + "info": { + "baseMaxLoan": "", + "imr": "0.02", + "instId": "", + "maxLever": "50", + "maxSz": "4000", + "minSz": "2001", + "mmr": "0.015", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "2", + "uly": "ETH-USDT", + }, + }, + { + "tier": 3, + "minNotional": 4001, + "maxNotional": 8000, + "maintenanceMarginRate": 0.02, + "maxLeverage": 20, + "info": { + "baseMaxLoan": "", + "imr": "0.05", + "instId": "", + "maxLever": "20", + "maxSz": "8000", + "minSz": "4001", + "mmr": "0.02", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "3", + "uly": "ETH-USDT", + }, + }, + ], + "ADA/USDT:USDT": [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 500, + "maintenanceMarginRate": 0.02, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "500", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ADA-USDT", + }, + }, + { + "tier": 2, + "minNotional": 501, + "maxNotional": 1000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 50, + "info": { + "baseMaxLoan": "", + "imr": "0.02", + "instId": "", + "maxLever": "50", + "maxSz": "1000", + "minSz": "501", + "mmr": "0.015", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "2", + "uly": "ADA-USDT", + }, + }, + { + "tier": 3, + "minNotional": 1001, + 
"maxNotional": 2000, + "maintenanceMarginRate": 0.03, + "maxLeverage": 20, + "info": { + "baseMaxLoan": "", + "imr": "0.05", + "instId": "", + "maxLever": "20", + "maxSz": "2000", + "minSz": "1001", + "mmr": "0.02", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "3", + "uly": "ADA-USDT", + }, + }, + ], + } + ), ) exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") - assert exchange.get_maintenance_ratio_and_amt('ETH/USDT:USDT', 2000) == (0.01, None) - assert exchange.get_maintenance_ratio_and_amt('ETH/USDT:USDT', 2001) == (0.015, None) - assert exchange.get_maintenance_ratio_and_amt('ETH/USDT:USDT', 4001) == (0.02, None) - assert exchange.get_maintenance_ratio_and_amt('ETH/USDT:USDT', 8000) == (0.02, None) + assert exchange.get_maintenance_ratio_and_amt("ETH/USDT:USDT", 2000) == (0.01, None) + assert exchange.get_maintenance_ratio_and_amt("ETH/USDT:USDT", 2001) == (0.015, None) + assert exchange.get_maintenance_ratio_and_amt("ETH/USDT:USDT", 4001) == (0.02, None) + assert exchange.get_maintenance_ratio_and_amt("ETH/USDT:USDT", 8000) == (0.02, None) - assert exchange.get_maintenance_ratio_and_amt('ADA/USDT:USDT', 1) == (0.02, None) - assert exchange.get_maintenance_ratio_and_amt('ADA/USDT:USDT', 2000) == (0.03, None) + assert exchange.get_maintenance_ratio_and_amt("ADA/USDT:USDT", 1) == (0.02, None) + assert exchange.get_maintenance_ratio_and_amt("ADA/USDT:USDT", 2000) == (0.03, None) def test_get_max_pair_stake_amount_okx(default_conf, mocker, leverage_tiers): - exchange = get_patched_exchange(mocker, default_conf, id="okx") - assert exchange.get_max_pair_stake_amount('BNB/BUSD', 1.0) == float('inf') + assert exchange.get_max_pair_stake_amount("BNB/BUSD", 1.0) == float("inf") - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" exchange = get_patched_exchange(mocker, default_conf, id="okx") exchange._leverage_tiers = leverage_tiers - assert exchange.get_max_pair_stake_amount('XRP/USDT:USDT', 1.0) == 30000000 - assert exchange.get_max_pair_stake_amount('BNB/USDT:USDT', 1.0) == 50000000 - assert exchange.get_max_pair_stake_amount('BTC/USDT:USDT', 1.0) == 1000000000 - assert exchange.get_max_pair_stake_amount('BTC/USDT:USDT', 1.0, 10.0) == 100000000 + assert exchange.get_max_pair_stake_amount("XRP/USDT:USDT", 1.0) == 30000000 + assert exchange.get_max_pair_stake_amount("BNB/USDT:USDT", 1.0) == 50000000 + assert exchange.get_max_pair_stake_amount("BTC/USDT:USDT", 1.0) == 1000000000 + assert exchange.get_max_pair_stake_amount("BTC/USDT:USDT", 1.0, 10.0) == 100000000 - assert exchange.get_max_pair_stake_amount('TTT/USDT:USDT', 1.0) == float('inf') # Not in tiers + assert exchange.get_max_pair_stake_amount("TTT/USDT:USDT", 1.0) == float("inf") # Not in tiers -@pytest.mark.parametrize('mode,side,reduceonly,result', [ - ('net', 'buy', False, 'net'), - ('net', 'sell', True, 'net'), - ('net', 'sell', False, 'net'), - ('net', 'buy', True, 'net'), - ('longshort', 'buy', False, 'long'), - ('longshort', 'sell', True, 'long'), - ('longshort', 'sell', False, 'short'), - ('longshort', 'buy', True, 'short'), -]) +@pytest.mark.parametrize( + "mode,side,reduceonly,result", + [ + ("net", "buy", False, "net"), + ("net", "sell", True, "net"), + ("net", "sell", False, "net"), + ("net", "buy", True, "net"), + ("longshort", "buy", False, "long"), + ("longshort", "sell", True, "long"), + ("longshort", "sell", False, "short"), + ("longshort", "buy", True, "short"), + ], 
+) def test__get_posSide(default_conf, mocker, mode, side, reduceonly, result): - exchange = get_patched_exchange(mocker, default_conf, id="okx") - exchange.net_only = mode == 'net' + exchange.net_only = mode == "net" assert exchange._get_posSide(side, reduceonly) == result def test_additional_exchange_init_okx(default_conf, mocker): api_mock = MagicMock() - api_mock.fetch_accounts = MagicMock(return_value=[ - {'id': '2555', - 'type': '2', - 'currency': None, - 'info': {'acctLv': '2', - 'autoLoan': False, - 'ctIsoMode': 'automatic', - 'greeksType': 'PA', - 'level': 'Lv1', - 'levelTmp': '', - 'mgnIsoMode': 'automatic', - 'posMode': 'long_short_mode', - 'uid': '2555'}}]) - default_conf['dry_run'] = False + api_mock.fetch_accounts = MagicMock( + return_value=[ + { + "id": "2555", + "type": "2", + "currency": None, + "info": { + "acctLv": "2", + "autoLoan": False, + "ctIsoMode": "automatic", + "greeksType": "PA", + "level": "Lv1", + "levelTmp": "", + "mgnIsoMode": "automatic", + "posMode": "long_short_mode", + "uid": "2555", + }, + } + ] + ) + default_conf["dry_run"] = False exchange = get_patched_exchange(mocker, default_conf, id="okx", api_mock=api_mock) assert api_mock.fetch_accounts.call_count == 0 exchange.trading_mode = TradingMode.FUTURES @@ -246,225 +266,237 @@ def test_additional_exchange_init_okx(default_conf, mocker): assert api_mock.fetch_accounts.call_count == 1 assert not exchange.net_only - api_mock.fetch_accounts = MagicMock(return_value=[ - {'id': '2555', - 'type': '2', - 'currency': None, - 'info': {'acctLv': '2', - 'autoLoan': False, - 'ctIsoMode': 'automatic', - 'greeksType': 'PA', - 'level': 'Lv1', - 'levelTmp': '', - 'mgnIsoMode': 'automatic', - 'posMode': 'net_mode', - 'uid': '2555'}}]) + api_mock.fetch_accounts = MagicMock( + return_value=[ + { + "id": "2555", + "type": "2", + "currency": None, + "info": { + "acctLv": "2", + "autoLoan": False, + "ctIsoMode": "automatic", + "greeksType": "PA", + "level": "Lv1", + "levelTmp": "", + "mgnIsoMode": "automatic", + "posMode": "net_mode", + "uid": "2555", + }, + } + ] + ) exchange.additional_exchange_init() assert api_mock.fetch_accounts.call_count == 1 assert exchange.net_only - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - ccxt_exceptionhandlers(mocker, default_conf, api_mock, 'okx', - "additional_exchange_init", "fetch_accounts") + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + ccxt_exceptionhandlers( + mocker, default_conf, api_mock, "okx", "additional_exchange_init", "fetch_accounts" + ) def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmp_path, caplog, time_machine): - - default_conf['datadir'] = tmp_path + default_conf["datadir"] = tmp_path # fd_mock = mocker.patch('freqtrade.exchange.exchange.file_dump_json') api_mock = MagicMock() - type(api_mock).has = PropertyMock(return_value={ - 'fetchLeverageTiers': False, - 'fetchMarketLeverageTiers': True, - }) - api_mock.fetch_market_leverage_tiers = AsyncMock(side_effect=[ - [ - { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 500, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '500', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ADA-USDT' - } - }, - { - 'tier': 2, - 'minNotional': 501, - 'maxNotional': 1000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 50, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.02', - 
'instId': '', - 'maxLever': '50', - 'maxSz': '1000', - 'minSz': '501', - 'mmr': '0.015', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '2', - 'uly': 'ADA-USDT' - } - }, - { - 'tier': 3, - 'minNotional': 1001, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.03, - 'maxLeverage': 20, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.05', - 'instId': '', - 'maxLever': '20', - 'maxSz': '2000', - 'minSz': '1001', - 'mmr': '0.02', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '3', - 'uly': 'ADA-USDT' - } - }, - ], - TemporaryError("this Failed"), - [ - { - 'tier': 1, - 'minNotional': 0, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 75, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.013', - 'instId': '', - 'maxLever': '75', - 'maxSz': '2000', - 'minSz': '0', - 'mmr': '0.01', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '1', - 'uly': 'ETH-USDT' - } - }, - { - 'tier': 2, - 'minNotional': 2001, - 'maxNotional': 4000, - 'maintenanceMarginRate': 0.015, - 'maxLeverage': 50, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.02', - 'instId': '', - 'maxLever': '50', - 'maxSz': '4000', - 'minSz': '2001', - 'mmr': '0.015', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '2', - 'uly': 'ETH-USDT' - } - }, - { - 'tier': 3, - 'minNotional': 4001, - 'maxNotional': 8000, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 20, - 'info': { - 'baseMaxLoan': '', - 'imr': '0.05', - 'instId': '', - 'maxLever': '20', - 'maxSz': '8000', - 'minSz': '4001', - 'mmr': '0.02', - 'optMgnFactor': '0', - 'quoteMaxLoan': '', - 'tier': '3', - 'uly': 'ETH-USDT' - } - }, + type(api_mock).has = PropertyMock( + return_value={ + "fetchLeverageTiers": False, + "fetchMarketLeverageTiers": True, + } + ) + api_mock.fetch_market_leverage_tiers = AsyncMock( + side_effect=[ + [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 500, + "maintenanceMarginRate": 0.02, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "500", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ADA-USDT", + }, + }, + { + "tier": 2, + "minNotional": 501, + "maxNotional": 1000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 50, + "info": { + "baseMaxLoan": "", + "imr": "0.02", + "instId": "", + "maxLever": "50", + "maxSz": "1000", + "minSz": "501", + "mmr": "0.015", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "2", + "uly": "ADA-USDT", + }, + }, + { + "tier": 3, + "minNotional": 1001, + "maxNotional": 2000, + "maintenanceMarginRate": 0.03, + "maxLeverage": 20, + "info": { + "baseMaxLoan": "", + "imr": "0.05", + "instId": "", + "maxLever": "20", + "maxSz": "2000", + "minSz": "1001", + "mmr": "0.02", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "3", + "uly": "ADA-USDT", + }, + }, + ], + TemporaryError("this Failed"), + [ + { + "tier": 1, + "minNotional": 0, + "maxNotional": 2000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 75, + "info": { + "baseMaxLoan": "", + "imr": "0.013", + "instId": "", + "maxLever": "75", + "maxSz": "2000", + "minSz": "0", + "mmr": "0.01", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "1", + "uly": "ETH-USDT", + }, + }, + { + "tier": 2, + "minNotional": 2001, + "maxNotional": 4000, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50, + "info": { + "baseMaxLoan": "", + "imr": "0.02", + "instId": "", + "maxLever": "50", + "maxSz": "4000", + "minSz": "2001", + "mmr": "0.015", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "2", + 
"uly": "ETH-USDT", + }, + }, + { + "tier": 3, + "minNotional": 4001, + "maxNotional": 8000, + "maintenanceMarginRate": 0.02, + "maxLeverage": 20, + "info": { + "baseMaxLoan": "", + "imr": "0.05", + "instId": "", + "maxLever": "20", + "maxSz": "8000", + "minSz": "4001", + "mmr": "0.02", + "optMgnFactor": "0", + "quoteMaxLoan": "", + "tier": "3", + "uly": "ETH-USDT", + }, + }, + ], ] - ]) - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - default_conf['stake_currency'] = 'USDT' + ) + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + default_conf["stake_currency"] = "USDT" exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") exchange.trading_mode = TradingMode.FUTURES exchange.margin_mode = MarginMode.ISOLATED exchange.markets = markets # Initialization of load_leverage_tiers happens as part of exchange init. assert exchange._leverage_tiers == { - 'ADA/USDT:USDT': [ + "ADA/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 500, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 75, - 'maintAmt': None + "minNotional": 0, + "maxNotional": 500, + "maintenanceMarginRate": 0.02, + "maxLeverage": 75, + "maintAmt": None, }, { - 'minNotional': 501, - 'maxNotional': 1000, - 'maintenanceMarginRate': 0.025, - 'maxLeverage': 50, - 'maintAmt': None + "minNotional": 501, + "maxNotional": 1000, + "maintenanceMarginRate": 0.025, + "maxLeverage": 50, + "maintAmt": None, }, { - 'minNotional': 1001, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.03, - 'maxLeverage': 20, - 'maintAmt': None + "minNotional": 1001, + "maxNotional": 2000, + "maintenanceMarginRate": 0.03, + "maxLeverage": 20, + "maintAmt": None, }, ], - 'ETH/USDT:USDT': [ + "ETH/USDT:USDT": [ { - 'minNotional': 0, - 'maxNotional': 2000, - 'maintenanceMarginRate': 0.01, - 'maxLeverage': 75, - 'maintAmt': None + "minNotional": 0, + "maxNotional": 2000, + "maintenanceMarginRate": 0.01, + "maxLeverage": 75, + "maintAmt": None, }, { - 'minNotional': 2001, - 'maxNotional': 4000, - 'maintenanceMarginRate': 0.015, - 'maxLeverage': 50, - 'maintAmt': None + "minNotional": 2001, + "maxNotional": 4000, + "maintenanceMarginRate": 0.015, + "maxLeverage": 50, + "maintAmt": None, }, { - 'minNotional': 4001, - 'maxNotional': 8000, - 'maintenanceMarginRate': 0.02, - 'maxLeverage': 20, - 'maintAmt': None + "minNotional": 4001, + "maxNotional": 8000, + "maintenanceMarginRate": 0.02, + "maxLeverage": 20, + "maintAmt": None, }, ], } - filename = (default_conf['datadir'] / - f"futures/leverage_tiers_{default_conf['stake_currency']}.json") + filename = ( + default_conf["datadir"] / f"futures/leverage_tiers_{default_conf['stake_currency']}.json" + ) assert filename.is_file() - logmsg = 'Cached leverage tiers are outdated. Will update.' + logmsg = "Cached leverage tiers are outdated. Will update." 
assert not log_has(logmsg, caplog) api_mock.fetch_market_leverage_tiers.reset_mock() @@ -481,25 +513,25 @@ def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmp_path, caplog def test__set_leverage_okx(mocker, default_conf): - api_mock = MagicMock() api_mock.set_leverage = MagicMock() - type(api_mock).has = PropertyMock(return_value={'setLeverage': True}) - default_conf['dry_run'] = False - default_conf['trading_mode'] = TradingMode.FUTURES - default_conf['margin_mode'] = MarginMode.ISOLATED + type(api_mock).has = PropertyMock(return_value={"setLeverage": True}) + default_conf["dry_run"] = False + default_conf["trading_mode"] = TradingMode.FUTURES + default_conf["margin_mode"] = MarginMode.ISOLATED exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") - exchange._lev_prep('BTC/USDT:USDT', 3.2, 'buy') + exchange._lev_prep("BTC/USDT:USDT", 3.2, "buy") assert api_mock.set_leverage.call_count == 1 # Leverage is rounded to 3. - assert api_mock.set_leverage.call_args_list[0][1]['leverage'] == 3.2 - assert api_mock.set_leverage.call_args_list[0][1]['symbol'] == 'BTC/USDT:USDT' - assert api_mock.set_leverage.call_args_list[0][1]['params'] == { - 'mgnMode': 'isolated', - 'posSide': 'net'} + assert api_mock.set_leverage.call_args_list[0][1]["leverage"] == 3.2 + assert api_mock.set_leverage.call_args_list[0][1]["symbol"] == "BTC/USDT:USDT" + assert api_mock.set_leverage.call_args_list[0][1]["params"] == { + "mgnMode": "isolated", + "posSide": "net", + } api_mock.set_leverage = MagicMock(side_effect=ccxt.NetworkError()) - exchange._lev_prep('BTC/USDT:USDT', 3.2, 'buy') + exchange._lev_prep("BTC/USDT:USDT", 3.2, "buy") assert api_mock.fetch_leverage.call_count == 1 api_mock.fetch_leverage = MagicMock(side_effect=ccxt.NetworkError()) @@ -512,23 +544,23 @@ def test__set_leverage_okx(mocker, default_conf): "set_leverage", pair="XRP/USDT:USDT", leverage=5.0, - side='buy' + side="buy", ) @pytest.mark.usefixtures("init_persistence") def test_fetch_stoploss_order_okx(default_conf, mocker): - default_conf['dry_run'] = False + default_conf["dry_run"] = False api_mock = MagicMock() api_mock.fetch_order = MagicMock() - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='okx') + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") - exchange.fetch_stoploss_order('1234', 'ETH/BTC') + exchange.fetch_stoploss_order("1234", "ETH/BTC") assert api_mock.fetch_order.call_count == 1 - assert api_mock.fetch_order.call_args_list[0][0][0] == '1234' - assert api_mock.fetch_order.call_args_list[0][0][1] == 'ETH/BTC' - assert api_mock.fetch_order.call_args_list[0][1]['params'] == {'stop': True} + assert api_mock.fetch_order.call_args_list[0][0][0] == "1234" + assert api_mock.fetch_order.call_args_list[0][0][1] == "ETH/BTC" + assert api_mock.fetch_order.call_args_list[0][1]["params"] == {"stop": True} api_mock.fetch_order = MagicMock(side_effect=ccxt.OrderNotFound) api_mock.fetch_open_orders = MagicMock(return_value=[]) @@ -536,7 +568,7 @@ def test_fetch_stoploss_order_okx(default_conf, mocker): api_mock.fetch_canceled_orders = MagicMock(creturn_value=[]) with pytest.raises(RetryableOrderError): - exchange.fetch_stoploss_order('1234', 'ETH/BTC') + exchange.fetch_stoploss_order("1234", "ETH/BTC") assert api_mock.fetch_order.call_count == 1 assert api_mock.fetch_open_orders.call_count == 1 assert api_mock.fetch_closed_orders.call_count == 1 @@ -547,33 +579,29 @@ def test_fetch_stoploss_order_okx(default_conf, mocker): api_mock.fetch_closed_orders.reset_mock() 
api_mock.fetch_canceled_orders.reset_mock() - api_mock.fetch_closed_orders = MagicMock(return_value=[ - { - 'id': '1234', - 'status': 'closed', - 'info': {'ordId': '123455'} - } - ]) - mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value={'id': '123455'})) - resp = exchange.fetch_stoploss_order('1234', 'ETH/BTC') + api_mock.fetch_closed_orders = MagicMock( + return_value=[{"id": "1234", "status": "closed", "info": {"ordId": "123455"}}] + ) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value={"id": "123455"})) + resp = exchange.fetch_stoploss_order("1234", "ETH/BTC") assert api_mock.fetch_order.call_count == 1 assert api_mock.fetch_open_orders.call_count == 1 assert api_mock.fetch_closed_orders.call_count == 1 assert api_mock.fetch_canceled_orders.call_count == 0 - assert resp['id'] == '1234' - assert resp['id_stop'] == '123455' - assert resp['type'] == 'stoploss' + assert resp["id"] == "1234" + assert resp["id_stop"] == "123455" + assert resp["type"] == "stoploss" - default_conf['dry_run'] = True - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='okx') - dro_mock = mocker.patch(f"{EXMS}.fetch_dry_run_order", MagicMock(return_value={'id': '123455'})) + default_conf["dry_run"] = True + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") + dro_mock = mocker.patch(f"{EXMS}.fetch_dry_run_order", MagicMock(return_value={"id": "123455"})) api_mock.fetch_order.reset_mock() api_mock.fetch_open_orders.reset_mock() api_mock.fetch_closed_orders.reset_mock() api_mock.fetch_canceled_orders.reset_mock() - resp = exchange.fetch_stoploss_order('1234', 'ETH/BTC') + resp = exchange.fetch_stoploss_order("1234", "ETH/BTC") assert api_mock.fetch_order.call_count == 0 assert api_mock.fetch_open_orders.call_count == 0 @@ -582,105 +610,105 @@ def test_fetch_stoploss_order_okx(default_conf, mocker): assert dro_mock.call_count == 1 -@pytest.mark.parametrize('sl1,sl2,sl3,side', [ - (1501, 1499, 1501, "sell"), - (1499, 1501, 1499, "buy") -]) +@pytest.mark.parametrize( + "sl1,sl2,sl3,side", [(1501, 1499, 1501, "sell"), (1499, 1501, 1499, "buy")] +) def test_stoploss_adjust_okx(mocker, default_conf, sl1, sl2, sl3, side): - exchange = get_patched_exchange(mocker, default_conf, id='okx') + exchange = get_patched_exchange(mocker, default_conf, id="okx") order = { - 'type': 'stoploss', - 'price': 1500, - 'stopLossPrice': 1500, + "type": "stoploss", + "price": 1500, + "stopLossPrice": 1500, } assert exchange.stoploss_adjust(sl1, order, side=side) assert not exchange.stoploss_adjust(sl2, order, side=side) def test_stoploss_cancel_okx(mocker, default_conf): - exchange = get_patched_exchange(mocker, default_conf, id='okx') + exchange = get_patched_exchange(mocker, default_conf, id="okx") exchange.cancel_order = MagicMock() - exchange.cancel_stoploss_order('1234', 'ETH/USDT') + exchange.cancel_stoploss_order("1234", "ETH/USDT") assert exchange.cancel_order.call_count == 1 - assert exchange.cancel_order.call_args_list[0][1]['order_id'] == '1234' - assert exchange.cancel_order.call_args_list[0][1]['pair'] == 'ETH/USDT' - assert exchange.cancel_order.call_args_list[0][1]['params'] == {'stop': True} + assert exchange.cancel_order.call_args_list[0][1]["order_id"] == "1234" + assert exchange.cancel_order.call_args_list[0][1]["pair"] == "ETH/USDT" + assert exchange.cancel_order.call_args_list[0][1]["params"] == {"stop": True} def test__get_stop_params_okx(mocker, default_conf): - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' - exchange = 
get_patched_exchange(mocker, default_conf, id='okx') - params = exchange._get_stop_params('ETH/USDT:USDT', 1500, 'sell') + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" + exchange = get_patched_exchange(mocker, default_conf, id="okx") + params = exchange._get_stop_params("ETH/USDT:USDT", 1500, "sell") - assert params['tdMode'] == 'isolated' - assert params['posSide'] == 'net' + assert params["tdMode"] == "isolated" + assert params["posSide"] == "net" def test_fetch_orders_okx(default_conf, mocker, limit_order): - api_mock = MagicMock() - api_mock.fetch_orders = MagicMock(return_value=[ - limit_order['buy'], - limit_order['sell'], - ]) - api_mock.fetch_open_orders = MagicMock(return_value=[limit_order['buy']]) - api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order['buy']]) + api_mock.fetch_orders = MagicMock( + return_value=[ + limit_order["buy"], + limit_order["sell"], + ] + ) + api_mock.fetch_open_orders = MagicMock(return_value=[limit_order["buy"]]) + api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order["buy"]]) - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) start_time = datetime.now(timezone.utc) - timedelta(days=20) - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='okx') + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") # Not available in dry-run - assert exchange.fetch_orders('mocked', start_time) == [] + assert exchange.fetch_orders("mocked", start_time) == [] assert api_mock.fetch_orders.call_count == 0 - default_conf['dry_run'] = False + default_conf["dry_run"] = False - exchange = get_patched_exchange(mocker, default_conf, api_mock, id='okx') + exchange = get_patched_exchange(mocker, default_conf, api_mock, id="okx") def has_resp(_, endpoint): - if endpoint == 'fetchOrders': + if endpoint == "fetchOrders": return False - if endpoint == 'fetchClosedOrders': + if endpoint == "fetchClosedOrders": return True - if endpoint == 'fetchOpenOrders': + if endpoint == "fetchOpenOrders": return True - mocker.patch(f'{EXMS}.exchange_has', has_resp) + mocker.patch(f"{EXMS}.exchange_has", has_resp) - history_params = {'method': 'privateGetTradeOrdersHistoryArchive'} + history_params = {"method": "privateGetTradeOrdersHistoryArchive"} # happy path without fetchOrders - exchange.fetch_orders('mocked', start_time) + exchange.fetch_orders("mocked", start_time) assert api_mock.fetch_orders.call_count == 0 assert api_mock.fetch_open_orders.call_count == 1 assert api_mock.fetch_closed_orders.call_count == 2 - assert 'params' not in api_mock.fetch_closed_orders.call_args_list[0][1] - assert api_mock.fetch_closed_orders.call_args_list[1][1]['params'] == history_params + assert "params" not in api_mock.fetch_closed_orders.call_args_list[0][1] + assert api_mock.fetch_closed_orders.call_args_list[1][1]["params"] == history_params api_mock.fetch_open_orders.reset_mock() api_mock.fetch_closed_orders.reset_mock() # regular closed_orders endpoint only has history for 7 days. 
- exchange.fetch_orders('mocked', datetime.now(timezone.utc) - timedelta(days=6)) + exchange.fetch_orders("mocked", datetime.now(timezone.utc) - timedelta(days=6)) assert api_mock.fetch_orders.call_count == 0 assert api_mock.fetch_open_orders.call_count == 1 assert api_mock.fetch_closed_orders.call_count == 1 - assert 'params' not in api_mock.fetch_closed_orders.call_args_list[0][1] + assert "params" not in api_mock.fetch_closed_orders.call_args_list[0][1] - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) # Unhappy path - first fetch-orders call fails. api_mock.fetch_orders = MagicMock(side_effect=ccxt.NotSupported()) api_mock.fetch_open_orders.reset_mock() api_mock.fetch_closed_orders.reset_mock() - exchange.fetch_orders('mocked', start_time) + exchange.fetch_orders("mocked", start_time) assert api_mock.fetch_orders.call_count == 1 assert api_mock.fetch_open_orders.call_count == 1 assert api_mock.fetch_closed_orders.call_count == 2 - assert 'params' not in api_mock.fetch_closed_orders.call_args_list[0][1] - assert api_mock.fetch_closed_orders.call_args_list[1][1]['params'] == history_params + assert "params" not in api_mock.fetch_closed_orders.call_args_list[0][1] + assert api_mock.fetch_closed_orders.call_args_list[1][1]["params"] == history_params diff --git a/tests/exchange_online/conftest.py b/tests/exchange_online/conftest.py index acd7d747f..8820ce3e7 100644 --- a/tests/exchange_online/conftest.py +++ b/tests/exchange_online/conftest.py @@ -14,128 +14,148 @@ EXCHANGE_FIXTURE_TYPE = Tuple[Exchange, str] # Exchanges that should be tested online EXCHANGES = { - 'binance': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'use_ci_proxy': True, - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': True, - 'futures_pair': 'BTC/USDT:USDT', - 'hasQuoteVolumeFutures': True, - 'leverage_tiers_public': False, - 'leverage_in_spot_market': False, - 'trades_lookback_hours': 4, - 'private_methods': [ - 'fapiPrivateGetPositionSideDual', - 'fapiPrivateGetMultiAssetsMargin' - ], - 'sample_order': [{ - "symbol": "SOLUSDT", - "orderId": 3551312894, - "orderListId": -1, - "clientOrderId": "x-R4DD3S8297c73a11ccb9dc8f2811ba", - "transactTime": 1674493798550, - "price": "15.50000000", - "origQty": "1.10000000", - "executedQty": "0.00000000", - "cummulativeQuoteQty": "0.00000000", - "status": "NEW", - "timeInForce": "GTC", - "type": "LIMIT", - "side": "BUY", - "workingTime": 1674493798550, - "fills": [], - "selfTradePreventionMode": "NONE", - }] - }, - 'binanceus': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': False, - 'sample_order': [{ - "symbol": "SOLUSDT", - "orderId": 3551312894, - "orderListId": -1, - "clientOrderId": "x-R4DD3S8297c73a11ccb9dc8f2811ba", - "transactTime": 1674493798550, - "price": "15.50000000", - "origQty": "1.10000000", - "executedQty": "0.00000000", - "cummulativeQuoteQty": "0.00000000", - "status": "NEW", - "timeInForce": "GTC", - "type": "LIMIT", - "side": "BUY", - "workingTime": 1674493798550, - "fills": [], - "selfTradePreventionMode": "NONE", - }] - }, - 'kraken': { - 'pair': 'BTC/USD', - 'stake_currency': 'USD', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'leverage_tiers_public': False, - 'leverage_in_spot_market': True, - 'trades_lookback_hours': 12, - }, - 'kucoin': { - 'pair': 'XRP/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'leverage_tiers_public': False, - 'leverage_in_spot_market': True, 
- 'sample_order': [ - {'id': '63d6742d0adc5570001d2bbf7'}, # create order + "binance": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "use_ci_proxy": True, + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": True, + "futures_pair": "BTC/USDT:USDT", + "hasQuoteVolumeFutures": True, + "leverage_tiers_public": False, + "leverage_in_spot_market": False, + "trades_lookback_hours": 4, + "private_methods": ["fapiPrivateGetPositionSideDual", "fapiPrivateGetMultiAssetsMargin"], + "sample_order": [ { - 'id': '63d6742d0adc5570001d2bbf7', - 'symbol': 'SOL-USDT', - 'opType': 'DEAL', - 'type': 'limit', - 'side': 'buy', - 'price': '15.5', - 'size': '1.1', - 'funds': '0', - 'dealFunds': '17.05', - 'dealSize': '1.1', - 'fee': '0.000065252', - 'feeCurrency': 'USDT', - 'stp': '', - 'stop': '', - 'stopTriggered': False, - 'stopPrice': '0', - 'timeInForce': 'GTC', - 'postOnly': False, - 'hidden': False, - 'iceberg': False, - 'visibleSize': '0', - 'cancelAfter': 0, - 'channel': 'API', - 'clientOid': '0a053870-11bf-41e5-be61-b272a4cb62e1', - 'remark': None, - 'tags': 'partner:ccxt', - 'isActive': False, - 'cancelExist': False, - 'createdAt': 1674493798550, - 'tradeType': 'TRADE' - }], + "symbol": "SOLUSDT", + "orderId": 3551312894, + "orderListId": -1, + "clientOrderId": "x-R4DD3S8297c73a11ccb9dc8f2811ba", + "transactTime": 1674493798550, + "price": "15.50000000", + "origQty": "1.10000000", + "executedQty": "0.00000000", + "cummulativeQuoteQty": "0.00000000", + "status": "NEW", + "timeInForce": "GTC", + "type": "LIMIT", + "side": "BUY", + "workingTime": 1674493798550, + "fills": [], + "selfTradePreventionMode": "NONE", + }, + { + "symbol": "SOLUSDT", + "orderId": 3551312894, + "orderListId": -1, + "clientOrderId": "x-R4DD3S8297c73a11ccb9dc8f2811ba", + "transactTime": 1674493798550, + "price": "15.50000000", + "origQty": "1.10000000", + "executedQty": "1.10000000", + "cummulativeQuoteQty": "17.05", + "status": "FILLED", + "timeInForce": "GTC", + "type": "LIMIT", + "side": "BUY", + "workingTime": 1674493798550, + "fills": [], + "selfTradePreventionMode": "NONE", + }, + ], }, - 'gate': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': True, - 'futures_pair': 'BTC/USDT:USDT', - 'hasQuoteVolumeFutures': True, - 'leverage_tiers_public': True, - 'leverage_in_spot_market': True, - 'sample_order': [ + "binanceus": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": False, + "sample_order": [ + { + "symbol": "SOLUSDT", + "orderId": 3551312894, + "orderListId": -1, + "clientOrderId": "x-R4DD3S8297c73a11ccb9dc8f2811ba", + "transactTime": 1674493798550, + "price": "15.50000000", + "origQty": "1.10000000", + "executedQty": "0.00000000", + "cummulativeQuoteQty": "0.00000000", + "status": "NEW", + "timeInForce": "GTC", + "type": "LIMIT", + "side": "BUY", + "workingTime": 1674493798550, + "fills": [], + "selfTradePreventionMode": "NONE", + } + ], + }, + "kraken": { + "pair": "BTC/USD", + "stake_currency": "USD", + "hasQuoteVolume": True, + "timeframe": "1h", + "leverage_tiers_public": False, + "leverage_in_spot_market": True, + "trades_lookback_hours": 12, + }, + "kucoin": { + "pair": "XRP/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "leverage_tiers_public": False, + "leverage_in_spot_market": True, + "sample_order": [ + {"id": "63d6742d0adc5570001d2bbf7"}, # create order + { + "id": "63d6742d0adc5570001d2bbf7", + "symbol": "SOL-USDT", + "opType": "DEAL", 
+ "type": "limit", + "side": "buy", + "price": "15.5", + "size": "1.1", + "funds": "0", + "dealFunds": "17.05", + "dealSize": "1.1", + "fee": "0.000065252", + "feeCurrency": "USDT", + "stp": "", + "stop": "", + "stopTriggered": False, + "stopPrice": "0", + "timeInForce": "GTC", + "postOnly": False, + "hidden": False, + "iceberg": False, + "visibleSize": "0", + "cancelAfter": 0, + "channel": "API", + "clientOid": "0a053870-11bf-41e5-be61-b272a4cb62e1", + "remark": None, + "tags": "partner:ccxt", + "isActive": False, + "cancelExist": False, + "createdAt": 1674493798550, + "tradeType": "TRADE", + }, + ], + }, + "gate": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": True, + "futures_pair": "BTC/USDT:USDT", + "hasQuoteVolumeFutures": True, + "leverage_tiers_public": True, + "leverage_in_spot_market": True, + "sample_order": [ { "id": "276266139423", "text": "apiv4", @@ -164,65 +184,83 @@ EXCHANGES = { "gt_taker_fee": "0.0015", "gt_discount": True, "rebated_fee": "0", - "rebated_fee_currency": "USDT" + "rebated_fee_currency": "USDT", }, { # market order - 'id': '276401180529', - 'text': 'apiv4', - 'create_time': '1674493798', - 'update_time': '1674493798', - 'create_time_ms': '1674493798550', - 'update_time_ms': '1674493798550', - 'status': 'cancelled', - 'currency_pair': 'SOL_USDT', - 'type': 'market', - 'account': 'spot', - 'side': 'buy', - 'amount': '17.05', - 'price': '0', - 'time_in_force': 'ioc', - 'iceberg': '0', - 'left': '0.0000000016228', - 'fill_price': '17.05', - 'filled_total': '17.05', - 'avg_deal_price': '15.5', - 'fee': '0', - 'fee_currency': 'SOL', - 'point_fee': '0.0199999999967544', - 'gt_fee': '0', - 'gt_maker_fee': '0', - 'gt_taker_fee': '0', - 'gt_discount': False, - 'rebated_fee': '0', - 'rebated_fee_currency': 'USDT' + "id": "276401180529", + "text": "apiv4", + "create_time": "1674493798", + "update_time": "1674493798", + "create_time_ms": "1674493798550", + "update_time_ms": "1674493798550", + "status": "cancelled", + "currency_pair": "SOL_USDT", + "type": "market", + "account": "spot", + "side": "buy", + "amount": "17.05", + "price": "0", + "time_in_force": "ioc", + "iceberg": "0", + "left": "0.0000000016228", + "fill_price": "17.05", + "filled_total": "17.05", + "avg_deal_price": "15.5", + "fee": "0", + "fee_currency": "SOL", + "point_fee": "0.0199999999967544", + "gt_fee": "0", + "gt_maker_fee": "0", + "gt_taker_fee": "0", + "gt_discount": False, + "rebated_fee": "0", + "rebated_fee_currency": "USDT", + }, + ], + "sample_my_trades": [ + { + "id": "123412341234", + "create_time": "167997798", + "create_time_ms": "167997798825.566200", + "currency_pair": "ETH_USDT", + "side": "sell", + "role": "taker", + "amount": "0.0115", + "price": "1712.63", + "order_id": "1234123412", + "fee": "0.0", + "fee_currency": "USDT", + "point_fee": "0.03939049", + "gt_fee": "0.0", + "amend_text": "-", } ], }, - 'okx': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': True, - 'futures_pair': 'BTC/USDT:USDT', - 'hasQuoteVolumeFutures': False, - 'leverage_tiers_public': True, - 'leverage_in_spot_market': True, - 'private_methods': ['fetch_accounts'], + "okx": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": True, + "futures_pair": "BTC/USDT:USDT", + "hasQuoteVolumeFutures": False, + "leverage_tiers_public": True, + "leverage_in_spot_market": True, + "private_methods": ["fetch_accounts"], }, - 'bybit': { - 
'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'use_ci_proxy': True, - 'timeframe': '1h', - 'futures_pair': 'BTC/USDT:USDT', - 'futures': True, - 'orderbook_max_entries': 50, - 'leverage_tiers_public': True, - 'leverage_in_spot_market': True, - 'sample_order': [ + "bybit": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "use_ci_proxy": True, + "timeframe": "1h", + "futures_pair": "BTC/USDT:USDT", + "futures": True, + "orderbook_max_entries": 50, + "leverage_tiers_public": True, + "leverage_in_spot_market": True, + "sample_order": [ { "orderId": "1274754916287346280", "orderLinkId": "1666798627015730", @@ -236,38 +274,68 @@ EXCHANGES = { "timeInForce": "GTC", "accountId": "5555555", "execQty": "0", - "orderCategory": "0" + "orderCategory": "0", } - ] + ], }, - 'bitmart': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'orderbook_max_entries': 50, + "bitmart": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "orderbook_max_entries": 50, }, - 'htx': { - 'pair': 'ETH/BTC', - 'stake_currency': 'BTC', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': False, + "htx": { + "pair": "ETH/BTC", + "stake_currency": "BTC", + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": False, }, - 'bitvavo': { - 'pair': 'BTC/EUR', - 'stake_currency': 'EUR', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'leverage_tiers_public': False, - 'leverage_in_spot_market': False, + "bitvavo": { + "pair": "BTC/EUR", + "stake_currency": "EUR", + "hasQuoteVolume": True, + "timeframe": "1h", + "leverage_tiers_public": False, + "leverage_in_spot_market": False, }, - 'bingx': { - 'pair': 'BTC/USDT', - 'stake_currency': 'USDT', - 'hasQuoteVolume': True, - 'timeframe': '1h', - 'futures': False, + "bingx": { + "pair": "BTC/USDT", + "stake_currency": "USDT", + "hasQuoteVolume": True, + "timeframe": "1h", + "futures": False, + "sample_order": [ + { + "symbol": "SOL-USDT", + "orderId": "1762393630149869568", + "transactTime": "1674493798550", + "price": "15.5", + "stopPrice": "0", + "origQty": "1.1", + "executedQty": "1.1", + "cummulativeQuoteQty": "17.05", + "status": "FILLED", + "type": "LIMIT", + "side": "BUY", + "clientOrderID": "", + }, + { + "symbol": "SOL-USDT", + "orderId": "1762393630149869568", + "transactTime": "1674493798550", + "price": "15.5", + "stopPrice": "0", + "origQty": "1.1", + "executedQty": "1.1", + "cummulativeQuoteQty": "17.05", + "status": "FILLED", + "type": "MARKET", + "side": "BUY", + "clientOrderID": "", + }, + ], }, } @@ -275,21 +343,22 @@ EXCHANGES = { @pytest.fixture(scope="class") def exchange_conf(): config = get_default_conf_usdt((Path(__file__).parent / "testdata").resolve()) - config['exchange']['pair_whitelist'] = [] - config['exchange']['key'] = '' - config['exchange']['secret'] = '' - config['dry_run'] = False - config['entry_pricing']['use_order_book'] = True - config['exit_pricing']['use_order_book'] = True + config["exchange"]["pair_whitelist"] = [] + config["exchange"]["key"] = "" + config["exchange"]["secret"] = "" + config["dry_run"] = False + config["entry_pricing"]["use_order_book"] = True + config["exit_pricing"]["use_order_book"] = True return config def set_test_proxy(config: Config, use_proxy: bool) -> Config: # Set proxy to test in CI. 
import os - if use_proxy and (proxy := os.environ.get('CI_WEB_PROXY')): + + if use_proxy and (proxy := os.environ.get("CI_WEB_PROXY")): config1 = deepcopy(config) - config1['exchange']['ccxt_config'] = { + config1["exchange"]["ccxt_config"] = { "httpsProxy": proxy, } return config1 @@ -299,44 +368,45 @@ def set_test_proxy(config: Config, use_proxy: bool) -> Config: def get_exchange(exchange_name, exchange_conf): exchange_conf = set_test_proxy( - exchange_conf, EXCHANGES[exchange_name].get('use_ci_proxy', False)) - exchange_conf['exchange']['name'] = exchange_name - exchange_conf['stake_currency'] = EXCHANGES[exchange_name]['stake_currency'] - exchange = ExchangeResolver.load_exchange(exchange_conf, validate=True, - load_leverage_tiers=True) + exchange_conf, EXCHANGES[exchange_name].get("use_ci_proxy", False) + ) + exchange_conf["exchange"]["name"] = exchange_name + exchange_conf["stake_currency"] = EXCHANGES[exchange_name]["stake_currency"] + exchange = ExchangeResolver.load_exchange( + exchange_conf, validate=True, load_leverage_tiers=True + ) yield exchange, exchange_name def get_futures_exchange(exchange_name, exchange_conf, class_mocker): - if EXCHANGES[exchange_name].get('futures') is not True: + if EXCHANGES[exchange_name].get("futures") is not True: pytest.skip(f"Exchange {exchange_name} does not support futures.") else: exchange_conf = deepcopy(exchange_conf) exchange_conf = set_test_proxy( - exchange_conf, EXCHANGES[exchange_name].get('use_ci_proxy', False)) - exchange_conf['trading_mode'] = 'futures' - exchange_conf['margin_mode'] = 'isolated' + exchange_conf, EXCHANGES[exchange_name].get("use_ci_proxy", False) + ) + exchange_conf["trading_mode"] = "futures" + exchange_conf["margin_mode"] = "isolated" - class_mocker.patch( - 'freqtrade.exchange.binance.Binance.fill_leverage_tiers') - class_mocker.patch(f'{EXMS}.fetch_trading_fees') - class_mocker.patch('freqtrade.exchange.okx.Okx.additional_exchange_init') - class_mocker.patch('freqtrade.exchange.binance.Binance.additional_exchange_init') - class_mocker.patch('freqtrade.exchange.bybit.Bybit.additional_exchange_init') - class_mocker.patch(f'{EXMS}.load_cached_leverage_tiers', return_value=None) - class_mocker.patch(f'{EXMS}.cache_leverage_tiers') + class_mocker.patch("freqtrade.exchange.binance.Binance.fill_leverage_tiers") + class_mocker.patch(f"{EXMS}.fetch_trading_fees") + class_mocker.patch("freqtrade.exchange.okx.Okx.additional_exchange_init") + class_mocker.patch("freqtrade.exchange.binance.Binance.additional_exchange_init") + class_mocker.patch("freqtrade.exchange.bybit.Bybit.additional_exchange_init") + class_mocker.patch(f"{EXMS}.load_cached_leverage_tiers", return_value=None) + class_mocker.patch(f"{EXMS}.cache_leverage_tiers") yield from get_exchange(exchange_name, exchange_conf) @pytest.fixture(params=EXCHANGES, scope="class") def exchange(request, exchange_conf, class_mocker): - class_mocker.patch('freqtrade.exchange.bybit.Bybit.additional_exchange_init') + class_mocker.patch("freqtrade.exchange.bybit.Bybit.additional_exchange_init") yield from get_exchange(request.param, exchange_conf) @pytest.fixture(params=EXCHANGES, scope="class") def exchange_futures(request, exchange_conf, class_mocker): - yield from get_futures_exchange(request.param, exchange_conf, class_mocker) diff --git a/tests/exchange_online/test_ccxt_compat.py b/tests/exchange_online/test_ccxt_compat.py index 370bc8184..49fbfc60d 100644 --- a/tests/exchange_online/test_ccxt_compat.py +++ b/tests/exchange_online/test_ccxt_compat.py @@ -18,38 +18,40 @@ 
from tests.exchange_online.conftest import EXCHANGE_FIXTURE_TYPE, EXCHANGES @pytest.mark.longrun class TestCCXTExchange: - def test_load_markets(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] + pair = EXCHANGES[exchangename]["pair"] markets = exch.markets assert pair in markets assert isinstance(markets[pair], dict) assert exch.market_is_spot(markets[pair]) def test_has_validations(self, exchange: EXCHANGE_FIXTURE_TYPE): - exch, exchangename = exchange - exch.validate_ordertypes({ - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - }) + exch.validate_ordertypes( + { + "entry": "limit", + "exit": "limit", + "stoploss": "limit", + } + ) - if exchangename == 'gate': + if exchangename == "gate": # gate doesn't have market orders on spot return - exch.validate_ordertypes({ - 'entry': 'market', - 'exit': 'market', - 'stoploss': 'market', - }) + exch.validate_ordertypes( + { + "entry": "market", + "exit": "market", + "stoploss": "market", + } + ) def test_load_markets_futures(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): exchange, exchangename = exchange_futures - pair = EXCHANGES[exchangename]['pair'] - pair = EXCHANGES[exchangename].get('futures_pair', pair) + pair = EXCHANGES[exchangename]["pair"] + pair = EXCHANGES[exchangename].get("futures_pair", pair) markets = exchange.markets assert pair in markets assert isinstance(markets[pair], dict) @@ -58,90 +60,118 @@ class TestCCXTExchange: def test_ccxt_order_parse(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchange_name = exchange - if orders := EXCHANGES[exchange_name].get('sample_order'): - pair = 'SOL/USDT' + if orders := EXCHANGES[exchange_name].get("sample_order"): + pair = "SOL/USDT" for order in orders: market = exch._api.markets[pair] po = exch._api.parse_order(order, market) - assert isinstance(po['id'], str) - assert po['id'] is not None + assert isinstance(po["id"], str) + assert po["id"] is not None if len(order.keys()) < 5: # Kucoin case - assert po['status'] is None + assert po["status"] is None continue - assert po['timestamp'] == 1674493798550 - assert isinstance(po['datetime'], str) - assert isinstance(po['timestamp'], int) - assert isinstance(po['price'], float) - assert po['price'] == 15.5 - if po['average'] is not None: - assert isinstance(po['average'], float) - assert po['average'] == 15.5 - assert po['symbol'] == pair - assert isinstance(po['amount'], float) - assert po['amount'] == 1.1 - assert isinstance(po['status'], str) + assert po["timestamp"] == 1674493798550 + assert isinstance(po["datetime"], str) + assert isinstance(po["timestamp"], int) + assert isinstance(po["price"], float) + assert po["price"] == 15.5 + if po["status"] == "closed": + # Filled orders should have average assigned. 
+ assert isinstance(po["average"], float) + assert po["average"] == 15.5 + assert po["symbol"] == pair + assert isinstance(po["amount"], float) + assert po["amount"] == 1.1 + assert isinstance(po["status"], str) else: pytest.skip(f"No sample order available for exchange {exchange_name}") + def test_ccxt_my_trades_parse(self, exchange: EXCHANGE_FIXTURE_TYPE): + exch, exchange_name = exchange + if trades := EXCHANGES[exchange_name].get("sample_my_trades"): + pair = "SOL/USDT" + for trade in trades: + market = exch._api.markets[pair] + po = exch._api.parse_trade(trade) + (trade, market) + assert isinstance(po["id"], str) + assert isinstance(po["side"], str) + assert isinstance(po["amount"], float) + assert isinstance(po["price"], float) + assert isinstance(po["datetime"], str) + assert isinstance(po["timestamp"], int) + + if fees := po.get("fees"): + assert isinstance(fees, list) + for fee in fees: + assert isinstance(fee, dict) + assert isinstance(fee["cost"], str) + # TODO: this should be a float! + # assert isinstance(fee["cost"], float) + assert isinstance(fee["currency"], str) + + else: + pytest.skip(f"No sample Trades available for exchange {exchange_name}") + def test_ccxt_fetch_tickers(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] + pair = EXCHANGES[exchangename]["pair"] tickers = exch.get_tickers() assert pair in tickers - assert 'ask' in tickers[pair] - assert tickers[pair]['ask'] is not None - assert 'bid' in tickers[pair] - assert tickers[pair]['bid'] is not None - assert 'quoteVolume' in tickers[pair] - if EXCHANGES[exchangename].get('hasQuoteVolume'): - assert tickers[pair]['quoteVolume'] is not None + assert "ask" in tickers[pair] + assert tickers[pair]["ask"] is not None + assert "bid" in tickers[pair] + assert tickers[pair]["bid"] is not None + assert "quoteVolume" in tickers[pair] + if EXCHANGES[exchangename].get("hasQuoteVolume"): + assert tickers[pair]["quoteVolume"] is not None def test_ccxt_fetch_tickers_futures(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange_futures - if not exch or exchangename in ('gate'): + if not exch or exchangename in ("gate"): # exchange_futures only returns values for supported exchanges return - pair = EXCHANGES[exchangename]['pair'] - pair = EXCHANGES[exchangename].get('futures_pair', pair) + pair = EXCHANGES[exchangename]["pair"] + pair = EXCHANGES[exchangename].get("futures_pair", pair) tickers = exch.get_tickers() assert pair in tickers - assert 'ask' in tickers[pair] - assert tickers[pair]['ask'] is not None - assert 'bid' in tickers[pair] - assert tickers[pair]['bid'] is not None - assert 'quoteVolume' in tickers[pair] - if EXCHANGES[exchangename].get('hasQuoteVolumeFutures'): - assert tickers[pair]['quoteVolume'] is not None + assert "ask" in tickers[pair] + assert tickers[pair]["ask"] is not None + assert "bid" in tickers[pair] + assert tickers[pair]["bid"] is not None + assert "quoteVolume" in tickers[pair] + if EXCHANGES[exchangename].get("hasQuoteVolumeFutures"): + assert tickers[pair]["quoteVolume"] is not None def test_ccxt_fetch_ticker(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] + pair = EXCHANGES[exchangename]["pair"] ticker = exch.fetch_ticker(pair) - assert 'ask' in ticker - assert ticker['ask'] is not None - assert 'bid' in ticker - assert ticker['bid'] is not None - assert 'quoteVolume' in ticker - if EXCHANGES[exchangename].get('hasQuoteVolume'): - assert 
ticker['quoteVolume'] is not None + assert "ask" in ticker + assert ticker["ask"] is not None + assert "bid" in ticker + assert ticker["bid"] is not None + assert "quoteVolume" in ticker + if EXCHANGES[exchangename].get("hasQuoteVolume"): + assert ticker["quoteVolume"] is not None def test_ccxt_fetch_l2_orderbook(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] + pair = EXCHANGES[exchangename]["pair"] l2 = exch.fetch_l2_order_book(pair) - orderbook_max_entries = EXCHANGES[exchangename].get('orderbook_max_entries') - assert 'asks' in l2 - assert 'bids' in l2 - assert len(l2['asks']) >= 1 - assert len(l2['bids']) >= 1 - l2_limit_range = exch._ft_has['l2_limit_range'] - l2_limit_range_required = exch._ft_has['l2_limit_range_required'] - if exchangename == 'gate': + orderbook_max_entries = EXCHANGES[exchangename].get("orderbook_max_entries") + assert "asks" in l2 + assert "bids" in l2 + assert len(l2["asks"]) >= 1 + assert len(l2["bids"]) >= 1 + l2_limit_range = exch._ft_has["l2_limit_range"] + l2_limit_range_required = exch._ft_has["l2_limit_range_required"] + if exchangename == "gate": # TODO: Gate is unstable here at the moment, ignoring the limit partially. return for val in [1, 2, 5, 25, 50, 100]: @@ -151,29 +181,30 @@ class TestCCXTExchange: if not l2_limit_range or val in l2_limit_range: if val > 50: # Orderbooks are not always this deep. - assert val - 5 < len(l2['asks']) <= val - assert val - 5 < len(l2['bids']) <= val + assert val - 5 < len(l2["asks"]) <= val + assert val - 5 < len(l2["bids"]) <= val else: - assert len(l2['asks']) == val - assert len(l2['bids']) == val + assert len(l2["asks"]) == val + assert len(l2["bids"]) == val else: next_limit = exch.get_next_limit_in_list( - val, l2_limit_range, l2_limit_range_required) + val, l2_limit_range, l2_limit_range_required + ) if next_limit is None: - assert len(l2['asks']) > 100 - assert len(l2['asks']) > 100 + assert len(l2["asks"]) > 100 + assert len(l2["asks"]) > 100 elif next_limit > 200: # Large orderbook sizes can be a problem for some exchanges (bitrex ...) - assert len(l2['asks']) > 200 - assert len(l2['asks']) > 200 + assert len(l2["asks"]) > 200 + assert len(l2["asks"]) > 200 else: - assert len(l2['asks']) == next_limit - assert len(l2['asks']) == next_limit + assert len(l2["asks"]) == next_limit + assert len(l2["asks"]) == next_limit def test_ccxt_fetch_ohlcv(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] - timeframe = EXCHANGES[exchangename]['timeframe'] + pair = EXCHANGES[exchangename]["pair"] + timeframe = EXCHANGES[exchangename]["timeframe"] pair_tf = (pair, timeframe, CandleType.SPOT) @@ -182,19 +213,20 @@ class TestCCXTExchange: assert len(ohlcv[pair_tf]) == len(exch.klines(pair_tf)) # assert len(exch.klines(pair_tf)) > 200 # Assume 90% uptime ... 
- assert len(exch.klines(pair_tf)) > exch.ohlcv_candle_limit( - timeframe, CandleType.SPOT) * 0.90 + assert ( + len(exch.klines(pair_tf)) > exch.ohlcv_candle_limit(timeframe, CandleType.SPOT) * 0.90 + ) # Check if last-timeframe is within the last 2 intervals now = datetime.now(timezone.utc) - timedelta(minutes=(timeframe_to_minutes(timeframe) * 2)) - assert exch.klines(pair_tf).iloc[-1]['date'] >= timeframe_to_prev_date(timeframe, now) + assert exch.klines(pair_tf).iloc[-1]["date"] >= timeframe_to_prev_date(timeframe, now) def test_ccxt_fetch_ohlcv_startdate(self, exchange: EXCHANGE_FIXTURE_TYPE): """ Test that pair data starts at the provided startdate """ exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] - timeframe = '1d' + pair = EXCHANGES[exchangename]["pair"] + timeframe = "1d" pair_tf = (pair, timeframe, CandleType.SPOT) # last 5 days ... @@ -204,25 +236,22 @@ class TestCCXTExchange: assert len(ohlcv[pair_tf]) == len(exch.klines(pair_tf)) # Check if last-timeframe is within the last 2 intervals now = datetime.now(timezone.utc) - timedelta(minutes=(timeframe_to_minutes(timeframe) * 2)) - assert exch.klines(pair_tf).iloc[-1]['date'] >= timeframe_to_prev_date(timeframe, now) - assert exch.klines(pair_tf)['date'].astype(int).iloc[0] // 1e6 == since_ms + assert exch.klines(pair_tf).iloc[-1]["date"] >= timeframe_to_prev_date(timeframe, now) + assert exch.klines(pair_tf)["date"].astype(int).iloc[0] // 1e6 == since_ms def ccxt__async_get_candle_history( - self, exchange, exchangename, pair, timeframe, candle_type, factor=0.9): - + self, exchange, exchangename, pair, timeframe, candle_type, factor=0.9 + ): timeframe_ms = timeframe_to_msecs(timeframe) - now = timeframe_to_prev_date( - timeframe, datetime.now(timezone.utc)) + now = timeframe_to_prev_date(timeframe, datetime.now(timezone.utc)) for offset in (360, 120, 30, 10, 5, 2): since = now - timedelta(days=offset) since_ms = int(since.timestamp() * 1000) - res = exchange.loop.run_until_complete(exchange._async_get_candle_history( - pair=pair, - timeframe=timeframe, - since_ms=since_ms, - candle_type=candle_type - ) + res = exchange.loop.run_until_complete( + exchange._async_get_candle_history( + pair=pair, timeframe=timeframe, since_ms=since_ms, candle_type=candle_type + ) ) assert res assert res[0] == pair @@ -231,34 +260,39 @@ class TestCCXTExchange: candles = res[3] candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * factor candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms * factor - assert len(candles) >= min(candle_count, candle_count1), \ - f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}" + assert len(candles) >= min( + candle_count, candle_count1 + ), f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}" # Check if first-timeframe is either the start, or start + 1 assert candles[0][0] == since_ms or (since_ms + timeframe_ms) def test_ccxt__async_get_candle_history(self, exchange: EXCHANGE_FIXTURE_TYPE): exc, exchangename = exchange - if not exc._ft_has['ohlcv_has_history']: + if not exc._ft_has["ohlcv_has_history"]: pytest.skip("Exchange does not support candle history") - pair = EXCHANGES[exchangename]['pair'] - timeframe = EXCHANGES[exchangename]['timeframe'] - self.ccxt__async_get_candle_history( - exc, exchangename, pair, timeframe, CandleType.SPOT) + pair = EXCHANGES[exchangename]["pair"] + timeframe = EXCHANGES[exchangename]["timeframe"] + self.ccxt__async_get_candle_history(exc, exchangename, pair, timeframe, 
CandleType.SPOT) - @pytest.mark.parametrize('candle_type', [ - CandleType.FUTURES, - CandleType.FUNDING_RATE, - CandleType.MARK, - ]) + @pytest.mark.parametrize( + "candle_type", + [ + CandleType.FUTURES, + CandleType.FUNDING_RATE, + CandleType.MARK, + ], + ) def test_ccxt__async_get_candle_history_futures( - self, exchange_futures: EXCHANGE_FIXTURE_TYPE, candle_type): + self, exchange_futures: EXCHANGE_FIXTURE_TYPE, candle_type + ): exchange, exchangename = exchange_futures - pair = EXCHANGES[exchangename].get('futures_pair', EXCHANGES[exchangename]['pair']) - timeframe = EXCHANGES[exchangename]['timeframe'] + pair = EXCHANGES[exchangename].get("futures_pair", EXCHANGES[exchangename]["pair"]) + timeframe = EXCHANGES[exchangename]["timeframe"] if candle_type == CandleType.FUNDING_RATE: - timeframe = exchange._ft_has.get('funding_fee_timeframe', - exchange._ft_has['mark_ohlcv_timeframe']) + timeframe = exchange._ft_has.get( + "funding_fee_timeframe", exchange._ft_has["mark_ohlcv_timeframe"] + ) self.ccxt__async_get_candle_history( exchange, exchangename, @@ -270,16 +304,16 @@ class TestCCXTExchange: def test_ccxt_fetch_funding_rate_history(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): exchange, exchangename = exchange_futures - pair = EXCHANGES[exchangename].get('futures_pair', EXCHANGES[exchangename]['pair']) + pair = EXCHANGES[exchangename].get("futures_pair", EXCHANGES[exchangename]["pair"]) since = int((datetime.now(timezone.utc) - timedelta(days=5)).timestamp() * 1000) - timeframe_ff = exchange._ft_has.get('funding_fee_timeframe', - exchange._ft_has['mark_ohlcv_timeframe']) + timeframe_ff = exchange._ft_has.get( + "funding_fee_timeframe", exchange._ft_has["mark_ohlcv_timeframe"] + ) pair_tf = (pair, timeframe_ff, CandleType.FUNDING_RATE) funding_ohlcv = exchange.refresh_latest_ohlcv( - [pair_tf], - since_ms=since, - drop_incomplete=False) + [pair_tf], since_ms=since, drop_incomplete=False + ) assert isinstance(funding_ohlcv, dict) rate = funding_ohlcv[pair_tf] @@ -288,61 +322,58 @@ class TestCCXTExchange: hour1 = timeframe_to_prev_date(timeframe_ff, this_hour - timedelta(minutes=1)) hour2 = timeframe_to_prev_date(timeframe_ff, hour1 - timedelta(minutes=1)) hour3 = timeframe_to_prev_date(timeframe_ff, hour2 - timedelta(minutes=1)) - val0 = rate[rate['date'] == this_hour].iloc[0]['open'] - val1 = rate[rate['date'] == hour1].iloc[0]['open'] - val2 = rate[rate['date'] == hour2].iloc[0]['open'] - val3 = rate[rate['date'] == hour3].iloc[0]['open'] + val0 = rate[rate["date"] == this_hour].iloc[0]["open"] + val1 = rate[rate["date"] == hour1].iloc[0]["open"] + val2 = rate[rate["date"] == hour2].iloc[0]["open"] + val3 = rate[rate["date"] == hour3].iloc[0]["open"] # Test For last 4 hours # Avoids random test-failure when funding-fees are 0 for a few hours. assert val0 != 0.0 or val1 != 0.0 or val2 != 0.0 or val3 != 0.0 # We expect funding rates to be different from 0.0 - or moving around. 
assert ( - rate['open'].max() != 0.0 or rate['open'].min() != 0.0 or - (rate['open'].min() != rate['open'].max()) + rate["open"].max() != 0.0 + or rate["open"].min() != 0.0 + or (rate["open"].min() != rate["open"].max()) ) def test_ccxt_fetch_mark_price_history(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): exchange, exchangename = exchange_futures - pair = EXCHANGES[exchangename].get('futures_pair', EXCHANGES[exchangename]['pair']) + pair = EXCHANGES[exchangename].get("futures_pair", EXCHANGES[exchangename]["pair"]) since = int((datetime.now(timezone.utc) - timedelta(days=5)).timestamp() * 1000) - pair_tf = (pair, '1h', CandleType.MARK) + pair_tf = (pair, "1h", CandleType.MARK) - mark_ohlcv = exchange.refresh_latest_ohlcv( - [pair_tf], - since_ms=since, - drop_incomplete=False) + mark_ohlcv = exchange.refresh_latest_ohlcv([pair_tf], since_ms=since, drop_incomplete=False) assert isinstance(mark_ohlcv, dict) - expected_tf = '1h' + expected_tf = "1h" mark_candles = mark_ohlcv[pair_tf] this_hour = timeframe_to_prev_date(expected_tf) prev_hour = timeframe_to_prev_date(expected_tf, this_hour - timedelta(minutes=1)) - assert mark_candles[mark_candles['date'] == prev_hour].iloc[0]['open'] != 0.0 - assert mark_candles[mark_candles['date'] == this_hour].iloc[0]['open'] != 0.0 + assert mark_candles[mark_candles["date"] == prev_hour].iloc[0]["open"] != 0.0 + assert mark_candles[mark_candles["date"] == this_hour].iloc[0]["open"] != 0.0 def test_ccxt__calculate_funding_fees(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): exchange, exchangename = exchange_futures - pair = EXCHANGES[exchangename].get('futures_pair', EXCHANGES[exchangename]['pair']) + pair = EXCHANGES[exchangename].get("futures_pair", EXCHANGES[exchangename]["pair"]) since = datetime.now(timezone.utc) - timedelta(days=5) funding_fee = exchange._fetch_and_calculate_funding_fees( - pair, 20, is_short=False, open_date=since) + pair, 20, is_short=False, open_date=since + ) assert isinstance(funding_fee, float) # assert funding_fee > 0 def test_ccxt__async_get_trade_history(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - if not (lookback := EXCHANGES[exchangename].get('trades_lookback_hours')): - pytest.skip('test_fetch_trades not enabled for this exchange') - pair = EXCHANGES[exchangename]['pair'] + if not (lookback := EXCHANGES[exchangename].get("trades_lookback_hours")): + pytest.skip("test_fetch_trades not enabled for this exchange") + pair = EXCHANGES[exchangename]["pair"] since = int((datetime.now(timezone.utc) - timedelta(hours=lookback)).timestamp() * 1000) - res = exch.loop.run_until_complete( - exch._async_get_trade_history(pair, since, None, None) - ) + res = exch.loop.run_until_complete(exch._async_get_trade_history(pair, since, None, None)) assert len(res) == 2 res_pair, res_trades = res assert res_pair == pair @@ -352,85 +383,73 @@ class TestCCXTExchange: def test_ccxt_get_fee(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - pair = EXCHANGES[exchangename]['pair'] + pair = EXCHANGES[exchangename]["pair"] threshold = 0.01 - assert 0 < exch.get_fee(pair, 'limit', 'buy') < threshold - assert 0 < exch.get_fee(pair, 'limit', 'sell') < threshold - assert 0 < exch.get_fee(pair, 'market', 'buy') < threshold - assert 0 < exch.get_fee(pair, 'market', 'sell') < threshold + assert 0 < exch.get_fee(pair, "limit", "buy") < threshold + assert 0 < exch.get_fee(pair, "limit", "sell") < threshold + assert 0 < exch.get_fee(pair, "market", "buy") < threshold + assert 0 < exch.get_fee(pair, "market", 
"sell") < threshold def test_ccxt_get_max_leverage_spot(self, exchange: EXCHANGE_FIXTURE_TYPE): spot, spot_name = exchange if spot: - leverage_in_market_spot = EXCHANGES[spot_name].get('leverage_in_spot_market') + leverage_in_market_spot = EXCHANGES[spot_name].get("leverage_in_spot_market") if leverage_in_market_spot: - spot_pair = EXCHANGES[spot_name].get('pair', EXCHANGES[spot_name]['pair']) + spot_pair = EXCHANGES[spot_name].get("pair", EXCHANGES[spot_name]["pair"]) spot_leverage = spot.get_max_leverage(spot_pair, 20) - assert (isinstance(spot_leverage, float) or isinstance(spot_leverage, int)) + assert isinstance(spot_leverage, float) or isinstance(spot_leverage, int) assert spot_leverage >= 1.0 def test_ccxt_get_max_leverage_futures(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): futures, futures_name = exchange_futures - leverage_tiers_public = EXCHANGES[futures_name].get('leverage_tiers_public') + leverage_tiers_public = EXCHANGES[futures_name].get("leverage_tiers_public") if leverage_tiers_public: futures_pair = EXCHANGES[futures_name].get( - 'futures_pair', - EXCHANGES[futures_name]['pair'] + "futures_pair", EXCHANGES[futures_name]["pair"] ) futures_leverage = futures.get_max_leverage(futures_pair, 20) - assert (isinstance(futures_leverage, float) or isinstance(futures_leverage, int)) + assert isinstance(futures_leverage, float) or isinstance(futures_leverage, int) assert futures_leverage >= 1.0 def test_ccxt_get_contract_size(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): futures, futures_name = exchange_futures - futures_pair = EXCHANGES[futures_name].get( - 'futures_pair', - EXCHANGES[futures_name]['pair'] - ) + futures_pair = EXCHANGES[futures_name].get("futures_pair", EXCHANGES[futures_name]["pair"]) contract_size = futures.get_contract_size(futures_pair) - assert (isinstance(contract_size, float) or isinstance(contract_size, int)) + assert isinstance(contract_size, float) or isinstance(contract_size, int) assert contract_size >= 0.0 def test_ccxt_load_leverage_tiers(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): futures, futures_name = exchange_futures - if EXCHANGES[futures_name].get('leverage_tiers_public'): + if EXCHANGES[futures_name].get("leverage_tiers_public"): leverage_tiers = futures.load_leverage_tiers() futures_pair = EXCHANGES[futures_name].get( - 'futures_pair', - EXCHANGES[futures_name]['pair'] + "futures_pair", EXCHANGES[futures_name]["pair"] ) - assert (isinstance(leverage_tiers, dict)) + assert isinstance(leverage_tiers, dict) assert futures_pair in leverage_tiers pair_tiers = leverage_tiers[futures_pair] assert len(pair_tiers) > 0 - oldLeverage = float('inf') + oldLeverage = float("inf") oldMaintenanceMarginRate = oldminNotional = oldmaxNotional = -1 for tier in pair_tiers: - for key in [ - 'maintenanceMarginRate', - 'minNotional', - 'maxNotional', - 'maxLeverage' - ]: + for key in ["maintenanceMarginRate", "minNotional", "maxNotional", "maxLeverage"]: assert key in tier assert tier[key] >= 0.0 - assert tier['maxNotional'] > tier['minNotional'] - assert tier['maxLeverage'] <= oldLeverage - assert tier['maintenanceMarginRate'] >= oldMaintenanceMarginRate - assert tier['minNotional'] > oldminNotional - assert tier['maxNotional'] > oldmaxNotional - oldLeverage = tier['maxLeverage'] - oldMaintenanceMarginRate = tier['maintenanceMarginRate'] - oldminNotional = tier['minNotional'] - oldmaxNotional = tier['maxNotional'] + assert tier["maxNotional"] > tier["minNotional"] + assert tier["maxLeverage"] <= oldLeverage + assert tier["maintenanceMarginRate"] >= 
oldMaintenanceMarginRate + assert tier["minNotional"] > oldminNotional + assert tier["maxNotional"] > oldmaxNotional + oldLeverage = tier["maxLeverage"] + oldMaintenanceMarginRate = tier["maintenanceMarginRate"] + oldminNotional = tier["minNotional"] + oldmaxNotional = tier["maxNotional"] def test_ccxt_dry_run_liquidation_price(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): futures, futures_name = exchange_futures - if EXCHANGES[futures_name].get('leverage_tiers_public'): - + if EXCHANGES[futures_name].get("leverage_tiers_public"): futures_pair = EXCHANGES[futures_name].get( - 'futures_pair', - EXCHANGES[futures_name]['pair'] + "futures_pair", EXCHANGES[futures_name]["pair"] ) liquidation_price = futures.dry_run_liquidation_price( @@ -442,7 +461,7 @@ class TestCCXTExchange: leverage=5, wallet_balance=100, ) - assert (isinstance(liquidation_price, float)) + assert isinstance(liquidation_price, float) assert liquidation_price >= 0.0 liquidation_price = futures.dry_run_liquidation_price( @@ -454,20 +473,17 @@ class TestCCXTExchange: leverage=5, wallet_balance=100, ) - assert (isinstance(liquidation_price, float)) + assert isinstance(liquidation_price, float) assert liquidation_price >= 0.0 def test_ccxt_get_max_pair_stake_amount(self, exchange_futures: EXCHANGE_FIXTURE_TYPE): futures, futures_name = exchange_futures - futures_pair = EXCHANGES[futures_name].get( - 'futures_pair', - EXCHANGES[futures_name]['pair'] - ) + futures_pair = EXCHANGES[futures_name].get("futures_pair", EXCHANGES[futures_name]["pair"]) max_stake_amount = futures.get_max_pair_stake_amount(futures_pair, 40000) - assert (isinstance(max_stake_amount, float)) + assert isinstance(max_stake_amount, float) assert max_stake_amount >= 0.0 def test_private_method_presence(self, exchange: EXCHANGE_FIXTURE_TYPE): exch, exchangename = exchange - for method in EXCHANGES[exchangename].get('private_methods', []): + for method in EXCHANGES[exchangename].get("private_methods", []): assert hasattr(exch._api, method) diff --git a/tests/freqai/conftest.py b/tests/freqai/conftest.py index 4a1976d9d..fce01b9ee 100644 --- a/tests/freqai/conftest.py +++ b/tests/freqai/conftest.py @@ -32,13 +32,12 @@ def is_arm() -> bool: @pytest.fixture(autouse=True) def patch_torch_initlogs(mocker) -> None: - if is_mac(): # Mock torch import completely import sys import types - module_name = 'torch' + module_name = "torch" mocked_module = types.ModuleType(module_name) sys.modules[module_name] = mocked_module else: @@ -80,25 +79,23 @@ def freqai_conf(default_conf, tmp_path): "stratify_training_data": 0, "indicator_periods_candles": [10], "shuffle_after_split": False, - "buffer_train_data_candles": 0 + "buffer_train_data_candles": 0, }, "data_split_parameters": {"test_size": 0.33, "shuffle": False}, "model_training_parameters": {"n_estimators": 100}, }, - "config_files": [Path('config_examples', 'config_freqai.example.json')] + "config_files": [Path("config_examples", "config_freqai.example.json")], } ) - freqaiconf['exchange'].update({'pair_whitelist': ['ADA/BTC', 'DASH/BTC', 'ETH/BTC', 'LTC/BTC']}) + freqaiconf["exchange"].update({"pair_whitelist": ["ADA/BTC", "DASH/BTC", "ETH/BTC", "LTC/BTC"]}) return freqaiconf def make_rl_config(conf): conf.update({"strategy": "freqai_rl_test_strat"}) - conf["freqai"].update({"model_training_parameters": { - "learning_rate": 0.00025, - "gamma": 0.9, - "verbose": 1 - }}) + conf["freqai"].update( + {"model_training_parameters": {"learning_rate": 0.00025, "gamma": 0.9, "verbose": 1}} + ) conf["freqai"]["rl_config"] = { 
"train_cycles": 1, "thread_count": 2, @@ -107,31 +104,27 @@ def make_rl_config(conf): "policy_type": "MlpPolicy", "max_training_drawdown_pct": 0.5, "net_arch": [32, 32], - "model_reward_parameters": { - "rr": 1, - "profit_aim": 0.02, - "win_reward_factor": 2 - }, - "drop_ohlc_from_features": False - } + "model_reward_parameters": {"rr": 1, "profit_aim": 0.02, "win_reward_factor": 2}, + "drop_ohlc_from_features": False, + } return conf def mock_pytorch_mlp_model_training_parameters() -> Dict[str, Any]: return { - "learning_rate": 3e-4, - "trainer_kwargs": { - "n_steps": None, - "batch_size": 64, - "n_epochs": 1, - }, - "model_kwargs": { - "hidden_dim": 32, - "dropout_percent": 0.2, - "n_layer": 1, - } - } + "learning_rate": 3e-4, + "trainer_kwargs": { + "n_steps": None, + "batch_size": 64, + "n_epochs": 1, + }, + "model_kwargs": { + "hidden_dim": 32, + "dropout_percent": 0.2, + "n_layer": 1, + }, + } def get_patched_data_kitchen(mocker, freqaiconf): @@ -178,14 +171,14 @@ def make_unfiltered_dataframe(mocker, freqai_conf): new_timerange = TimeRange.parse_timerange("20180120-20180130") corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes( - data_load_timerange, freqai.dk.pair, freqai.dk - ) + data_load_timerange, freqai.dk.pair, freqai.dk + ) unfiltered_dataframe = freqai.dk.use_strategy_to_populate_indicators( - strategy, corr_dataframes, base_dataframes, freqai.dk.pair - ) + strategy, corr_dataframes, base_dataframes, freqai.dk.pair + ) for i in range(5): - unfiltered_dataframe[f'constant_{i}'] = i + unfiltered_dataframe[f"constant_{i}"] = i unfiltered_dataframe = freqai.dk.slice_dataframe(new_timerange, unfiltered_dataframe) @@ -212,23 +205,23 @@ def make_data_dictionary(mocker, freqai_conf): new_timerange = TimeRange.parse_timerange("20180120-20180130") corr_dataframes, base_dataframes = freqai.dd.get_base_and_corr_dataframes( - data_load_timerange, freqai.dk.pair, freqai.dk - ) + data_load_timerange, freqai.dk.pair, freqai.dk + ) unfiltered_dataframe = freqai.dk.use_strategy_to_populate_indicators( - strategy, corr_dataframes, base_dataframes, freqai.dk.pair - ) + strategy, corr_dataframes, base_dataframes, freqai.dk.pair + ) unfiltered_dataframe = freqai.dk.slice_dataframe(new_timerange, unfiltered_dataframe) freqai.dk.find_features(unfiltered_dataframe) features_filtered, labels_filtered = freqai.dk.filter_features( - unfiltered_dataframe, - freqai.dk.training_features_list, - freqai.dk.label_list, - training_filter=True, - ) + unfiltered_dataframe, + freqai.dk.training_features_list, + freqai.dk.label_list, + training_filter=True, + ) data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered) @@ -247,8 +240,8 @@ def get_freqai_live_analyzed_dataframe(mocker, freqaiconf): timerange = TimeRange.parse_timerange("20180110-20180114") freqai.dk.load_all_pair_histories(timerange) - strategy.analyze_pair('ADA/BTC', '5m') - return strategy.dp.get_analyzed_dataframe('ADA/BTC', '5m') + strategy.analyze_pair("ADA/BTC", "5m") + return strategy.dp.get_analyzed_dataframe("ADA/BTC", "5m") def get_freqai_analyzed_dataframe(mocker, freqaiconf): @@ -264,7 +257,7 @@ def get_freqai_analyzed_dataframe(mocker, freqaiconf): sub_timerange = TimeRange.parse_timerange("20180111-20180114") corr_df, base_df = freqai.dk.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC") - return freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC') + return freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") def 
get_ready_to_train(mocker, freqaiconf): diff --git a/tests/freqai/test_freqai_backtesting.py b/tests/freqai/test_freqai_backtesting.py index 808f37ce5..e689d3927 100644 --- a/tests/freqai/test_freqai_backtesting.py +++ b/tests/freqai/test_freqai_backtesting.py @@ -14,8 +14,14 @@ from freqtrade.enums.candletype import CandleType from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.optimize.backtesting import Backtesting -from tests.conftest import (CURRENT_TEST_STRATEGY, get_args, get_patched_exchange, log_has_re, - patch_exchange, patched_configuration_load_config_file) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + get_args, + get_patched_exchange, + log_has_re, + patch_exchange, + patched_configuration_load_config_file, +) from tests.freqai.conftest import get_patched_freqai_strategy @@ -23,26 +29,34 @@ def test_freqai_backtest_start_backtest_list(freqai_conf, mocker, testdatadir, c patch_exchange(mocker) now = datetime.now(timezone.utc) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT'])) - mocker.patch('freqtrade.optimize.backtesting.history.load_data') - mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["HULUMULU/USDT", "XRP/USDT"]), + ) + mocker.patch("freqtrade.optimize.backtesting.history.load_data") + mocker.patch("freqtrade.optimize.backtesting.history.get_timerange", return_value=(now, now)) patched_configuration_load_config_file(mocker, freqai_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '1m', - '--strategy-list', CURRENT_TEST_STRATEGY + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1m", + "--strategy-list", + CURRENT_TEST_STRATEGY, ] args = get_args(args) bt_config = setup_optimize_configuration(args, RunMode.BACKTEST) Backtesting(bt_config) - assert log_has_re('Using --strategy-list with FreqAI REQUIRES all strategies to have identical', - caplog) + assert log_has_re( + "Using --strategy-list with FreqAI REQUIRES all strategies to have identical", caplog + ) Backtesting.cleanup() @@ -54,23 +68,29 @@ def test_freqai_backtest_start_backtest_list(freqai_conf, mocker, testdatadir, c ("1d", 302), ], ) -def test_freqai_backtest_load_data(freqai_conf, mocker, caplog, - timeframe, expected_startup_candle_count): +def test_freqai_backtest_load_data( + freqai_conf, mocker, caplog, timeframe, expected_startup_candle_count +): patch_exchange(mocker) now = datetime.now(timezone.utc) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT'])) - mocker.patch('freqtrade.optimize.backtesting.history.load_data') - mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) - freqai_conf['timeframe'] = timeframe - freqai_conf.get('freqai', {}).get('feature_parameters', {}).update({'include_timeframes': []}) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["HULUMULU/USDT", "XRP/USDT"]), + ) + 
mocker.patch("freqtrade.optimize.backtesting.history.load_data") + mocker.patch("freqtrade.optimize.backtesting.history.get_timerange", return_value=(now, now)) + freqai_conf["timeframe"] = timeframe + freqai_conf.get("freqai", {}).get("feature_parameters", {}).update({"include_timeframes": []}) backtesting = Backtesting(deepcopy(freqai_conf)) backtesting.load_bt_data() - assert log_has_re(f'Increasing startup_candle_count for freqai on {timeframe} ' - f'to {expected_startup_candle_count}', caplog) - assert history.load_data.call_args[1]['startup_candles'] == expected_startup_candle_count + assert log_has_re( + f"Increasing startup_candle_count for freqai on {timeframe} " + f"to {expected_startup_candle_count}", + caplog, + ) + assert history.load_data.call_args[1]["startup_candles"] == expected_startup_candle_count Backtesting.cleanup() @@ -79,45 +99,55 @@ def test_freqai_backtest_live_models_model_not_found(freqai_conf, mocker, testda patch_exchange(mocker) now = datetime.now(timezone.utc) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT'])) - mocker.patch('freqtrade.optimize.backtesting.history.load_data') - mocker.patch('freqtrade.optimize.backtesting.history.get_timerange', return_value=(now, now)) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["HULUMULU/USDT", "XRP/USDT"]), + ) + mocker.patch("freqtrade.optimize.backtesting.history.load_data") + mocker.patch("freqtrade.optimize.backtesting.history.get_timerange", return_value=(now, now)) freqai_conf["timerange"] = "" freqai_conf.get("freqai", {}).update({"backtest_using_historic_predictions": False}) patched_configuration_load_config_file(mocker, freqai_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '5m', - '--freqai-backtest-live-models' + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "5m", + "--freqai-backtest-live-models", ] args = get_args(args) bt_config = setup_optimize_configuration(args, RunMode.BACKTEST) - with pytest.raises(OperationalException, - match=r".* Historic predictions data is required to run backtest .*"): + with pytest.raises( + OperationalException, match=r".* Historic predictions data is required to run backtest .*" + ): Backtesting(bt_config) Backtesting.cleanup() def test_freqai_backtest_consistent_timerange(mocker, freqai_conf): - freqai_conf['runmode'] = 'backtest' - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/USDT:USDT'])) + freqai_conf["runmode"] = "backtest" + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/USDT:USDT"]), + ) - gbs = mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats') + gbs = mocker.patch("freqtrade.optimize.backtesting.generate_backtest_stats") - freqai_conf['candle_type_def'] = CandleType.FUTURES - freqai_conf.get('exchange', {}).update({'pair_whitelist': ['XRP/USDT:USDT']}) - freqai_conf.get('freqai', {}).get('feature_parameters', {}).update( - {'include_timeframes': ['5m', '1h'], 'include_corr_pairlist': []}) - freqai_conf['timerange'] = '20211120-20211121' + freqai_conf["candle_type_def"] = CandleType.FUTURES + 
freqai_conf.get("exchange", {}).update({"pair_whitelist": ["XRP/USDT:USDT"]}) + freqai_conf.get("freqai", {}).get("feature_parameters", {}).update( + {"include_timeframes": ["5m", "1h"], "include_corr_pairlist": []} + ) + freqai_conf["timerange"] = "20211120-20211121" strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) @@ -133,6 +163,6 @@ def test_freqai_backtest_consistent_timerange(mocker, freqai_conf): backtesting = Backtesting(deepcopy(freqai_conf)) backtesting.start() - assert gbs.call_args[1]['min_date'] == datetime(2021, 11, 20, 0, 0, tzinfo=timezone.utc) - assert gbs.call_args[1]['max_date'] == datetime(2021, 11, 21, 0, 0, tzinfo=timezone.utc) + assert gbs.call_args[1]["min_date"] == datetime(2021, 11, 20, 0, 0, tzinfo=timezone.utc) + assert gbs.call_args[1]["max_date"] == datetime(2021, 11, 21, 0, 0, tzinfo=timezone.utc) Backtesting.cleanup() diff --git a/tests/freqai/test_freqai_datadrawer.py b/tests/freqai/test_freqai_datadrawer.py index 548fad650..037691d50 100644 --- a/tests/freqai/test_freqai_datadrawer.py +++ b/tests/freqai/test_freqai_datadrawer.py @@ -1,4 +1,3 @@ - import shutil from pathlib import Path from unittest.mock import patch @@ -15,7 +14,7 @@ from tests.freqai.conftest import get_patched_freqai_strategy def test_update_historic_data(mocker, freqai_conf): - freqai_conf['runmode'] = 'backtest' + freqai_conf["runmode"] = "backtest" strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -99,7 +98,7 @@ def test_use_strategy_to_populate_indicators(mocker, freqai_conf): sub_timerange = TimeRange.parse_timerange("20180111-20180114") corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(sub_timerange, "LTC/BTC", freqai.dk) - df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, 'LTC/BTC') + df = freqai.dk.use_strategy_to_populate_indicators(strategy, corr_df, base_df, "LTC/BTC") assert len(df.columns) == 33 shutil.rmtree(Path(freqai.dk.full_path)) @@ -133,10 +132,7 @@ def test_get_timerange_from_backtesting_live_df_pred_not_found(mocker, freqai_co exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) freqai = strategy.freqai - with pytest.raises( - OperationalException, - match=r'Historic predictions not found.*' - ): + with pytest.raises(OperationalException, match=r"Historic predictions not found.*"): freqai.dd.get_timerange_from_live_historic_predictions() @@ -158,13 +154,10 @@ def test_set_initial_return_values(mocker, freqai_conf): start_x_plus_1 = "2023-08-30" end_x_plus_5 = "2023-09-03" - historic_data = { - 'date_pred': pd.date_range(end=end_x, periods=5), - 'value': range(1, 6) - } + historic_data = {"date_pred": pd.date_range(end=end_x, periods=5), "value": range(1, 6)} new_data = { - 'date': pd.date_range(start=start_x_plus_1, end=end_x_plus_5), - 'value': range(6, 11) + "date": pd.date_range(start=start_x_plus_1, end=end_x_plus_5), + "value": range(6, 11), } freqai.dd.historic_predictions[pair] = pd.DataFrame(historic_data) @@ -173,20 +166,21 @@ def test_set_initial_return_values(mocker, freqai_conf): dataframe = pd.DataFrame(new_data) # Action - with patch('logging.Logger.warning') as mock_logger_warning: + with patch("logging.Logger.warning") as mock_logger_warning: freqai.dd.set_initial_return_values(pair, new_pred_df, dataframe) # Assertions hist_pred_df = freqai.dd.historic_predictions[pair] model_return_df = 
freqai.dd.model_return_values[pair] - assert hist_pred_df['date_pred'].iloc[-1] == pd.Timestamp(end_x_plus_5) - assert 'date_pred' in hist_pred_df.columns + assert hist_pred_df["date_pred"].iloc[-1] == pd.Timestamp(end_x_plus_5) + assert "date_pred" in hist_pred_df.columns assert hist_pred_df.shape[0] == 8 # compare values in model_return_df with hist_pred_df - assert (model_return_df["value"].values == - hist_pred_df.tail(len(dataframe))["value"].values).all() + assert ( + model_return_df["value"].values == hist_pred_df.tail(len(dataframe))["value"].values + ).all() assert model_return_df.shape[0] == len(dataframe) # Ensure logger error is not called @@ -212,13 +206,10 @@ def test_set_initial_return_values_warning(mocker, freqai_conf): start_x_plus_1 = "2023-09-01" end_x_plus_5 = "2023-09-05" - historic_data = { - 'date_pred': pd.date_range(end=end_x, periods=5), - 'value': range(1, 6) - } + historic_data = {"date_pred": pd.date_range(end=end_x, periods=5), "value": range(1, 6)} new_data = { - 'date': pd.date_range(start=start_x_plus_1, end=end_x_plus_5), - 'value': range(6, 11) + "date": pd.date_range(start=start_x_plus_1, end=end_x_plus_5), + "value": range(6, 11), } freqai.dd.historic_predictions[pair] = pd.DataFrame(historic_data) @@ -227,20 +218,21 @@ def test_set_initial_return_values_warning(mocker, freqai_conf): dataframe = pd.DataFrame(new_data) # Action - with patch('logging.Logger.warning') as mock_logger_warning: + with patch("logging.Logger.warning") as mock_logger_warning: freqai.dd.set_initial_return_values(pair, new_pred_df, dataframe) # Assertions hist_pred_df = freqai.dd.historic_predictions[pair] model_return_df = freqai.dd.model_return_values[pair] - assert hist_pred_df['date_pred'].iloc[-1] == pd.Timestamp(end_x_plus_5) - assert 'date_pred' in hist_pred_df.columns + assert hist_pred_df["date_pred"].iloc[-1] == pd.Timestamp(end_x_plus_5) + assert "date_pred" in hist_pred_df.columns assert hist_pred_df.shape[0] == 10 # compare values in model_return_df with hist_pred_df - assert (model_return_df["value"].values == hist_pred_df.tail( - len(dataframe))["value"].values).all() + assert ( + model_return_df["value"].values == hist_pred_df.tail(len(dataframe))["value"].values + ).all() assert model_return_df.shape[0] == len(dataframe) # Ensure logger error is not called diff --git a/tests/freqai/test_freqai_datakitchen.py b/tests/freqai/test_freqai_datakitchen.py index ba1520601..27efc3a66 100644 --- a/tests/freqai/test_freqai_datakitchen.py +++ b/tests/freqai/test_freqai_datakitchen.py @@ -11,8 +11,12 @@ from freqtrade.data.dataprovider import DataProvider from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from tests.conftest import get_patched_exchange -from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy, is_mac, - make_unfiltered_dataframe) +from tests.freqai.conftest import ( + get_patched_data_kitchen, + get_patched_freqai_strategy, + is_mac, + make_unfiltered_dataframe, +) @pytest.mark.parametrize( @@ -63,7 +67,6 @@ def test_split_timerange( def test_check_if_model_expired(mocker, freqai_conf): - dk = get_patched_data_kitchen(mocker, freqai_conf) now = datetime.now(tz=timezone.utc).timestamp() assert dk.check_if_model_expired(now) is False @@ -77,10 +80,10 @@ def test_filter_features(mocker, freqai_conf): freqai.dk.find_features(unfiltered_dataframe) filtered_df, _labels = freqai.dk.filter_features( - unfiltered_dataframe, - freqai.dk.training_features_list, - 
freqai.dk.label_list, - training_filter=True, + unfiltered_dataframe, + freqai.dk.training_features_list, + freqai.dk.label_list, + training_filter=True, ) assert len(filtered_df.columns) == 14 @@ -91,22 +94,20 @@ def test_make_train_test_datasets(mocker, freqai_conf): freqai.dk.find_features(unfiltered_dataframe) features_filtered, labels_filtered = freqai.dk.filter_features( - unfiltered_dataframe, - freqai.dk.training_features_list, - freqai.dk.label_list, - training_filter=True, - ) + unfiltered_dataframe, + freqai.dk.training_features_list, + freqai.dk.label_list, + training_filter=True, + ) data_dictionary = freqai.dk.make_train_test_datasets(features_filtered, labels_filtered) assert data_dictionary assert len(data_dictionary) == 7 - assert len(data_dictionary['train_features'].index) == 1916 + assert len(data_dictionary["train_features"].index) == 1916 -@pytest.mark.parametrize('model', [ - 'LightGBMRegressor' - ]) +@pytest.mark.parametrize("model", ["LightGBMRegressor"]) def test_get_full_model_path(mocker, freqai_conf, model): freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) @@ -130,9 +131,10 @@ def test_get_full_model_path(mocker, freqai_conf, model): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.dk.set_paths('ADA/BTC', None) + freqai.dk.set_paths("ADA/BTC", None) freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange + ) model_path = freqai.dk.get_full_models_path(freqai_conf) assert model_path.is_dir() is True @@ -157,7 +159,7 @@ def test_get_pair_data_for_features_with_prealoaded_data(mocker, freqai_conf): def test_get_pair_data_for_features_without_preloaded_data(mocker, freqai_conf): freqai_conf.update({"timerange": "20180115-20180130"}) - freqai_conf['runmode'] = 'backtest' + freqai_conf["runmode"] = "backtest" strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) @@ -168,13 +170,13 @@ def test_get_pair_data_for_features_without_preloaded_data(mocker, freqai_conf): timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) - base_df = {'5m': pd.DataFrame()} + base_df = {"5m": pd.DataFrame()} df = freqai.dk.get_pair_data_for_features("LTC/BTC", "5m", strategy, base_dataframes=base_df) assert df is not base_df["5m"] assert not df.empty - assert df.iloc[0]['date'].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-11 23:00:00" - assert df.iloc[-1]['date'].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-30 00:00:00" + assert df.iloc[0]["date"].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-11 23:00:00" + assert df.iloc[-1]["date"].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-30 00:00:00" def test_populate_features(mocker, freqai_conf): @@ -188,12 +190,14 @@ def test_populate_features(mocker, freqai_conf): freqai.dd.load_all_pair_histories(timerange, freqai.dk) corr_df, base_df = freqai.dd.get_base_and_corr_dataframes(timerange, "LTC/BTC", freqai.dk) - mocker.patch.object(strategy, 'feature_engineering_expand_all', return_value=base_df["5m"]) - df = freqai.dk.populate_features(base_df["5m"], "LTC/BTC", strategy, - base_dataframes=base_df, corr_dataframes=corr_df) + mocker.patch.object(strategy, "feature_engineering_expand_all", return_value=base_df["5m"]) + df = freqai.dk.populate_features( + base_df["5m"], "LTC/BTC", strategy, 
base_dataframes=base_df, corr_dataframes=corr_df + ) strategy.feature_engineering_expand_all.assert_called_once() - pd.testing.assert_frame_equal(base_df["5m"], - strategy.feature_engineering_expand_all.call_args[0][0]) + pd.testing.assert_frame_equal( + base_df["5m"], strategy.feature_engineering_expand_all.call_args[0][0] + ) - assert df.iloc[0]['date'].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-15 00:00:00" + assert df.iloc[0]["date"].strftime("%Y-%m-%d %H:%M:%S") == "2018-01-15 00:00:00" diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index 178984818..2779ddcb8 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py @@ -14,12 +14,17 @@ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade from freqtrade.plugins.pairlistmanager import PairListManager from tests.conftest import EXMS, create_mock_trades, get_patched_exchange, log_has_re -from tests.freqai.conftest import (get_patched_freqai_strategy, is_arm, is_mac, make_rl_config, - mock_pytorch_mlp_model_training_parameters) +from tests.freqai.conftest import ( + get_patched_freqai_strategy, + is_arm, + is_mac, + make_rl_config, + mock_pytorch_mlp_model_training_parameters, +) def can_run_model(model: str) -> None: - is_pytorch_model = 'Reinforcement' in model or 'PyTorch' in model + is_pytorch_model = "Reinforcement" in model or "PyTorch" in model if is_arm() and "Catboost" in model: pytest.skip("CatBoost is not supported on ARM.") @@ -28,57 +33,59 @@ def can_run_model(model: str) -> None: pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.") -@pytest.mark.parametrize('model, pca, dbscan, float32, can_short, shuffle, buffer, noise', [ - ('LightGBMRegressor', True, False, True, True, False, 0, 0), - ('XGBoostRegressor', False, True, False, True, False, 10, 0.05), - ('XGBoostRFRegressor', False, False, False, True, False, 0, 0), - ('CatboostRegressor', False, False, False, True, True, 0, 0), - ('PyTorchMLPRegressor', False, False, False, False, False, 0, 0), - ('PyTorchTransformerRegressor', False, False, False, False, False, 0, 0), - ('ReinforcementLearner', False, True, False, True, False, 0, 0), - ('ReinforcementLearner_multiproc', False, False, False, True, False, 0, 0), - ('ReinforcementLearner_test_3ac', False, False, False, False, False, 0, 0), - ('ReinforcementLearner_test_3ac', False, False, False, True, False, 0, 0), - ('ReinforcementLearner_test_4ac', False, False, False, True, False, 0, 0), - ]) -def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, - dbscan, float32, can_short, shuffle, - buffer, noise): - +@pytest.mark.parametrize( + "model, pca, dbscan, float32, can_short, shuffle, buffer, noise", + [ + ("LightGBMRegressor", True, False, True, True, False, 0, 0), + ("XGBoostRegressor", False, True, False, True, False, 10, 0.05), + ("XGBoostRFRegressor", False, False, False, True, False, 0, 0), + ("CatboostRegressor", False, False, False, True, True, 0, 0), + ("PyTorchMLPRegressor", False, False, False, False, False, 0, 0), + ("PyTorchTransformerRegressor", False, False, False, False, False, 0, 0), + ("ReinforcementLearner", False, True, False, True, False, 0, 0), + ("ReinforcementLearner_multiproc", False, False, False, True, False, 0, 0), + ("ReinforcementLearner_test_3ac", False, False, False, False, False, 0, 0), + ("ReinforcementLearner_test_3ac", False, False, False, True, False, 0, 0), + ("ReinforcementLearner_test_4ac", False, False, 
False, True, False, 0, 0), + ], +) +def test_extract_data_and_train_model_Standard( + mocker, freqai_conf, model, pca, dbscan, float32, can_short, shuffle, buffer, noise +): can_run_model(model) test_tb = True if is_mac(): test_tb = False - model_save_ext = 'joblib' + model_save_ext = "joblib" freqai_conf.update({"freqaimodel": model}) freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_test_strat"}) - freqai_conf['freqai']['feature_parameters'].update({"principal_component_analysis": pca}) - freqai_conf['freqai']['feature_parameters'].update({"use_DBSCAN_to_remove_outliers": dbscan}) + freqai_conf["freqai"]["feature_parameters"].update({"principal_component_analysis": pca}) + freqai_conf["freqai"]["feature_parameters"].update({"use_DBSCAN_to_remove_outliers": dbscan}) freqai_conf.update({"reduce_df_footprint": float32}) - freqai_conf['freqai']['feature_parameters'].update({"shuffle_after_split": shuffle}) - freqai_conf['freqai']['feature_parameters'].update({"buffer_train_data_candles": buffer}) - freqai_conf['freqai']['feature_parameters'].update({"noise_standard_deviation": noise}) + freqai_conf["freqai"]["feature_parameters"].update({"shuffle_after_split": shuffle}) + freqai_conf["freqai"]["feature_parameters"].update({"buffer_train_data_candles": buffer}) + freqai_conf["freqai"]["feature_parameters"].update({"noise_standard_deviation": noise}) - if 'ReinforcementLearner' in model: - model_save_ext = 'zip' + if "ReinforcementLearner" in model: + model_save_ext = "zip" freqai_conf = make_rl_config(freqai_conf) # test the RL guardrails - freqai_conf['freqai']['feature_parameters'].update({"use_SVM_to_remove_outliers": True}) - freqai_conf['freqai']['feature_parameters'].update({"DI_threshold": 2}) - freqai_conf['freqai']['data_split_parameters'].update({'shuffle': True}) + freqai_conf["freqai"]["feature_parameters"].update({"use_SVM_to_remove_outliers": True}) + freqai_conf["freqai"]["feature_parameters"].update({"DI_threshold": 2}) + freqai_conf["freqai"]["data_split_parameters"].update({"shuffle": True}) - if 'test_3ac' in model or 'test_4ac' in model: + if "test_3ac" in model or "test_4ac" in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") freqai_conf["freqai"]["rl_config"]["drop_ohlc_from_features"] = True - if 'PyTorch' in model: - model_save_ext = 'zip' + if "PyTorch" in model: + model_save_ext = "zip" pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters() - freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp) - if 'Transformer' in model: + freqai_conf["freqai"]["model_training_parameters"].update(pytorch_mlp_mtp) + if "Transformer" in model: # transformer model takes a window, unlike the MLP regressor freqai_conf.update({"conv_width": 10}) @@ -92,7 +99,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, freqai.can_short = can_short freqai.dk = FreqaiDataKitchen(freqai_conf) freqai.dk.live = True - freqai.dk.set_paths('ADA/BTC', 10000) + freqai.dk.set_paths("ADA/BTC", 10000) timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) @@ -100,32 +107,37 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca, data_load_timerange = TimeRange.parse_timerange("20180125-20180130") new_timerange = TimeRange.parse_timerange("20180127-20180130") - freqai.dk.set_paths('ADA/BTC', None) + freqai.dk.set_paths("ADA/BTC", None) freqai.train_timer("start", 
"ADA/BTC") freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange + ) freqai.train_timer("stop", "ADA/BTC") freqai.dd.save_metric_tracker_to_disk() freqai.dd.save_drawer_to_disk() assert Path(freqai.dk.full_path / "metric_tracker.json").is_file() assert Path(freqai.dk.full_path / "pair_dictionary.json").is_file() - assert Path(freqai.dk.data_path / - f"{freqai.dk.model_filename}_model.{model_save_ext}").is_file() + assert Path( + freqai.dk.data_path / f"{freqai.dk.model_filename}_model.{model_save_ext}" + ).is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() shutil.rmtree(Path(freqai.dk.full_path)) -@pytest.mark.parametrize('model, strat', [ - ('LightGBMRegressorMultiTarget', "freqai_test_multimodel_strat"), - ('XGBoostRegressorMultiTarget', "freqai_test_multimodel_strat"), - ('CatboostRegressorMultiTarget', "freqai_test_multimodel_strat"), - ('LightGBMClassifierMultiTarget', "freqai_test_multimodel_classifier_strat"), - ('CatboostClassifierMultiTarget', "freqai_test_multimodel_classifier_strat") - ]) +@pytest.mark.parametrize( + "model, strat", + [ + ("LightGBMRegressorMultiTarget", "freqai_test_multimodel_strat"), + ("XGBoostRegressorMultiTarget", "freqai_test_multimodel_strat"), + ("CatboostRegressorMultiTarget", "freqai_test_multimodel_strat"), + ("LightGBMClassifierMultiTarget", "freqai_test_multimodel_classifier_strat"), + ("CatboostClassifierMultiTarget", "freqai_test_multimodel_classifier_strat"), + ], +) def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, strat): can_run_model(model) @@ -147,28 +159,32 @@ def test_extract_data_and_train_model_MultiTargets(mocker, freqai_conf, model, s data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.dk.set_paths('ADA/BTC', None) + freqai.dk.set_paths("ADA/BTC", None) freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange + ) assert len(freqai.dk.label_list) == 2 assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_model.joblib").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").is_file() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").is_file() - assert len(freqai.dk.data['training_features_list']) == 14 + assert len(freqai.dk.data["training_features_list"]) == 14 shutil.rmtree(Path(freqai.dk.full_path)) -@pytest.mark.parametrize('model', [ - 'LightGBMClassifier', - 'CatboostClassifier', - 'XGBoostClassifier', - 'XGBoostRFClassifier', - 'SKLearnRandomForestClassifier', - 'PyTorchMLPClassifier', - ]) +@pytest.mark.parametrize( + "model", + [ + "LightGBMClassifier", + "CatboostClassifier", + "XGBoostClassifier", + "XGBoostRFClassifier", + "SKLearnRandomForestClassifier", + "PyTorchMLPClassifier", + ], +) def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): can_run_model(model) @@ -191,25 +207,28 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): data_load_timerange = TimeRange.parse_timerange("20180110-20180130") new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.dk.set_paths('ADA/BTC', None) + 
freqai.dk.set_paths("ADA/BTC", None) - freqai.extract_data_and_train_model(new_timerange, "ADA/BTC", - strategy, freqai.dk, data_load_timerange) + freqai.extract_data_and_train_model( + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange + ) - if 'PyTorchMLPClassifier': + if "PyTorchMLPClassifier": pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters() - freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp) + freqai_conf["freqai"]["model_training_parameters"].update(pytorch_mlp_mtp) - if freqai.dd.model_type == 'joblib': + if freqai.dd.model_type == "joblib": model_file_extension = ".joblib" elif freqai.dd.model_type == "pytorch": model_file_extension = ".zip" else: - raise Exception(f"Unsupported model type: {freqai.dd.model_type}," - f" can't assign model_file_extension") + raise Exception( + f"Unsupported model type: {freqai.dd.model_type}, can't assign model_file_extension" + ) - assert Path(freqai.dk.data_path / - f"{freqai.dk.model_filename}_model{model_file_extension}").exists() + assert Path( + freqai.dk.data_path / f"{freqai.dk.model_filename}_model{model_file_extension}" + ).exists() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_metadata.json").exists() assert Path(freqai.dk.data_path / f"{freqai.dk.model_filename}_trained_df.pkl").exists() @@ -228,9 +247,9 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model): ("XGBoostClassifier", 2, "freqai_test_classifier"), ("LightGBMClassifier", 2, "freqai_test_classifier"), ("CatboostClassifier", 2, "freqai_test_classifier"), - ("PyTorchMLPClassifier", 2, "freqai_test_classifier") + ("PyTorchMLPClassifier", 2, "freqai_test_classifier"), ], - ) +) def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog): can_run_model(model) test_tb = True @@ -238,7 +257,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) test_tb = False freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) - freqai_conf['runmode'] = RunMode.BACKTEST + freqai_conf["runmode"] = RunMode.BACKTEST Trade.use_db = False @@ -246,21 +265,22 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) freqai_conf.update({"timerange": "20180120-20180130"}) freqai_conf.update({"strategy": strat}) - if 'ReinforcementLearner' in model: + if "ReinforcementLearner" in model: freqai_conf = make_rl_config(freqai_conf) - if 'test_4ac' in model: + if "test_4ac" in model: freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models") - if 'PyTorch' in model: + if "PyTorch" in model: pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters() - freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp) - if 'Transformer' in model: + freqai_conf["freqai"]["model_training_parameters"].update(pytorch_mlp_mtp) + if "Transformer" in model: # transformer model takes a window, unlike the MLP regressor freqai_conf.update({"conv_width": 10}) freqai_conf.get("freqai", {}).get("feature_parameters", {}).update( - {"indicator_periods_candles": [2]}) + {"indicator_periods_candles": [2]} + ) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) @@ -277,7 +297,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) df = base_df[freqai_conf["timeframe"]] metadata = {"pair": "LTC/BTC"} - freqai.dk.set_paths('LTC/BTC', None) + freqai.dk.set_paths("LTC/BTC", None) freqai.start_backtesting(df, 
metadata, freqai.dk, strategy) model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()] @@ -289,13 +309,16 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) def test_start_backtesting_subdaily_backtest_period(mocker, freqai_conf): freqai_conf.update({"timerange": "20180120-20180124"}) - freqai_conf['runmode'] = 'backtest' - freqai_conf.get("freqai", {}).update({ - "backtest_period_days": 0.5, - "save_backtest_models": True, - }) + freqai_conf["runmode"] = "backtest" + freqai_conf.get("freqai", {}).update( + { + "backtest_period_days": 0.5, + "save_backtest_models": True, + } + ) freqai_conf.get("freqai", {}).get("feature_parameters", {}).update( - {"indicator_periods_candles": [2]}) + {"indicator_periods_candles": [2]} + ) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -320,10 +343,11 @@ def test_start_backtesting_subdaily_backtest_period(mocker, freqai_conf): def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): freqai_conf.update({"timerange": "20180120-20180130"}) - freqai_conf['runmode'] = 'backtest' + freqai_conf["runmode"] = "backtest" freqai_conf.get("freqai", {}).update({"save_backtest_models": True}) freqai_conf.get("freqai", {}).get("feature_parameters", {}).update( - {"indicator_periods_candles": [2]}) + {"indicator_periods_candles": [2]} + ) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange) @@ -376,7 +400,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): freqai.dk.pair = pair freqai.start_backtesting(df, metadata, freqai.dk, strategy) - path = (freqai.dd.full_path / freqai.dk.backtest_predictions_folder) + path = freqai.dd.full_path / freqai.dk.backtest_predictions_folder prediction_files = [x for x in path.iterdir() if x.is_file()] assert len(prediction_files) == 2 @@ -384,7 +408,7 @@ def test_start_backtesting_from_existing_folder(mocker, freqai_conf, caplog): def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): - freqai_conf['runmode'] = 'backtest' + freqai_conf["runmode"] = "backtest" freqai_conf.get("freqai", {}).update({"fit_live_predictions_candles": 10}) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) @@ -413,12 +437,12 @@ def test_backtesting_fit_live_predictions(mocker, freqai_conf, caplog): def test_plot_feature_importance(mocker, freqai_conf): - from freqtrade.freqai.utils import plot_feature_importance freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.get("freqai", {}).get("feature_parameters", {}).update( - {"princpial_component_analysis": "true"}) + {"princpial_component_analysis": "true"} + ) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) @@ -431,15 +455,22 @@ def test_plot_feature_importance(mocker, freqai_conf): timerange = TimeRange.parse_timerange("20180110-20180130") freqai.dd.load_all_pair_histories(timerange, freqai.dk) - freqai.dd.pair_dict = {"ADA/BTC": {"model_filename": "fake_name", - "trained_timestamp": 1, "data_path": "", "extras": {}}} + freqai.dd.pair_dict = { + "ADA/BTC": { + "model_filename": "fake_name", + "trained_timestamp": 1, + "data_path": "", + "extras": {}, + } + } data_load_timerange = TimeRange.parse_timerange("20180110-20180130") 
new_timerange = TimeRange.parse_timerange("20180120-20180130") - freqai.dk.set_paths('ADA/BTC', None) + freqai.dk.set_paths("ADA/BTC", None) freqai.extract_data_and_train_model( - new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange) + new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange + ) model = freqai.dd.load_data("ADA/BTC", freqai.dk) @@ -450,17 +481,21 @@ def test_plot_feature_importance(mocker, freqai_conf): shutil.rmtree(Path(freqai.dk.full_path)) -@pytest.mark.parametrize('timeframes,corr_pairs', [ - (['5m'], ['ADA/BTC', 'DASH/BTC']), - (['5m'], ['ADA/BTC', 'DASH/BTC', 'ETH/USDT']), - (['5m', '15m'], ['ADA/BTC', 'DASH/BTC', 'ETH/USDT']), -]) +@pytest.mark.parametrize( + "timeframes,corr_pairs", + [ + (["5m"], ["ADA/BTC", "DASH/BTC"]), + (["5m"], ["ADA/BTC", "DASH/BTC", "ETH/USDT"]), + (["5m", "15m"], ["ADA/BTC", "DASH/BTC", "ETH/USDT"]), + ], +) def test_freqai_informative_pairs(mocker, freqai_conf, timeframes, corr_pairs): - freqai_conf['freqai']['feature_parameters'].update({ - 'include_timeframes': timeframes, - 'include_corr_pairlist': corr_pairs, - - }) + freqai_conf["freqai"]["feature_parameters"].update( + { + "include_timeframes": timeframes, + "include_corr_pairlist": corr_pairs, + } + ) strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) pairlists = PairListManager(exchange, freqai_conf) @@ -502,8 +537,8 @@ def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmp_path): exchange = get_patched_exchange(mocker, freqai_conf) pairlist = PairListManager(exchange, freqai_conf) strategy.dp = DataProvider(freqai_conf, exchange, pairlist) - freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist'] - freqai_conf['datadir'] = tmp_path + freqai_conf["pairs"] = freqai_conf["exchange"]["pair_whitelist"] + freqai_conf["datadir"] = tmp_path download_all_data_for_training(strategy.dp, freqai_conf) assert log_has_re( @@ -513,9 +548,8 @@ def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmp_path): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('dp_exists', [(False), (True)]) +@pytest.mark.parametrize("dp_exists", [(False), (True)]) def test_get_state_info(mocker, freqai_conf, dp_exists, caplog, tickers): - if is_mac(): pytest.skip("Reinforcement learning module not available on intel based Mac OS") @@ -523,12 +557,12 @@ def test_get_state_info(mocker, freqai_conf, dp_exists, caplog, tickers): freqai_conf.update({"timerange": "20180110-20180130"}) freqai_conf.update({"strategy": "freqai_rl_test_strat"}) freqai_conf = make_rl_config(freqai_conf) - freqai_conf['entry_pricing']['price_side'] = 'same' - freqai_conf['exit_pricing']['price_side'] = 'same' + freqai_conf["entry_pricing"]["price_side"] = "same" + freqai_conf["exit_pricing"]["price_side"] = "same" strategy = get_patched_freqai_strategy(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf) - ticker_mock = MagicMock(return_value=tickers()['ETH/BTC']) + ticker_mock = MagicMock(return_value=tickers()["ETH/BTC"]) mocker.patch(f"{EXMS}.fetch_ticker", ticker_mock) strategy.dp = DataProvider(freqai_conf, exchange) diff --git a/tests/freqai/test_models/ReinforcementLearner_test_3ac.py b/tests/freqai/test_models/ReinforcementLearner_test_3ac.py index f77120c3c..ec7679883 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_3ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_3ac.py @@ -26,24 +26,25 @@ class 
ReinforcementLearner_test_3ac(ReinforcementLearner): """ def calculate_reward(self, action: int) -> float: - # first, penalize if the action is not valid if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100. + factor = 100.0 # reward agent for entering trades - if (action in (Actions.Buy.value, Actions.Sell.value) - and self._position == Positions.Neutral): + if ( + action in (Actions.Buy.value, Actions.Sell.value) + and self._position == Positions.Neutral + ): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + max_trade_duration = self.rl_config.get("max_trade_duration_candles", 300) trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: @@ -67,4 +68,4 @@ class ReinforcementLearner_test_3ac(ReinforcementLearner): factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(rew * factor) - return 0. + return 0.0 diff --git a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py index 4fc2b0005..4044fc41d 100644 --- a/tests/freqai/test_models/ReinforcementLearner_test_4ac.py +++ b/tests/freqai/test_models/ReinforcementLearner_test_4ac.py @@ -26,24 +26,25 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): """ def calculate_reward(self, action: int) -> float: - # first, penalize if the action is not valid if not self._is_valid(action): return -2 pnl = self.get_unrealized_profit() rew = np.sign(pnl) * (pnl + 1) - factor = 100. + factor = 100.0 # reward agent for entering trades - if (action in (Actions.Long_enter.value, Actions.Short_enter.value) - and self._position == Positions.Neutral): + if ( + action in (Actions.Long_enter.value, Actions.Short_enter.value) + and self._position == Positions.Neutral + ): return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: return -1 - max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) + max_trade_duration = self.rl_config.get("max_trade_duration_candles", 300) trade_duration = self._current_tick - self._last_trade_tick # type: ignore if trade_duration <= max_trade_duration: @@ -52,20 +53,22 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner): factor *= 0.5 # discourage sitting in position - if (self._position in (Positions.Short, Positions.Long) and - action == Actions.Neutral.value): + if ( + self._position in (Positions.Short, Positions.Long) + and action == Actions.Neutral.value + ): return -1 * trade_duration / max_trade_duration # close long if action == Actions.Exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(rew * factor) # close short if action == Actions.Exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: - factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + factor *= self.rl_config["model_reward_parameters"].get("win_reward_factor", 2) return float(rew * factor) - return 0. 
+ return 0.0 diff --git a/tests/freqtradebot/test_freqtradebot.py b/tests/freqtradebot/test_freqtradebot.py index 993091068..e793e3c50 100644 --- a/tests/freqtradebot/test_freqtradebot.py +++ b/tests/freqtradebot/test_freqtradebot.py @@ -13,23 +13,55 @@ from pandas import DataFrame from sqlalchemy import select from freqtrade.constants import CANCEL_REASON, UNLIMITED_STAKE_AMOUNT -from freqtrade.enums import (CandleType, ExitCheckTuple, ExitType, RPCMessageType, RunMode, - SignalDirection, State) -from freqtrade.exceptions import (DependencyException, ExchangeError, InsufficientFundsError, - InvalidOrderException, OperationalException, PricingError, - TemporaryError) +from freqtrade.enums import ( + CandleType, + ExitCheckTuple, + ExitType, + RPCMessageType, + RunMode, + SignalDirection, + State, +) +from freqtrade.exceptions import ( + DependencyException, + ExchangeError, + InsufficientFundsError, + InvalidOrderException, + OperationalException, + PricingError, + TemporaryError, +) from freqtrade.freqtradebot import FreqtradeBot from freqtrade.persistence import Order, PairLocks, Trade from freqtrade.plugins.protections.iprotection import ProtectionReturn from freqtrade.util.datetime_helpers import dt_now, dt_utc from freqtrade.worker import Worker -from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt, - get_patched_freqtradebot, get_patched_worker, log_has, log_has_re, - patch_edge, patch_exchange, patch_get_signal, patch_wallet, - patch_whitelist) -from tests.conftest_trades import (MOCK_TRADE_COUNT, entry_side, exit_side, mock_order_2, - mock_order_2_sell, mock_order_3, mock_order_3_sell, mock_order_4, - mock_order_5_stoploss, mock_order_6_sell) +from tests.conftest import ( + EXMS, + create_mock_trades, + create_mock_trades_usdt, + get_patched_freqtradebot, + get_patched_worker, + log_has, + log_has_re, + patch_edge, + patch_exchange, + patch_get_signal, + patch_wallet, + patch_whitelist, +) +from tests.conftest_trades import ( + MOCK_TRADE_COUNT, + entry_side, + exit_side, + mock_order_2, + mock_order_2_sell, + mock_order_3, + mock_order_3_sell, + mock_order_4, + mock_order_5_stoploss, + mock_order_6_sell, +) from tests.conftest_trades_usdt import mock_trade_usdt_4 @@ -39,8 +71,8 @@ def patch_RPCManager(mocker) -> MagicMock: :param mocker: mocker to patch RPCManager class :return: RPCManager.send_msg MagicMock to track if this method is called """ - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - rpc_mock = mocker.patch('freqtrade.freqtradebot.RPCManager.send_msg', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + rpc_mock = mocker.patch("freqtrade.freqtradebot.RPCManager.send_msg", MagicMock()) return rpc_mock @@ -48,23 +80,22 @@ def patch_RPCManager(mocker) -> MagicMock: def test_freqtradebot_state(mocker, default_conf_usdt, markets) -> None: - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) assert freqtrade.state is State.RUNNING - default_conf_usdt.pop('initial_state') + default_conf_usdt.pop("initial_state") freqtrade = FreqtradeBot(default_conf_usdt) assert freqtrade.state is State.STOPPED def test_process_stopped(mocker, default_conf_usdt) -> None: - freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders') + coo_mock = 
mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders") freqtrade.process_stopped() assert coo_mock.call_count == 0 - default_conf_usdt['cancel_open_orders_on_exit'] = True + default_conf_usdt["cancel_open_orders_on_exit"] = True freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) freqtrade.process_stopped() assert coo_mock.call_count == 1 @@ -77,24 +108,25 @@ def test_process_calls_sendmsg(mocker, default_conf_usdt) -> None: def test_bot_cleanup(mocker, default_conf_usdt, caplog) -> None: - mock_cleanup = mocker.patch('freqtrade.freqtradebot.Trade.commit') - coo_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders') + mock_cleanup = mocker.patch("freqtrade.freqtradebot.Trade.commit") + coo_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cancel_all_open_orders") freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) freqtrade.cleanup() - assert log_has('Cleaning up modules ...', caplog) + assert log_has("Cleaning up modules ...", caplog) assert mock_cleanup.call_count == 1 assert coo_mock.call_count == 0 - freqtrade.config['cancel_open_orders_on_exit'] = True + freqtrade.config["cancel_open_orders_on_exit"] = True freqtrade.cleanup() assert coo_mock.call_count == 1 def test_bot_cleanup_db_errors(mocker, default_conf_usdt, caplog) -> None: - mocker.patch('freqtrade.freqtradebot.Trade.commit', - side_effect=OperationalException()) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.check_for_open_trades', - side_effect=OperationalException()) + mocker.patch("freqtrade.freqtradebot.Trade.commit", side_effect=OperationalException()) + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.check_for_open_trades", + side_effect=OperationalException(), + ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) freqtrade.emc = MagicMock() freqtrade.emc.shutdown = MagicMock() @@ -102,40 +134,37 @@ def test_bot_cleanup_db_errors(mocker, default_conf_usdt, caplog) -> None: assert freqtrade.emc.shutdown.call_count == 1 -@pytest.mark.parametrize('runmode', [ - RunMode.DRY_RUN, - RunMode.LIVE -]) +@pytest.mark.parametrize("runmode", [RunMode.DRY_RUN, RunMode.LIVE]) def test_order_dict(default_conf_usdt, mocker, runmode, caplog) -> None: patch_RPCManager(mocker) patch_exchange(mocker) conf = default_conf_usdt.copy() - conf['runmode'] = runmode - conf['order_types'] = { - 'entry': 'market', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': True, + conf["runmode"] = runmode + conf["order_types"] = { + "entry": "market", + "exit": "limit", + "stoploss": "limit", + "stoploss_on_exchange": True, } - conf['entry_pricing']['price_side'] = 'ask' + conf["entry_pricing"]["price_side"] = "ask" freqtrade = FreqtradeBot(conf) if runmode == RunMode.LIVE: assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog) - assert freqtrade.strategy.order_types['stoploss_on_exchange'] + assert freqtrade.strategy.order_types["stoploss_on_exchange"] caplog.clear() # is left untouched conf = default_conf_usdt.copy() - conf['runmode'] = runmode - conf['order_types'] = { - 'entry': 'market', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': False, + conf["runmode"] = runmode + conf["order_types"] = { + "entry": "market", + "exit": "limit", + "stoploss": "limit", + "stoploss_on_exchange": False, } freqtrade = FreqtradeBot(conf) - assert not freqtrade.strategy.order_types['stoploss_on_exchange'] + assert not freqtrade.strategy.order_types["stoploss_on_exchange"] assert not log_has_re(r".*stoploss_on_exchange 
.* dry-run", caplog) @@ -145,51 +174,59 @@ def test_get_trade_stake_amount(default_conf_usdt, mocker) -> None: freqtrade = FreqtradeBot(default_conf_usdt) - result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT', 1) - assert result == default_conf_usdt['stake_amount'] + result = freqtrade.wallets.get_trade_stake_amount("ETH/USDT", 1) + assert result == default_conf_usdt["stake_amount"] -@pytest.mark.parametrize('runmode', [ - RunMode.DRY_RUN, - RunMode.LIVE -]) +@pytest.mark.parametrize("runmode", [RunMode.DRY_RUN, RunMode.LIVE]) def test_load_strategy_no_keys(default_conf_usdt, mocker, runmode, caplog) -> None: patch_RPCManager(mocker) patch_exchange(mocker) conf = deepcopy(default_conf_usdt) - conf['runmode'] = runmode - erm = mocker.patch('freqtrade.freqtradebot.ExchangeResolver.load_exchange') + conf["runmode"] = runmode + erm = mocker.patch("freqtrade.freqtradebot.ExchangeResolver.load_exchange") freqtrade = FreqtradeBot(conf) strategy_config = freqtrade.strategy.config - assert id(strategy_config['exchange']) == id(conf['exchange']) + assert id(strategy_config["exchange"]) == id(conf["exchange"]) # Keys have been removed and are not passed to the exchange - assert strategy_config['exchange']['key'] == '' - assert strategy_config['exchange']['secret'] == '' + assert strategy_config["exchange"]["key"] == "" + assert strategy_config["exchange"]["secret"] == "" assert erm.call_count == 1 - ex_conf = erm.call_args_list[0][1]['exchange_config'] - assert id(ex_conf) != id(conf['exchange']) + ex_conf = erm.call_args_list[0][1]["exchange_config"] + assert id(ex_conf) != id(conf["exchange"]) # Keys are still present - assert ex_conf['key'] != '' - assert ex_conf['key'] == default_conf_usdt['exchange']['key'] - assert ex_conf['secret'] != '' - assert ex_conf['secret'] == default_conf_usdt['exchange']['secret'] + assert ex_conf["key"] != "" + assert ex_conf["key"] == default_conf_usdt["exchange"]["key"] + assert ex_conf["secret"] != "" + assert ex_conf["secret"] == default_conf_usdt["exchange"]["secret"] -@pytest.mark.parametrize("amend_last,wallet,max_open,lsamr,expected", [ - (False, 120, 2, 0.5, [60, None]), - (True, 120, 2, 0.5, [60, 58.8]), - (False, 180, 3, 0.5, [60, 60, None]), - (True, 180, 3, 0.5, [60, 60, 58.2]), - (False, 122, 3, 0.5, [60, 60, None]), - (True, 122, 3, 0.5, [60, 60, 0.0]), - (True, 167, 3, 0.5, [60, 60, 45.33]), - (True, 122, 3, 1, [60, 60, 0.0]), -]) +@pytest.mark.parametrize( + "amend_last,wallet,max_open,lsamr,expected", + [ + (False, 120, 2, 0.5, [60, None]), + (True, 120, 2, 0.5, [60, 58.8]), + (False, 180, 3, 0.5, [60, 60, None]), + (True, 180, 3, 0.5, [60, 60, 58.2]), + (False, 122, 3, 0.5, [60, 60, None]), + (True, 122, 3, 0.5, [60, 60, 0.0]), + (True, 167, 3, 0.5, [60, 60, 45.33]), + (True, 122, 3, 1, [60, 60, 0.0]), + ], +) def test_check_available_stake_amount( - default_conf_usdt, ticker_usdt, mocker, fee, limit_buy_order_usdt_open, - amend_last, wallet, max_open, lsamr, expected + default_conf_usdt, + ticker_usdt, + mocker, + fee, + limit_buy_order_usdt_open, + amend_last, + wallet, + max_open, + lsamr, + expected, ) -> None: patch_RPCManager(mocker) patch_exchange(mocker) @@ -197,25 +234,24 @@ def test_check_available_stake_amount( EXMS, fetch_ticker=ticker_usdt, create_order=MagicMock(return_value=limit_buy_order_usdt_open), - get_fee=fee + get_fee=fee, ) - default_conf_usdt['dry_run_wallet'] = wallet + default_conf_usdt["dry_run_wallet"] = wallet - default_conf_usdt['amend_last_stake_amount'] = amend_last - 
default_conf_usdt['last_stake_amount_min_ratio'] = lsamr + default_conf_usdt["amend_last_stake_amount"] = amend_last + default_conf_usdt["last_stake_amount_min_ratio"] = lsamr freqtrade = FreqtradeBot(default_conf_usdt) for i in range(0, max_open): - if expected[i] is not None: - limit_buy_order_usdt_open['id'] = str(i) - result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT', 1) + limit_buy_order_usdt_open["id"] = str(i) + result = freqtrade.wallets.get_trade_stake_amount("ETH/USDT", 1) assert pytest.approx(result) == expected[i] - freqtrade.execute_entry('ETH/USDT', result) + freqtrade.execute_entry("ETH/USDT", result) else: with pytest.raises(DependencyException): - freqtrade.wallets.get_trade_stake_amount('ETH/USDT', 1) + freqtrade.wallets.get_trade_stake_amount("ETH/USDT", 1) def test_edge_called_in_process(mocker, edge_conf) -> None: @@ -226,43 +262,51 @@ def test_edge_called_in_process(mocker, edge_conf) -> None: freqtrade = FreqtradeBot(edge_conf) patch_get_signal(freqtrade) freqtrade.process() - assert freqtrade.active_pair_whitelist == ['NEO/BTC', 'LTC/BTC'] + assert freqtrade.active_pair_whitelist == ["NEO/BTC", "LTC/BTC"] def test_edge_overrides_stake_amount(mocker, edge_conf) -> None: patch_RPCManager(mocker) patch_exchange(mocker) patch_edge(mocker) - edge_conf['dry_run_wallet'] = 999.9 + edge_conf["dry_run_wallet"] = 999.9 freqtrade = FreqtradeBot(edge_conf) - assert freqtrade.wallets.get_trade_stake_amount( - 'NEO/BTC', 1, freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.20 - assert freqtrade.wallets.get_trade_stake_amount( - 'LTC/BTC', 1, freqtrade.edge) == (999.9 * 0.5 * 0.01) / 0.21 + assert ( + freqtrade.wallets.get_trade_stake_amount("NEO/BTC", 1, freqtrade.edge) + == (999.9 * 0.5 * 0.01) / 0.20 + ) + assert ( + freqtrade.wallets.get_trade_stake_amount("LTC/BTC", 1, freqtrade.edge) + == (999.9 * 0.5 * 0.01) / 0.21 + ) -@pytest.mark.parametrize('buy_price_mult,ignore_strat_sl', [ - (0.79, False), # Override stoploss - (0.85, True), # Override strategy stoploss -]) -def test_edge_overrides_stoploss(limit_order, fee, caplog, mocker, - buy_price_mult, ignore_strat_sl, edge_conf) -> None: +@pytest.mark.parametrize( + "buy_price_mult,ignore_strat_sl", + [ + (0.79, False), # Override stoploss + (0.85, True), # Override strategy stoploss + ], +) +def test_edge_overrides_stoploss( + limit_order, fee, caplog, mocker, buy_price_mult, ignore_strat_sl, edge_conf +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) patch_edge(mocker) - edge_conf['max_open_trades'] = float('inf') + edge_conf["max_open_trades"] = float("inf") # Strategy stoploss is -0.1 but Edge imposes a stoploss at -0.2 # Thus, if price falls 21%, stoploss should be triggered # # mocking the ticker: price is falling ... 
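A note on the mocker.patch.multiple pattern that recurs throughout these tests, including the ticker mock set up just below: every keyword argument names an attribute on the target that is replaced for the duration of the test, which is how several exchange methods (fetch_ticker, get_fee, create_order) get stubbed out in a single call. A minimal, self-contained sketch assuming pytest and pytest-mock; the FakeExchange class here is illustrative only, whereas freqtrade passes the dotted-path string EXMS as the target:

from unittest.mock import MagicMock


class FakeExchange:
    """Hypothetical stand-in for the exchange wrapper that the tests patch via EXMS."""

    def fetch_ticker(self, pair):
        raise RuntimeError("would hit the network")

    def get_fee(self, pair):
        raise RuntimeError("would hit the network")


def test_patch_multiple_sketch(mocker):
    # Replace several attributes of the same target in one call; the
    # originals are restored automatically when the test finishes.
    mocker.patch.multiple(
        FakeExchange,
        fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}),
        get_fee=MagicMock(return_value=0.0025),
    )
    exchange = FakeExchange()
    assert exchange.fetch_ticker("ETH/USDT")["bid"] == 1.9
    assert exchange.get_fee("ETH/USDT") == 0.0025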
- enter_price = limit_order['buy']['price'] + enter_price = limit_order["buy"]["price"] ticker_val = { - 'bid': enter_price, - 'ask': enter_price, - 'last': enter_price, - } + "bid": enter_price, + "ask": enter_price, + "last": enter_price, + } mocker.patch.multiple( EXMS, fetch_ticker=MagicMock(return_value=ticker_val), @@ -272,23 +316,25 @@ def test_edge_overrides_stoploss(limit_order, fee, caplog, mocker, # Create a trade with "limit_buy_order_usdt" price freqtrade = FreqtradeBot(edge_conf) - freqtrade.active_pair_whitelist = ['NEO/BTC'] + freqtrade.active_pair_whitelist = ["NEO/BTC"] patch_get_signal(freqtrade) freqtrade.strategy.min_roi_reached = MagicMock(return_value=False) freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() caplog.clear() ############################################# - ticker_val.update({ - 'bid': enter_price * buy_price_mult, - 'ask': enter_price * buy_price_mult, - 'last': enter_price * buy_price_mult, - }) + ticker_val.update( + { + "bid": enter_price * buy_price_mult, + "ask": enter_price * buy_price_mult, + "last": enter_price * buy_price_mult, + } + ) # stoploss should be hit assert freqtrade.handle_trade(trade) is not ignore_strat_sl if not ignore_strat_sl: - assert log_has_re('Exit for NEO/BTC detected. Reason: stop_loss.*', caplog) + assert log_has_re("Exit for NEO/BTC detected. Reason: stop_loss.*", caplog) assert trade.exit_reason == ExitType.STOP_LOSS.value # Test compatibility ... assert trade.sell_reason == ExitType.STOP_LOSS.value @@ -297,7 +343,7 @@ def test_edge_overrides_stoploss(limit_order, fee, caplog, mocker, def test_total_open_trades_stakes(mocker, default_conf_usdt, ticker_usdt, fee) -> None: patch_RPCManager(mocker) patch_exchange(mocker) - default_conf_usdt['max_open_trades'] = 2 + default_conf_usdt["max_open_trades"] = 2 mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, @@ -325,12 +371,10 @@ def test_total_open_trades_stakes(mocker, default_conf_usdt, ticker_usdt, fee) - assert Trade.total_open_trades_stakes() == 120.0 -@pytest.mark.parametrize("is_short,open_rate", [ - (False, 2.0), - (True, 2.2) -]) -def test_create_trade(default_conf_usdt, ticker_usdt, limit_order, - fee, mocker, is_short, open_rate) -> None: +@pytest.mark.parametrize("is_short,open_rate", [(False, 2.0), (True, 2.2)]) +def test_create_trade( + default_conf_usdt, ticker_usdt, limit_order, fee, mocker, is_short, open_rate +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -341,10 +385,10 @@ def test_create_trade(default_conf_usdt, ticker_usdt, limit_order, ) # Save state of current whitelist - whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist']) + whitelist = deepcopy(default_conf_usdt["exchange"]["pair_whitelist"]) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) - freqtrade.create_trade('ETH/USDT') + freqtrade.create_trade("ETH/USDT") trade = Trade.session.scalars(select(Trade)).first() trade.is_short = is_short @@ -352,23 +396,24 @@ def test_create_trade(default_conf_usdt, ticker_usdt, limit_order, assert pytest.approx(trade.stake_amount) == 60.0 assert trade.is_open assert trade.open_date is not None - assert trade.exchange == 'binance' + assert trade.exchange == "binance" # Simulate fulfilled LIMIT_BUY order for trade oobj = Order.parse_from_ccxt_object( - limit_order[entry_side(is_short)], 'ADA/USDT', entry_side(is_short)) + limit_order[entry_side(is_short)], "ADA/USDT", entry_side(is_short) + ) 
trade.update_trade(oobj) assert trade.open_rate == open_rate assert trade.amount == 30.0 - assert whitelist == default_conf_usdt['exchange']['pair_whitelist'] + assert whitelist == default_conf_usdt["exchange"]["pair_whitelist"] def test_create_trade_no_stake_amount(default_conf_usdt, ticker_usdt, fee, mocker) -> None: patch_RPCManager(mocker) patch_exchange(mocker) - patch_wallet(mocker, free=default_conf_usdt['stake_amount'] * 0.5) + patch_wallet(mocker, free=default_conf_usdt["stake_amount"] * 0.5) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, @@ -377,20 +422,32 @@ def test_create_trade_no_stake_amount(default_conf_usdt, ticker_usdt, fee, mocke freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade) - with pytest.raises(DependencyException, match=r'.*stake amount.*'): - freqtrade.create_trade('ETH/USDT') + with pytest.raises(DependencyException, match=r".*stake amount.*"): + freqtrade.create_trade("ETH/USDT") @pytest.mark.parametrize("is_short", [False, True]) -@pytest.mark.parametrize('stake_amount,create,amount_enough,max_open_trades', [ - (5.0, True, True, 99), - (0.042, True, False, 99), # Amount will be adjusted to min - which is 0.051 - (0, False, True, 99), - (UNLIMITED_STAKE_AMOUNT, False, True, 0), -]) +@pytest.mark.parametrize( + "stake_amount,create,amount_enough,max_open_trades", + [ + (5.0, True, True, 99), + (0.042, True, False, 99), # Amount will be adjusted to min - which is 0.051 + (0, False, True, 99), + (UNLIMITED_STAKE_AMOUNT, False, True, 0), + ], +) def test_create_trade_minimal_amount( - default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, - stake_amount, create, amount_enough, max_open_trades, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_order_open, + fee, + mocker, + stake_amount, + create, + amount_enough, + max_open_trades, + caplog, + is_short, ) -> None: patch_RPCManager(mocker) patch_exchange(mocker) @@ -401,34 +458,46 @@ def test_create_trade_minimal_amount( create_order=enter_mock, get_fee=fee, ) - default_conf_usdt['max_open_trades'] = max_open_trades + default_conf_usdt["max_open_trades"] = max_open_trades freqtrade = FreqtradeBot(default_conf_usdt) - freqtrade.config['stake_amount'] = stake_amount + freqtrade.config["stake_amount"] = stake_amount patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) if create: - assert freqtrade.create_trade('ETH/USDT') + assert freqtrade.create_trade("ETH/USDT") if amount_enough: - rate, amount = enter_mock.call_args[1]['rate'], enter_mock.call_args[1]['amount'] - assert rate * amount <= default_conf_usdt['stake_amount'] + rate, amount = enter_mock.call_args[1]["rate"], enter_mock.call_args[1]["amount"] + assert rate * amount <= default_conf_usdt["stake_amount"] else: - assert log_has_re( - r"Stake amount for pair .* is too small.*", - caplog - ) + assert log_has_re(r"Stake amount for pair .* is too small.*", caplog) else: - assert not freqtrade.create_trade('ETH/USDT') + assert not freqtrade.create_trade("ETH/USDT") if not max_open_trades: - assert freqtrade.wallets.get_trade_stake_amount( - 'ETH/USDT', default_conf_usdt['max_open_trades'], freqtrade.edge) == 0 + assert ( + freqtrade.wallets.get_trade_stake_amount( + "ETH/USDT", default_conf_usdt["max_open_trades"], freqtrade.edge + ) + == 0 + ) -@pytest.mark.parametrize('whitelist,positions', [ - (["ETH/USDT"], 1), # No pairs left - ([], 0), # No pairs in whitelist -]) -def test_enter_positions_no_pairs_left(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open, - fee, whitelist, positions, 
mocker, caplog) -> None: +@pytest.mark.parametrize( + "whitelist,positions", + [ + (["ETH/USDT"], 1), # No pairs left + ([], 0), # No pairs in whitelist + ], +) +def test_enter_positions_no_pairs_left( + default_conf_usdt, + ticker_usdt, + limit_buy_order_usdt_open, + fee, + whitelist, + positions, + mocker, + caplog, +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -437,8 +506,8 @@ def test_enter_positions_no_pairs_left(default_conf_usdt, ticker_usdt, limit_buy create_order=MagicMock(return_value=limit_buy_order_usdt_open), get_fee=fee, ) - mocker.patch('freqtrade.configuration.config_validation._validate_whitelist') - default_conf_usdt['exchange']['pair_whitelist'] = whitelist + mocker.patch("freqtrade.configuration.config_validation._validate_whitelist") + default_conf_usdt["exchange"]["pair_whitelist"] = whitelist freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade) @@ -455,14 +524,15 @@ def test_enter_positions_no_pairs_left(default_conf_usdt, ticker_usdt, limit_buy @pytest.mark.usefixtures("init_persistence") -def test_enter_positions_global_pairlock(default_conf_usdt, ticker_usdt, limit_buy_order_usdt, fee, - mocker, caplog) -> None: +def test_enter_positions_global_pairlock( + default_conf_usdt, ticker_usdt, limit_buy_order_usdt, fee, mocker, caplog +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, - create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}), + create_order=MagicMock(return_value={"id": limit_buy_order_usdt["id"]}), get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -475,38 +545,39 @@ def test_enter_positions_global_pairlock(default_conf_usdt, ticker_usdt, limit_b assert not log_has_re(message, caplog) caplog.clear() - PairLocks.lock_pair('*', dt_now() + timedelta(minutes=20), 'Just because', side='*') + PairLocks.lock_pair("*", dt_now() + timedelta(minutes=20), "Just because", side="*") n = freqtrade.enter_positions() assert n == 0 assert log_has_re(message, caplog) -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize("is_short", [False, True]) def test_handle_protections(mocker, default_conf_usdt, fee, is_short): - default_conf_usdt['protections'] = [ + default_conf_usdt["protections"] = [ {"method": "CooldownPeriod", "stop_duration": 60}, { "method": "StoplossGuard", "lookback_period_candles": 24, "trade_limit": 4, "stop_duration_candles": 4, - "only_per_pair": False - } + "only_per_pair": False, + }, ] freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) freqtrade.protections._protection_handlers[1].global_stop = MagicMock( - return_value=ProtectionReturn(True, dt_now() + timedelta(hours=1), "asdf")) + return_value=ProtectionReturn(True, dt_now() + timedelta(hours=1), "asdf") + ) create_mock_trades(fee, is_short) - freqtrade.handle_protections('ETC/BTC', '*') + freqtrade.handle_protections("ETC/BTC", "*") send_msg_mock = freqtrade.rpc.send_msg assert send_msg_mock.call_count == 2 - assert send_msg_mock.call_args_list[0][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER - assert send_msg_mock.call_args_list[1][0][0]['type'] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL + assert send_msg_mock.call_args_list[0][0][0]["type"] == RPCMessageType.PROTECTION_TRIGGER + assert send_msg_mock.call_args_list[1][0][0]["type"] == RPCMessageType.PROTECTION_TRIGGER_GLOBAL def test_create_trade_no_signal(default_conf_usdt, fee, mocker) -> None: - default_conf_usdt['dry_run'] = True + 
default_conf_usdt["dry_run"] = True patch_RPCManager(mocker) patch_exchange(mocker) @@ -514,24 +585,30 @@ def test_create_trade_no_signal(default_conf_usdt, fee, mocker) -> None: EXMS, get_fee=fee, ) - default_conf_usdt['stake_amount'] = 10 + default_conf_usdt["stake_amount"] = 10 freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_long=False, exit_long=False) - assert not freqtrade.create_trade('ETH/USDT') + assert not freqtrade.create_trade("ETH/USDT") @pytest.mark.parametrize("max_open", range(0, 5)) @pytest.mark.parametrize("tradable_balance_ratio,modifier", [(1.0, 1), (0.99, 0.8), (0.5, 0.5)]) def test_create_trades_multiple_trades( - default_conf_usdt, ticker_usdt, fee, mocker, limit_buy_order_usdt_open, - max_open, tradable_balance_ratio, modifier + default_conf_usdt, + ticker_usdt, + fee, + mocker, + limit_buy_order_usdt_open, + max_open, + tradable_balance_ratio, + modifier, ) -> None: patch_RPCManager(mocker) patch_exchange(mocker) - default_conf_usdt['max_open_trades'] = max_open - default_conf_usdt['tradable_balance_ratio'] = tradable_balance_ratio - default_conf_usdt['dry_run_wallet'] = 60.0 * max_open + default_conf_usdt["max_open_trades"] = max_open + default_conf_usdt["tradable_balance_ratio"] = tradable_balance_ratio + default_conf_usdt["dry_run_wallet"] = 60.0 * max_open mocker.patch.multiple( EXMS, @@ -550,11 +627,12 @@ def test_create_trades_multiple_trades( assert len(trades) == max(int(max_open * modifier), 0) -def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker, - limit_buy_order_usdt_open, caplog) -> None: +def test_create_trades_preopen( + default_conf_usdt, ticker_usdt, fee, mocker, limit_buy_order_usdt_open, caplog +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) - default_conf_usdt['max_open_trades'] = 4 + default_conf_usdt["max_open_trades"] = 4 mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, @@ -565,26 +643,26 @@ def test_create_trades_preopen(default_conf_usdt, ticker_usdt, fee, mocker, patch_get_signal(freqtrade) # Create 2 existing trades - freqtrade.execute_entry('ETH/USDT', default_conf_usdt['stake_amount']) - freqtrade.execute_entry('NEO/BTC', default_conf_usdt['stake_amount']) + freqtrade.execute_entry("ETH/USDT", default_conf_usdt["stake_amount"]) + freqtrade.execute_entry("NEO/BTC", default_conf_usdt["stake_amount"]) assert len(Trade.get_open_trades()) == 2 # Change order_id for new orders - limit_buy_order_usdt_open['id'] = '123444' + limit_buy_order_usdt_open["id"] = "123444" # Create 2 new trades using create_trades - assert freqtrade.create_trade('ETH/USDT') - assert freqtrade.create_trade('NEO/BTC') + assert freqtrade.create_trade("ETH/USDT") + assert freqtrade.create_trade("NEO/BTC") trades = Trade.get_open_trades() assert len(trades) == 4 -@pytest.mark.parametrize('is_short', [False, True]) -def test_process_trade_creation(default_conf_usdt, ticker_usdt, limit_order, limit_order_open, - is_short, fee, mocker, caplog - ) -> None: - ticker_side = 'ask' if is_short else 'bid' +@pytest.mark.parametrize("is_short", [False, True]) +def test_process_trade_creation( + default_conf_usdt, ticker_usdt, limit_order, limit_order_open, is_short, fee, mocker, caplog +) -> None: + ticker_side = "ask" if is_short else "bid" patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -606,17 +684,17 @@ def test_process_trade_creation(default_conf_usdt, ticker_usdt, limit_order, lim assert len(trades) == 1 trade = trades[0] assert trade is not None - assert 
pytest.approx(trade.stake_amount) == default_conf_usdt['stake_amount'] + assert pytest.approx(trade.stake_amount) == default_conf_usdt["stake_amount"] assert trade.is_open assert trade.open_date is not None - assert trade.exchange == 'binance' + assert trade.exchange == "binance" assert trade.open_rate == ticker_usdt.return_value[ticker_side] assert pytest.approx(trade.amount) == 60 / ticker_usdt.return_value[ticker_side] assert log_has( f'{"Short" if is_short else "Long"} signal found: about create a new trade for ETH/USDT ' - 'with stake_amount: 60.0 ...', - caplog + "with stake_amount: 60.0 ...", + caplog, ) @@ -629,7 +707,7 @@ def test_process_exchange_failures(default_conf_usdt, ticker_usdt, mocker) -> No reload_markets=MagicMock(side_effect=TemporaryError), create_order=MagicMock(side_effect=TemporaryError), ) - sleep_mock = mocker.patch('time.sleep') + sleep_mock = mocker.patch("time.sleep") worker = Worker(args=None, config=default_conf_usdt) patch_get_signal(worker.freqtrade) @@ -642,9 +720,7 @@ def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) - msg_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt, - create_order=MagicMock(side_effect=OperationalException) + EXMS, fetch_ticker=ticker_usdt, create_order=MagicMock(side_effect=OperationalException) ) worker = Worker(args=None, config=default_conf_usdt) patch_get_signal(worker.freqtrade) @@ -653,11 +729,12 @@ def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) - worker._process_running() assert worker.freqtrade.state == State.STOPPED - assert 'OperationalException' in msg_mock.call_args_list[-1][0][0]['status'] + assert "OperationalException" in msg_mock.call_args_list[-1][0][0]["status"] -def test_process_trade_handling(default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open, fee, - mocker) -> None: +def test_process_trade_handling( + default_conf_usdt, ticker_usdt, limit_buy_order_usdt_open, fee, mocker +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -682,45 +759,50 @@ def test_process_trade_handling(default_conf_usdt, ticker_usdt, limit_buy_order_ assert len(trades) == 1 -def test_process_trade_no_whitelist_pair(default_conf_usdt, ticker_usdt, limit_buy_order_usdt, - fee, mocker) -> None: - """ Test process with trade not in pair list """ +def test_process_trade_no_whitelist_pair( + default_conf_usdt, ticker_usdt, limit_buy_order_usdt, fee, mocker +) -> None: + """Test process with trade not in pair list""" patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, - create_order=MagicMock(return_value={'id': limit_buy_order_usdt['id']}), + create_order=MagicMock(return_value={"id": limit_buy_order_usdt["id"]}), fetch_order=MagicMock(return_value=limit_buy_order_usdt), get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade) - pair = 'BLK/BTC' + pair = "BLK/BTC" # Ensure the pair is not in the whitelist! 
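The stake-amount checks above compare floats through pytest.approx rather than plain equality, since fee and leverage arithmetic rarely produces exact binary floats. A short, standalone illustration of the tolerance it applies, assuming only pytest (approx uses a relative tolerance of 1e-6 by default):

import pytest


def test_approx_sketch():
    # Plain equality fails because of binary floating point representation...
    assert 0.1 + 0.2 != 0.3
    # ...while approx compares within a relative tolerance (1e-6 by default).
    assert 0.1 + 0.2 == pytest.approx(0.3)
    # The tolerance can be tightened or loosened explicitly.
    assert 60.0000001 == pytest.approx(60.0, rel=1e-6)
    assert 58.8 == pytest.approx(59.0, rel=0.01)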
- assert pair not in default_conf_usdt['exchange']['pair_whitelist'] + assert pair not in default_conf_usdt["exchange"]["pair_whitelist"] # create open trade not in whitelist - Trade.session.add(Trade( - pair=pair, - stake_amount=0.001, - fee_open=fee.return_value, - fee_close=fee.return_value, - is_open=True, - amount=20, - open_rate=0.01, - exchange='binance', - )) - Trade.session.add(Trade( - pair='ETH/USDT', - stake_amount=0.001, - fee_open=fee.return_value, - fee_close=fee.return_value, - is_open=True, - amount=12, - open_rate=0.001, - exchange='binance', - )) + Trade.session.add( + Trade( + pair=pair, + stake_amount=0.001, + fee_open=fee.return_value, + fee_close=fee.return_value, + is_open=True, + amount=20, + open_rate=0.01, + exchange="binance", + ) + ) + Trade.session.add( + Trade( + pair="ETH/USDT", + stake_amount=0.001, + fee_open=fee.return_value, + fee_close=fee.return_value, + is_open=True, + amount=12, + open_rate=0.001, + exchange="binance", + ) + ) Trade.commit() assert pair not in freqtrade.active_pair_whitelist @@ -741,16 +823,15 @@ def test_process_informative_pairs_added(default_conf_usdt, ticker_usdt, mocker) create_order=MagicMock(side_effect=TemporaryError), refresh_latest_ohlcv=refresh_mock, ) - inf_pairs = MagicMock(return_value=[ - ("BTC/ETH", '1m', CandleType.SPOT), - ("ETH/USDT", "1h", CandleType.SPOT) - ]) - mocker.patch.multiple( - 'freqtrade.strategy.interface.IStrategy', - get_exit_signal=MagicMock(return_value=(False, False)), - get_entry_signal=MagicMock(return_value=(None, None)) + inf_pairs = MagicMock( + return_value=[("BTC/ETH", "1m", CandleType.SPOT), ("ETH/USDT", "1h", CandleType.SPOT)] ) - mocker.patch('time.sleep', return_value=None) + mocker.patch.multiple( + "freqtrade.strategy.interface.IStrategy", + get_exit_signal=MagicMock(return_value=(False, False)), + get_entry_signal=MagicMock(return_value=(None, None)), + ) + mocker.patch("time.sleep", return_value=None) freqtrade = FreqtradeBot(default_conf_usdt) freqtrade.strategy.informative_pairs = inf_pairs @@ -761,33 +842,47 @@ def test_process_informative_pairs_added(default_conf_usdt, ticker_usdt, mocker) assert refresh_mock.call_count == 1 assert ("BTC/ETH", "1m", CandleType.SPOT) in refresh_mock.call_args[0][0] assert ("ETH/USDT", "1h", CandleType.SPOT) in refresh_mock.call_args[0][0] - assert ("ETH/USDT", default_conf_usdt["timeframe"], - CandleType.SPOT) in refresh_mock.call_args[0][0] + assert ("ETH/USDT", default_conf_usdt["timeframe"], CandleType.SPOT) in refresh_mock.call_args[ + 0 + ][0] -@pytest.mark.parametrize("is_short,trading_mode,exchange_name,margin_mode,liq_buffer,liq_price", [ - (False, 'spot', 'binance', None, 0.0, None), - (True, 'spot', 'binance', None, 0.0, None), - (False, 'spot', 'gate', None, 0.0, None), - (True, 'spot', 'gate', None, 0.0, None), - (False, 'spot', 'okx', None, 0.0, None), - (True, 'spot', 'okx', None, 0.0, None), - (True, 'futures', 'binance', 'isolated', 0.0, 11.88151815181518), - (False, 'futures', 'binance', 'isolated', 0.0, 8.080471380471382), - (True, 'futures', 'gate', 'isolated', 0.0, 11.87413417771621), - (False, 'futures', 'gate', 'isolated', 0.0, 8.085708510208207), - (True, 'futures', 'binance', 'isolated', 0.05, 11.7874422442244), - (False, 'futures', 'binance', 'isolated', 0.05, 8.17644781144781), - (True, 'futures', 'gate', 'isolated', 0.05, 11.7804274688304), - (False, 'futures', 'gate', 'isolated', 0.05, 8.181423084697796), - (True, 'futures', 'okx', 'isolated', 0.0, 11.87413417771621), - (False, 'futures', 'okx', 'isolated', 0.0, 
8.085708510208207), - (True, 'futures', 'bybit', 'isolated', 0.0, 11.9), - (False, 'futures', 'bybit', 'isolated', 0.0, 8.1), -]) -def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, - limit_order_open, is_short, trading_mode, - exchange_name, margin_mode, liq_buffer, liq_price) -> None: +@pytest.mark.parametrize( + "is_short,trading_mode,exchange_name,margin_mode,liq_buffer,liq_price", + [ + (False, "spot", "binance", None, 0.0, None), + (True, "spot", "binance", None, 0.0, None), + (False, "spot", "gate", None, 0.0, None), + (True, "spot", "gate", None, 0.0, None), + (False, "spot", "okx", None, 0.0, None), + (True, "spot", "okx", None, 0.0, None), + (True, "futures", "binance", "isolated", 0.0, 11.88151815181518), + (False, "futures", "binance", "isolated", 0.0, 8.080471380471382), + (True, "futures", "gate", "isolated", 0.0, 11.87413417771621), + (False, "futures", "gate", "isolated", 0.0, 8.085708510208207), + (True, "futures", "binance", "isolated", 0.05, 11.7874422442244), + (False, "futures", "binance", "isolated", 0.05, 8.17644781144781), + (True, "futures", "gate", "isolated", 0.05, 11.7804274688304), + (False, "futures", "gate", "isolated", 0.05, 8.181423084697796), + (True, "futures", "okx", "isolated", 0.0, 11.87413417771621), + (False, "futures", "okx", "isolated", 0.0, 8.085708510208207), + (True, "futures", "bybit", "isolated", 0.0, 11.9), + (False, "futures", "bybit", "isolated", 0.0, 8.1), + ], +) +def test_execute_entry( + mocker, + default_conf_usdt, + fee, + limit_order, + limit_order_open, + is_short, + trading_mode, + exchange_name, + margin_mode, + liq_buffer, + liq_price, +) -> None: """ exchange_name = binance, is_short = true leverage = 5 @@ -810,13 +905,13 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, # TODO: Split this test into multiple tests to improve readability open_order = limit_order_open[entry_side(is_short)] order = limit_order[entry_side(is_short)] - default_conf_usdt['trading_mode'] = trading_mode - default_conf_usdt['liquidation_buffer'] = liq_buffer - leverage = 1.0 if trading_mode == 'spot' else 5.0 - default_conf_usdt['exchange']['name'] = exchange_name + default_conf_usdt["trading_mode"] = trading_mode + default_conf_usdt["liquidation_buffer"] = liq_buffer + leverage = 1.0 if trading_mode == "spot" else 5.0 + default_conf_usdt["exchange"]["name"] = exchange_name if margin_mode: - default_conf_usdt['margin_mode'] = margin_mode - mocker.patch('freqtrade.exchange.gate.Gate.validate_ordertypes') + default_conf_usdt["margin_mode"] = margin_mode + mocker.patch("freqtrade.exchange.gate.Gate.validate_ordertypes") patch_RPCManager(mocker) patch_exchange(mocker, id=exchange_name) freqtrade = FreqtradeBot(default_conf_usdt) @@ -829,11 +924,7 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, mocker.patch.multiple( EXMS, get_rate=enter_rate_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), create_order=enter_mm, get_min_pair_stake_amount=MagicMock(return_value=1), get_max_pair_stake_amount=MagicMock(return_value=500000), @@ -844,10 +935,10 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, get_max_leverage=MagicMock(return_value=10), ) mocker.patch.multiple( - 'freqtrade.exchange.okx.Okx', + "freqtrade.exchange.okx.Okx", get_max_pair_stake_amount=MagicMock(return_value=500000), ) - pair = 'ETH/USDT' + pair = "ETH/USDT" assert not 
freqtrade.execute_entry(pair, stake_amount, is_short=is_short) assert enter_rate_mock.call_count == 1 @@ -855,15 +946,15 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert freqtrade.strategy.confirm_trade_entry.call_count == 1 enter_rate_mock.reset_mock() - open_order['id'] = '22' + open_order["id"] = "22" freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True) assert freqtrade.execute_entry(pair, stake_amount) assert enter_rate_mock.call_count == 2 assert enter_mm.call_count == 1 call_args = enter_mm.call_args_list[0][1] - assert call_args['pair'] == pair - assert call_args['rate'] == bid - assert pytest.approx(call_args['amount']) == round(stake_amount / bid * leverage, 8) + assert call_args["pair"] == pair + assert call_args["rate"] == bid + assert pytest.approx(call_args["amount"]) == round(stake_amount / bid * leverage, 8) enter_rate_mock.reset_mock() # Should create an open trade with an open order id @@ -873,10 +964,10 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert trade assert trade.is_open is True assert trade.has_open_orders - assert '22' in trade.open_orders_ids + assert "22" in trade.open_orders_ids # Test calling with price - open_order['id'] = '33' + open_order["id"] = "33" fix_price = 0.06 assert freqtrade.execute_entry(pair, stake_amount, fix_price, is_short=is_short) # Make sure get_rate wasn't called again @@ -884,46 +975,46 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert enter_mm.call_count == 2 call_args = enter_mm.call_args_list[1][1] - assert call_args['pair'] == pair - assert call_args['rate'] == fix_price - assert pytest.approx(call_args['amount']) == round(stake_amount / fix_price * leverage, 8) + assert call_args["pair"] == pair + assert call_args["rate"] == fix_price + assert pytest.approx(call_args["amount"]) == round(stake_amount / fix_price * leverage, 8) # In case of closed order - order['status'] = 'closed' - order['average'] = 10 - order['cost'] = 300 - order['id'] = '444' + order["status"] = "closed" + order["average"] = 10 + order["cost"] = 300 + order["id"] = "444" - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=order)) assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short) trade = Trade.session.scalars(select(Trade)).all()[2] trade.is_short = is_short assert trade assert not trade.has_open_orders assert trade.open_rate == 10 - assert trade.stake_amount == round(order['average'] * order['filled'] / leverage, 8) + assert trade.stake_amount == round(order["average"] * order["filled"] / leverage, 8) assert pytest.approx(trade.liquidation_price) == liq_price # In case of rejected or expired order and partially filled - order['status'] = 'expired' - order['amount'] = 30.0 - order['filled'] = 20.0 - order['remaining'] = 10.00 - order['average'] = 0.5 - order['cost'] = 10.0 - order['id'] = '555' - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order)) + order["status"] = "expired" + order["amount"] = 30.0 + order["filled"] = 20.0 + order["remaining"] = 10.00 + order["average"] = 0.5 + order["cost"] = 10.0 + order["id"] = "555" + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=order)) assert freqtrade.execute_entry(pair, stake_amount) trade = Trade.session.scalars(select(Trade)).all()[3] trade.is_short = is_short assert trade assert not trade.has_open_orders assert trade.open_rate == 0.5 - assert trade.stake_amount == round(order['average'] 
* order['filled'] / leverage, 8) + assert trade.stake_amount == round(order["average"] * order["filled"] / leverage, 8) # Test with custom stake - order['status'] = 'open' - order['id'] = '556' + order["status"] = "open" + order["id"] = "556" freqtrade.strategy.custom_stake_amount = lambda **kwargs: 150.0 assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short) @@ -933,7 +1024,7 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert pytest.approx(trade.stake_amount) == 150 # Exception case - order['id'] = '557' + order["id"] = "557" freqtrade.strategy.custom_stake_amount = lambda **kwargs: 20 / 0 assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short) trade = Trade.session.scalars(select(Trade)).all()[5] @@ -942,27 +1033,27 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert pytest.approx(trade.stake_amount) == 2.0 # In case of the order is rejected and not filled at all - order['status'] = 'rejected' - order['amount'] = 30.0 * leverage - order['filled'] = 0.0 - order['remaining'] = 30.0 - order['average'] = 0.5 - order['cost'] = 0.0 - order['id'] = '66' - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=order)) + order["status"] = "rejected" + order["amount"] = 30.0 * leverage + order["filled"] = 0.0 + order["remaining"] = 30.0 + order["average"] = 0.5 + order["cost"] = 0.0 + order["id"] = "66" + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=order)) assert not freqtrade.execute_entry(pair, stake_amount) - assert freqtrade.strategy.leverage.call_count == 0 if trading_mode == 'spot' else 2 + assert freqtrade.strategy.leverage.call_count == 0 if trading_mode == "spot" else 2 # Fail to get price... - mocker.patch(f'{EXMS}.get_rate', MagicMock(return_value=0.0)) + mocker.patch(f"{EXMS}.get_rate", MagicMock(return_value=0.0)) with pytest.raises(PricingError, match="Could not determine entry price."): freqtrade.execute_entry(pair, stake_amount, is_short=is_short) # In case of custom entry price - mocker.patch(f'{EXMS}.get_rate', return_value=0.50) - order['status'] = 'open' - order['id'] = '5566' + mocker.patch(f"{EXMS}.get_rate", return_value=0.50) + order["status"] = "open" + order["id"] = "5566" freqtrade.strategy.custom_entry_price = lambda **kwargs: 0.508 assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short) trade = Trade.session.scalars(select(Trade)).all()[6] @@ -972,8 +1063,8 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, # In case of custom entry price set to None - order['status'] = 'open' - order['id'] = '5567' + order["status"] = "open" + order["id"] = "5567" freqtrade.strategy.custom_entry_price = lambda **kwargs: None mocker.patch.multiple( @@ -988,8 +1079,8 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, assert trade.open_rate_requested == 10 # In case of custom entry price not float type - order['status'] = 'open' - order['id'] = '5568' + order["status"] = "open" + order["id"] = "5568" freqtrade.strategy.custom_entry_price = lambda **kwargs: "string price" assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short) trade = Trade.session.scalars(select(Trade)).all()[8] @@ -1003,8 +1094,8 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, # In case of too high stake amount - order['status'] = 'open' - order['id'] = '55672' + order["status"] = "open" + order["id"] = "55672" mocker.patch.multiple( EXMS, @@ -1017,13 +1108,13 @@ def test_execute_entry(mocker, default_conf_usdt, fee, limit_order, 
     trade.is_short = is_short
     assert pytest.approx(trade.stake_amount) == 500

-    order['id'] = '55673'
+    order["id"] = "55673"
     freqtrade.strategy.leverage.reset_mock()
     assert freqtrade.execute_entry(pair, 200, leverage_=3)
     assert freqtrade.strategy.leverage.call_count == 0
     trade = Trade.session.scalars(select(Trade)).all()[10]
-    assert trade.leverage == 1 if trading_mode == 'spot' else 3
+    assert trade.leverage == 1 if trading_mode == "spot" else 3


 @pytest.mark.parametrize("is_short", [False, True])
@@ -1031,27 +1122,23 @@ def test_execute_entry_confirm_error(mocker, default_conf_usdt, fee, limit_order
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
     mocker.patch.multiple(
         EXMS,
-        fetch_ticker=MagicMock(return_value={
-            'bid': 1.9,
-            'ask': 2.2,
-            'last': 1.9
-        }),
+        fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}),
         create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
         get_rate=MagicMock(return_value=0.11),
         get_min_pair_stake_amount=MagicMock(return_value=1),
         get_fee=fee,
     )
     stake_amount = 2
-    pair = 'ETH/USDT'
+    pair = "ETH/USDT"

     freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=ValueError)
     assert freqtrade.execute_entry(pair, stake_amount)

-    limit_order[entry_side(is_short)]['id'] = '222'
+    limit_order[entry_side(is_short)]["id"] = "222"
     freqtrade.strategy.confirm_trade_entry = MagicMock(side_effect=Exception)
     assert freqtrade.execute_entry(pair, stake_amount)

-    limit_order[entry_side(is_short)]['id'] = '2223'
+    limit_order[entry_side(is_short)]["id"] = "2223"
     freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True)
     assert freqtrade.execute_entry(pair, stake_amount)
@@ -1059,18 +1146,44 @@ def test_execute_entry_confirm_error(mocker, default_conf_usdt, fee, limit_order
     assert not freqtrade.execute_entry(pair, stake_amount)


+@pytest.mark.parametrize("is_short", [False, True])
+def test_execute_entry_fully_canceled_on_create(
+    mocker, default_conf_usdt, fee, limit_order_open, is_short
+) -> None:
+    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
+
+    mock_hce = mocker.spy(freqtrade, "handle_cancel_enter")
+    order = limit_order_open[entry_side(is_short)]
+    pair = "ETH/USDT"
+    order["symbol"] = pair
+    order["status"] = "canceled"
+    order["filled"] = 0.0
+
+    mocker.patch.multiple(
+        EXMS,
+        fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}),
+        create_order=MagicMock(return_value=order),
+        get_rate=MagicMock(return_value=0.11),
+        get_min_pair_stake_amount=MagicMock(return_value=1),
+        get_fee=fee,
+    )
+    stake_amount = 2
+
+    assert freqtrade.execute_entry(pair, stake_amount)
+    assert mock_hce.call_count == 1
+    # an order that immediately cancels completely should delete the order.
+    trades = Trade.get_trades().all()
+    assert len(trades) == 0
+
+
 @pytest.mark.parametrize("is_short", [False, True])
 def test_execute_entry_min_leverage(mocker, default_conf_usdt, fee, limit_order, is_short) -> None:
-    default_conf_usdt['trading_mode'] = 'futures'
-    default_conf_usdt['margin_mode'] = 'isolated'
+    default_conf_usdt["trading_mode"] = "futures"
+    default_conf_usdt["margin_mode"] = "isolated"
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
     mocker.patch.multiple(
         EXMS,
-        fetch_ticker=MagicMock(return_value={
-            'bid': 1.9,
-            'ask': 2.2,
-            'last': 1.9
-        }),
+        fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}),
         create_order=MagicMock(return_value=limit_order[entry_side(is_short)]),
         get_rate=MagicMock(return_value=0.11),
         # Minimum stake-amount is ~5$
@@ -1080,7 +1193,7 @@ def test_execute_entry_min_leverage(mocker, default_conf_usdt, fee, limit_order,
         get_max_leverage=MagicMock(return_value=5.0),
     )
     stake_amount = 2
-    pair = 'SOL/BUSD:BUSD'
+    pair = "SOL/BUSD:BUSD"
     freqtrade.strategy.leverage = MagicMock(return_value=5.0)

     assert freqtrade.execute_entry(pair, stake_amount, is_short=is_short)
@@ -1089,27 +1202,28 @@ def test_execute_entry_min_leverage(mocker, default_conf_usdt, fee, limit_order,
     # assert trade.stake_amount == 2


-@pytest.mark.parametrize('return_value,side_effect,log_message', [
-    (False, None, 'Found no enter signals for whitelisted currencies. Trying again...'),
-    (None, DependencyException, 'Unable to create trade for ETH/USDT: ')
-])
-def test_enter_positions(mocker, default_conf_usdt, return_value, side_effect,
-                         log_message, caplog) -> None:
+@pytest.mark.parametrize(
+    "return_value,side_effect,log_message",
+    [
+        (False, None, "Found no enter signals for whitelisted currencies. Trying again..."),
+        (None, DependencyException, "Unable to create trade for ETH/USDT: "),
+    ],
+)
+def test_enter_positions(
+    mocker, default_conf_usdt, return_value, side_effect, log_message, caplog
+) -> None:
     caplog.set_level(logging.DEBUG)
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
     mock_ct = mocker.patch(
-        'freqtrade.freqtradebot.FreqtradeBot.create_trade',
-        MagicMock(
-            return_value=return_value,
-            side_effect=side_effect
-        )
+        "freqtrade.freqtradebot.FreqtradeBot.create_trade",
+        MagicMock(return_value=return_value, side_effect=side_effect),
     )
     n = freqtrade.enter_positions()
     assert n == 0
     assert log_has(log_message, caplog)
     # create_trade should be called once for every pair in the whitelist.
- assert mock_ct.call_count == len(default_conf_usdt['exchange']['pair_whitelist']) + assert mock_ct.call_count == len(default_conf_usdt["exchange"]["pair_whitelist"]) @pytest.mark.usefixtures("init_persistence") @@ -1117,32 +1231,33 @@ def test_enter_positions(mocker, default_conf_usdt, return_value, side_effect, def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True)) - mocker.patch(f'{EXMS}.fetch_order', return_value=limit_order[entry_side(is_short)]) - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_trade", MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.fetch_order", return_value=limit_order[entry_side(is_short)]) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) - order_id = '123' + order_id = "123" trade = Trade( - pair='ETH/USDT', - fee_open=0.001, - fee_close=0.001, - open_rate=0.01, - open_date=dt_now(), - stake_amount=0.01, - amount=11, - exchange="binance", - is_short=is_short, - leverage=1, - ) - trade.orders.append(Order( - ft_order_side=entry_side(is_short), - price=0.01, - ft_pair=trade.pair, - ft_amount=trade.amount, - ft_price=trade.open_rate, - order_id=order_id, - - )) + pair="ETH/USDT", + fee_open=0.001, + fee_close=0.001, + open_rate=0.01, + open_date=dt_now(), + stake_amount=0.01, + amount=11, + exchange="binance", + is_short=is_short, + leverage=1, + ) + trade.orders.append( + Order( + ft_order_side=entry_side(is_short), + price=0.01, + ft_pair=trade.pair, + ft_amount=trade.amount, + ft_price=trade.open_rate, + order_id=order_id, + ) + ) Trade.session.add(trade) Trade.commit() trades = [trade] @@ -1150,9 +1265,9 @@ def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog n = freqtrade.exit_positions(trades) assert n == 0 # Test amount not modified by fee-logic - assert not log_has_re(r'Applying fee to amount for Trade .*', caplog) + assert not log_has_re(r"Applying fee to amount for Trade .*", caplog) - gra = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0) + gra = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", return_value=0.0) # test amount modified by fee-logic n = freqtrade.exit_positions(trades) assert n == 0 @@ -1164,11 +1279,11 @@ def test_exit_positions(mocker, default_conf_usdt, limit_order, is_short, caplog def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog, is_short) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) order = limit_order[entry_side(is_short)] - mocker.patch(f'{EXMS}.fetch_order', return_value=order) + mocker.patch(f"{EXMS}.fetch_order", return_value=order) - order_id = '123' + order_id = "123" trade = Trade( - pair='ETH/USDT', + pair="ETH/USDT", fee_open=0.001, fee_close=0.001, open_rate=0.01, @@ -1179,16 +1294,17 @@ def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog is_short=is_short, leverage=1, ) - trade.orders.append(Order( - ft_order_side=entry_side(is_short), - price=0.01, - ft_pair=trade.pair, - ft_amount=trade.amount, - ft_price=trade.open_rate, - order_id=order_id, - ft_is_open=False, - - )) + trade.orders.append( + Order( + ft_order_side=entry_side(is_short), + price=0.01, + ft_pair=trade.pair, + ft_amount=trade.amount, + ft_price=trade.open_rate, + order_id=order_id, + 
ft_is_open=False, + ) + ) Trade.session.add(trade) Trade.commit() freqtrade.wallets.update() @@ -1196,13 +1312,12 @@ def test_exit_positions_exception(mocker, default_conf_usdt, limit_order, caplog # Test raise of DependencyException exception mocker.patch( - 'freqtrade.freqtradebot.FreqtradeBot.handle_trade', - side_effect=DependencyException() + "freqtrade.freqtradebot.FreqtradeBot.handle_trade", side_effect=DependencyException() ) caplog.clear() n = freqtrade.exit_positions(trades) assert n == 0 - assert log_has('Unable to exit trade ETH/USDT: ', caplog) + assert log_has("Unable to exit trade ETH/USDT: ", caplog) @pytest.mark.parametrize("is_short", [False, True]) @@ -1210,12 +1325,12 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) order = limit_order[entry_side(is_short)] - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True)) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter') - mocker.patch(f'{EXMS}.fetch_order', return_value=order) - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.0) - order_id = order['id'] + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_trade", MagicMock(return_value=True)) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot._notify_enter") + mocker.patch(f"{EXMS}.fetch_order", return_value=order) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", return_value=0.0) + order_id = order["id"] trade = Trade( fee_open=0.001, @@ -1227,26 +1342,27 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca is_short=is_short, leverage=1, ) - trade.orders.append(Order( - ft_order_side=entry_side(is_short), - price=0.01, - order_id=order_id, - - )) + trade.orders.append( + Order( + ft_order_side=entry_side(is_short), + price=0.01, + order_id=order_id, + ) + ) freqtrade.strategy.order_filled = MagicMock(return_value=None) assert not freqtrade.update_trade_state(trade, None) - assert log_has_re(r'Orderid for trade .* is empty.', caplog) + assert log_has_re(r"Orderid for trade .* is empty.", caplog) caplog.clear() # Add datetime explicitly since sqlalchemy defaults apply only once written to database freqtrade.update_trade_state(trade, order_id) # Test amount not modified by fee-logic - assert not log_has_re(r'Applying fee to .*', caplog) + assert not log_has_re(r"Applying fee to .*", caplog) caplog.clear() assert not trade.has_open_orders - assert trade.amount == order['amount'] + assert trade.amount == order["amount"] assert freqtrade.strategy.order_filled.call_count == 1 - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=0.01) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", return_value=0.01) assert trade.amount == 30.0 # test amount modified by fee-logic freqtrade.update_trade_state(trade, order_id) @@ -1257,14 +1373,14 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca # Assert we call handle_trade() if trade is feasible for execution freqtrade.update_trade_state(trade, order_id) - assert log_has_re('Found open order for.*', caplog) + assert log_has_re("Found open order for.*", caplog) limit_buy_order_usdt_new = deepcopy(limit_order) - limit_buy_order_usdt_new['filled'] = 0.0 - limit_buy_order_usdt_new['status'] = 
'canceled' + limit_buy_order_usdt_new["filled"] = 0.0 + limit_buy_order_usdt_new["status"] = "canceled" freqtrade.strategy.order_filled = MagicMock(return_value=None) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', side_effect=ValueError) - mocker.patch(f'{EXMS}.fetch_order', return_value=limit_buy_order_usdt_new) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", side_effect=ValueError) + mocker.patch(f"{EXMS}.fetch_order", return_value=limit_buy_order_usdt_new) res = freqtrade.update_trade_state(trade, order_id) # Cancelled empty assert res is True @@ -1272,30 +1388,34 @@ def test_update_trade_state(mocker, default_conf_usdt, limit_order, is_short, ca @pytest.mark.parametrize("is_short", [False, True]) -@pytest.mark.parametrize('initial_amount,has_rounding_fee', [ - (30.0 + 1e-14, True), - (8.0, False) -]) +@pytest.mark.parametrize("initial_amount,has_rounding_fee", [(30.0 + 1e-14, True), (8.0, False)]) def test_update_trade_state_withorderdict( - default_conf_usdt, trades_for_order, limit_order, fee, mocker, initial_amount, - has_rounding_fee, is_short, caplog + default_conf_usdt, + trades_for_order, + limit_order, + fee, + mocker, + initial_amount, + has_rounding_fee, + is_short, + caplog, ): order = limit_order[entry_side(is_short)] - trades_for_order[0]['amount'] = initial_amount + trades_for_order[0]["amount"] = initial_amount order_id = "oid_123456" - order['id'] = order_id - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter') + order["id"] = order_id + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot._notify_enter") # fetch_order should not be called!! 
- mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(side_effect=ValueError)) patch_exchange(mocker) - amount = sum(x['amount'] for x in trades_for_order) + amount = sum(x["amount"] for x in trades_for_order) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) caplog.clear() trade = Trade( - pair='LTC/USDT', + pair="LTC/USDT", amount=amount, - exchange='binance', + exchange="binance", open_rate=2.0, open_date=dt_now(), fee_open=fee.return_value, @@ -1312,52 +1432,52 @@ def test_update_trade_state_withorderdict( order_id=order_id, ) ) - log_text = r'Applying fee on amount for .*' + log_text = r"Applying fee on amount for .*" freqtrade.update_trade_state(trade, order_id, order) assert trade.amount != amount if has_rounding_fee: assert pytest.approx(trade.amount) == 29.992 assert log_has_re(log_text, caplog) else: - assert pytest.approx(trade.amount) == order['amount'] + assert pytest.approx(trade.amount) == order["amount"] assert not log_has_re(log_text, caplog) @pytest.mark.parametrize("is_short", [False, True]) -def test_update_trade_state_exception(mocker, default_conf_usdt, is_short, limit_order, - caplog) -> None: +def test_update_trade_state_exception( + mocker, default_conf_usdt, is_short, limit_order, caplog +) -> None: order = limit_order[entry_side(is_short)] freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch(f'{EXMS}.fetch_order', return_value=order) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter') + mocker.patch(f"{EXMS}.fetch_order", return_value=order) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot._notify_enter") # TODO: should not be magicmock trade = MagicMock() trade.amount = 123 - open_order_id = '123' + open_order_id = "123" # Test raise of OperationalException exception mocker.patch( - 'freqtrade.freqtradebot.FreqtradeBot.get_real_amount', - side_effect=DependencyException() + "freqtrade.freqtradebot.FreqtradeBot.get_real_amount", side_effect=DependencyException() ) freqtrade.update_trade_state(trade, open_order_id) - assert log_has('Could not update trade amount: ', caplog) + assert log_has("Could not update trade amount: ", caplog) def test_update_trade_state_orderexception(mocker, default_conf_usdt, caplog) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=InvalidOrderException)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(side_effect=InvalidOrderException)) # TODO: should not be magicmock trade = MagicMock() - open_order_id = '123' + open_order_id = "123" # Test raise of OperationalException exception grm_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", MagicMock()) freqtrade.update_trade_state(trade, open_order_id) assert grm_mock.call_count == 0 - assert log_has(f'Unable to fetch order {open_order_id}: ', caplog) + assert log_has(f"Unable to fetch order {open_order_id}: ", caplog) @pytest.mark.parametrize("is_short", [False, True]) @@ -1367,20 +1487,20 @@ def test_update_trade_state_sell( buy_order = limit_order[entry_side(is_short)] open_order = limit_order_open[exit_side(is_short)] l_order = limit_order[exit_side(is_short)] - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) # fetch_order should not be called!! 
- mocker.patch(f'{EXMS}.fetch_order', MagicMock(side_effect=ValueError)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(side_effect=ValueError)) wallet_mock = MagicMock() - mocker.patch('freqtrade.wallets.Wallets.update', wallet_mock) + mocker.patch("freqtrade.wallets.Wallets.update", wallet_mock) patch_exchange(mocker) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) amount = l_order["amount"] wallet_mock.reset_mock() trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=0.0025, fee_close=0.0025, @@ -1390,25 +1510,28 @@ def test_update_trade_state_sell( leverage=1, is_short=is_short, ) - order = Order.parse_from_ccxt_object(buy_order, 'LTC/ETH', entry_side(is_short)) + order = Order.parse_from_ccxt_object(buy_order, "LTC/ETH", entry_side(is_short)) trade.orders.append(order) - order = Order.parse_from_ccxt_object(open_order, 'LTC/ETH', exit_side(is_short)) + order = Order.parse_from_ccxt_object(open_order, "LTC/ETH", exit_side(is_short)) trade.orders.append(order) - assert order.status == 'open' + assert order.status == "open" freqtrade.update_trade_state(trade, trade.open_orders_ids[-1], l_order) - assert trade.amount == l_order['amount'] + assert trade.amount == l_order["amount"] # Wallet needs to be updated after closing a limit-sell order to re-enable buying assert wallet_mock.call_count == 1 assert not trade.is_open # Order is updated by update_trade_state - assert order.status == 'closed' + assert order.status == "closed" -@pytest.mark.parametrize('is_short,close_profit', [ - (False, 0.09451372), - (True, 0.08635224), -]) +@pytest.mark.parametrize( + "is_short,close_profit", + [ + (False, 0.09451372), + (True, 0.08635224), + ], +) def test_handle_trade( default_conf_usdt, limit_order_open, limit_order, fee, mocker, is_short, close_profit ) -> None: @@ -1419,15 +1542,13 @@ def test_handle_trade( patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.19, - 'ask': 2.2, - 'last': 2.19 - }), - create_order=MagicMock(side_effect=[ - enter_order, - open_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 2.19, "ask": 2.2, "last": 2.19}), + create_order=MagicMock( + side_effect=[ + enter_order, + open_order, + ] + ), get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -1443,14 +1564,19 @@ def test_handle_trade( assert trade.is_open is True freqtrade.wallets.update() - patch_get_signal(freqtrade, enter_long=False, exit_short=is_short, - exit_long=not is_short, exit_tag='sell_signal1') + patch_get_signal( + freqtrade, + enter_long=False, + exit_short=is_short, + exit_long=not is_short, + exit_tag="sell_signal1", + ) assert freqtrade.handle_trade(trade) is True - assert trade.open_orders_ids[-1] == exit_order['id'] + assert trade.open_orders_ids[-1] == exit_order["id"] # Simulate fulfilled LIMIT_SELL order for trade trade.orders[-1].ft_is_open = False - trade.orders[-1].status = 'closed' + trade.orders[-1].status = "closed" trade.orders[-1].filled = trade.orders[-1].remaining trade.orders[-1].remaining = 0.0 @@ -1460,7 +1586,7 @@ def test_handle_trade( assert pytest.approx(trade.close_profit) == close_profit assert pytest.approx(trade.calc_profit(trade.close_rate)) == 5.685 assert trade.close_date is not None - assert trade.exit_reason == 'sell_signal1' + assert trade.exit_reason == "sell_signal1" @pytest.mark.parametrize("is_short", [False, True]) @@ -1473,10 +1599,12 @@ def test_handle_overlapping_signals( mocker.patch.multiple( 
EXMS, fetch_ticker=ticker_usdt, - create_order=MagicMock(side_effect=[ - open_order, - {'id': 1234553382}, - ]), + create_order=MagicMock( + side_effect=[ + open_order, + {"id": 1234553382}, + ] + ), get_fee=fee, ) @@ -1540,9 +1668,9 @@ def test_handle_overlapping_signals( @pytest.mark.parametrize("is_short", [False, True]) -def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog, - is_short) -> None: - +def test_handle_trade_roi( + default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog, is_short +) -> None: open_order = limit_order_open[entry_side(is_short)] caplog.set_level(logging.DEBUG) @@ -1551,10 +1679,12 @@ def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, - create_order=MagicMock(side_effect=[ - open_order, - {'id': 1234553382}, - ]), + create_order=MagicMock( + side_effect=[ + open_order, + {"id": 1234553382}, + ] + ), get_fee=fee, ) @@ -1576,15 +1706,13 @@ def test_handle_trade_roi(default_conf_usdt, ticker_usdt, limit_order_open, fee, caplog.clear() patch_get_signal(freqtrade) assert freqtrade.handle_trade(trade) - assert log_has("ETH/USDT - Required profit reached. exit_type=ExitType.ROI", - caplog) + assert log_has("ETH/USDT - Required profit reached. exit_type=ExitType.ROI", caplog) @pytest.mark.parametrize("is_short", [False, True]) def test_handle_trade_use_exit_signal( default_conf_usdt, ticker_usdt, limit_order_open, fee, mocker, caplog, is_short ) -> None: - enter_open_order = limit_order_open[exit_side(is_short)] exit_open_order = limit_order_open[entry_side(is_short)] @@ -1594,10 +1722,12 @@ def test_handle_trade_use_exit_signal( mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, - create_order=MagicMock(side_effect=[ - enter_open_order, - exit_open_order, - ]), + create_order=MagicMock( + side_effect=[ + enter_open_order, + exit_open_order, + ] + ), get_fee=fee, ) @@ -1618,8 +1748,7 @@ def test_handle_trade_use_exit_signal( else: patch_get_signal(freqtrade, enter_long=False, exit_long=True) assert freqtrade.handle_trade(trade) - assert log_has("ETH/USDT - Sell signal received. exit_type=ExitType.EXIT_SIGNAL", - caplog) + assert log_has("ETH/USDT - Sell signal received. 
exit_type=ExitType.EXIT_SIGNAL", caplog) @pytest.mark.parametrize("is_short", [False, True]) @@ -1647,44 +1776,49 @@ def test_close_trade( trade.is_short = is_short assert trade - oobj = Order.parse_from_ccxt_object(enter_order, enter_order['symbol'], trade.entry_side) + oobj = Order.parse_from_ccxt_object(enter_order, enter_order["symbol"], trade.entry_side) trade.update_trade(oobj) - oobj = Order.parse_from_ccxt_object(exit_order, exit_order['symbol'], trade.exit_side) + oobj = Order.parse_from_ccxt_object(exit_order, exit_order["symbol"], trade.exit_side) trade.update_trade(oobj) assert trade.is_open is False - with pytest.raises(DependencyException, match=r'.*closed trade.*'): + with pytest.raises(DependencyException, match=r".*closed trade.*"): freqtrade.handle_trade(trade) def test_bot_loop_start_called_once(mocker, default_conf_usdt, caplog): ftbot = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.create_trade') + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.create_trade") patch_get_signal(ftbot) ftbot.strategy.bot_loop_start = MagicMock(side_effect=ValueError) ftbot.strategy.analyze = MagicMock() ftbot.process() - assert log_has_re(r'Strategy caused the following exception.*', caplog) + assert log_has_re(r"Strategy caused the following exception.*", caplog) assert ftbot.strategy.bot_loop_start.call_count == 1 assert ftbot.strategy.analyze.call_count == 1 @pytest.mark.parametrize("is_short", [False, True]) def test_manage_open_orders_entry_usercustom( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + is_short, ) -> None: - old_order = limit_sell_order_old if is_short else limit_buy_order_old - old_order['id'] = open_trade.open_orders_ids[0] + old_order["id"] = open_trade.open_orders_ids[0] default_conf_usdt["unfilledtimeout"] = {"entry": 1400, "exit": 30} rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock(return_value=old_order) cancel_enter_order = deepcopy(old_order) - cancel_enter_order['status'] = 'canceled' + cancel_enter_order["status"] = "canceled" cancel_order_wr_mock = MagicMock(return_value=cancel_enter_order) patch_exchange(mocker) @@ -1694,12 +1828,12 @@ def test_manage_open_orders_entry_usercustom( fetch_order=MagicMock(return_value=old_order), cancel_order=cancel_order_mock, cancel_order_with_result=cancel_order_wr_mock, - get_fee=fee + get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) open_trade.is_short = is_short - open_trade.orders[0].side = 'sell' if is_short else 'buy' - open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy' + open_trade.orders[0].side = "sell" if is_short else "buy" + open_trade.orders[0].ft_order_side = "sell" if is_short else "buy" Trade.session.add(open_trade) Trade.commit() @@ -1716,7 +1850,7 @@ def test_manage_open_orders_entry_usercustom( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() nb_trades = len(trades) assert nb_trades == 1 assert freqtrade.strategy.check_entry_timeout.call_count == 1 @@ -1729,7 +1863,7 @@ def test_manage_open_orders_entry_usercustom( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() nb_trades = len(trades) assert nb_trades == 1 assert 
freqtrade.strategy.check_entry_timeout.call_count == 1 @@ -1744,7 +1878,7 @@ def test_manage_open_orders_entry_usercustom( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() nb_trades = len(trades) assert nb_trades == 0 assert freqtrade.strategy.check_entry_timeout.call_count == 1 @@ -1752,16 +1886,22 @@ def test_manage_open_orders_entry_usercustom( @pytest.mark.parametrize("is_short", [False, True]) def test_manage_open_orders_entry( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + is_short, ) -> None: old_order = limit_sell_order_old if is_short else limit_buy_order_old rpc_mock = patch_RPCManager(mocker) - order = Order.parse_from_ccxt_object(old_order, 'mocked', 'buy') + order = Order.parse_from_ccxt_object(old_order, "mocked", "buy") open_trade.orders[0] = order limit_entry_cancel = deepcopy(old_order) - limit_entry_cancel['status'] = 'canceled' + limit_entry_cancel["status"] = "canceled" cancel_order_mock = MagicMock(return_value=limit_entry_cancel) patch_exchange(mocker) mocker.patch.multiple( @@ -1769,7 +1909,7 @@ def test_manage_open_orders_entry( fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=old_order), cancel_order_with_result=cancel_order_mock, - get_fee=fee + get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -1788,7 +1928,7 @@ def test_manage_open_orders_entry( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() nb_trades = len(trades) assert nb_trades == 0 # Custom user entry-timeout is never called @@ -1799,21 +1939,28 @@ def test_manage_open_orders_entry( @pytest.mark.parametrize("is_short", [False, True]) def test_adjust_entry_cancel( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + caplog, + is_short, ) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) old_order = limit_sell_order_old if is_short else limit_buy_order_old - old_order['id'] = open_trade.open_orders[0].order_id + old_order["id"] = open_trade.open_orders[0].order_id limit_entry_cancel = deepcopy(old_order) - limit_entry_cancel['status'] = 'canceled' + limit_entry_cancel["status"] = "canceled" cancel_order_mock = MagicMock(return_value=limit_entry_cancel) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=old_order), cancel_order_with_result=cancel_order_mock, - get_fee=fee + get_fee=fee, ) open_trade.is_short = is_short @@ -1826,17 +1973,12 @@ def test_adjust_entry_cancel( # check that order is cancelled freqtrade.strategy.adjust_entry_price = MagicMock(return_value=None) freqtrade.manage_open_orders() - trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_trade_id == Trade.id) - ).all() + trades = Trade.session.scalars(select(Trade).where(Order.ft_trade_id == Trade.id)).all() assert len(trades) == 0 assert len(Order.session.scalars(select(Order)).all()) == 0 - assert log_has_re( - f"{'Sell' if is_short else 'Buy'} order user requested order cancel*", caplog) - assert log_has_re( - f"{'Sell' if is_short else 'Buy'} order fully cancelled.*", caplog) + assert log_has_re(f"{'Sell' if 
is_short else 'Buy'} order user requested order cancel*", caplog) + assert log_has_re(f"{'Sell' if is_short else 'Buy'} order fully cancelled.*", caplog) # Entry adjustment is called assert freqtrade.strategy.adjust_entry_price.call_count == 1 @@ -1844,14 +1986,21 @@ def test_adjust_entry_cancel( @pytest.mark.parametrize("is_short", [False, True]) def test_adjust_entry_replace_fail( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + caplog, + is_short, ) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) old_order = limit_sell_order_old if is_short else limit_buy_order_old - old_order['id'] = open_trade.open_orders[0].order_id + old_order["id"] = open_trade.open_orders[0].order_id limit_entry_cancel = deepcopy(old_order) - limit_entry_cancel['status'] = 'open' + limit_entry_cancel["status"] = "open" cancel_order_mock = MagicMock(return_value=limit_entry_cancel) fetch_order_mock = MagicMock(return_value=old_order) mocker.patch.multiple( @@ -1859,9 +2008,9 @@ def test_adjust_entry_replace_fail( fetch_ticker=ticker_usdt, fetch_order=fetch_order_mock, cancel_order_with_result=cancel_order_mock, - get_fee=fee + get_fee=fee, ) - mocker.patch('freqtrade.freqtradebot.sleep') + mocker.patch("freqtrade.freqtradebot.sleep") open_trade.is_short = is_short Trade.session.add(open_trade) @@ -1873,16 +2022,12 @@ def test_adjust_entry_replace_fail( # Attempt replace order - which fails freqtrade.strategy.adjust_entry_price = MagicMock(return_value=12234) freqtrade.manage_open_orders() - trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_trade_id == Trade.id) - ).all() + trades = Trade.session.scalars(select(Trade).where(Order.ft_trade_id == Trade.id)).all() assert len(trades) == 0 assert len(Order.session.scalars(select(Order)).all()) == 0 assert fetch_order_mock.call_count == 4 - assert log_has_re( - r"Could not cancel order.*, therefore not replacing\.", caplog) + assert log_has_re(r"Could not cancel order.*, therefore not replacing\.", caplog) # Entry adjustment is called assert freqtrade.strategy.adjust_entry_price.call_count == 1 @@ -1890,14 +2035,21 @@ def test_adjust_entry_replace_fail( @pytest.mark.parametrize("is_short", [False, True]) def test_adjust_entry_replace_fail_create_order( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + caplog, + is_short, ) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) old_order = limit_sell_order_old if is_short else limit_buy_order_old - old_order['id'] = open_trade.open_orders[0].order_id + old_order["id"] = open_trade.open_orders[0].order_id limit_entry_cancel = deepcopy(old_order) - limit_entry_cancel['status'] = 'canceled' + limit_entry_cancel["status"] = "canceled" cancel_order_mock = MagicMock(return_value=limit_entry_cancel) fetch_order_mock = MagicMock(return_value=old_order) mocker.patch.multiple( @@ -1905,11 +2057,12 @@ def test_adjust_entry_replace_fail_create_order( fetch_ticker=ticker_usdt, fetch_order=fetch_order_mock, cancel_order_with_result=cancel_order_mock, - get_fee=fee + get_fee=fee, + ) + mocker.patch("freqtrade.freqtradebot.sleep") + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.execute_entry", 
side_effect=DependencyException() ) - mocker.patch('freqtrade.freqtradebot.sleep') - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_entry', - side_effect=DependencyException()) open_trade.is_short = is_short Trade.session.add(open_trade) @@ -1921,28 +2074,31 @@ def test_adjust_entry_replace_fail_create_order( # Attempt replace order - which fails freqtrade.strategy.adjust_entry_price = MagicMock(return_value=12234) freqtrade.manage_open_orders() - trades = Trade.session.scalars( - select(Trade) - .where(Trade.is_open.is_(True)) - ).all() + trades = Trade.session.scalars(select(Trade).where(Trade.is_open.is_(True))).all() assert len(trades) == 0 assert len(Order.session.scalars(select(Order)).all()) == 0 assert fetch_order_mock.call_count == 1 - assert log_has_re( - r"Could not replace order for.*", caplog) + assert log_has_re(r"Could not replace order for.*", caplog) @pytest.mark.parametrize("is_short", [False, True]) def test_adjust_entry_maintain_replace( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + caplog, + is_short, ) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) old_order = limit_sell_order_old if is_short else limit_buy_order_old - old_order['id'] = open_trade.open_orders_ids[0] + old_order["id"] = open_trade.open_orders_ids[0] limit_entry_cancel = deepcopy(old_order) - limit_entry_cancel['status'] = 'canceled' + limit_entry_cancel["status"] = "canceled" cancel_order_mock = MagicMock(return_value=limit_entry_cancel) mocker.patch.multiple( EXMS, @@ -1961,13 +2117,11 @@ def test_adjust_entry_maintain_replace( freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False) # Check that order is maintained - freqtrade.strategy.adjust_entry_price = MagicMock(return_value=old_order['price']) + freqtrade.strategy.adjust_entry_price = MagicMock(return_value=old_order["price"]) freqtrade.manage_open_orders() trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_is_open.is_(True)) - .where(Order.ft_trade_id == Trade.id) - ).all() + select(Trade).where(Order.ft_is_open.is_(True)).where(Order.ft_trade_id == Trade.id) + ).all() assert len(trades) == 1 assert len(Order.get_open_orders()) == 1 # Entry adjustment is called @@ -1982,41 +2136,45 @@ def test_adjust_entry_maintain_replace( assert freqtrade.strategy.adjust_entry_price.call_count == 1 trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_is_open.is_(True)) - .where(Order.ft_trade_id == Trade.id) - ).all() + select(Trade).where(Order.ft_is_open.is_(True)).where(Order.ft_trade_id == Trade.id) + ).all() assert len(trades) == 1 nb_all_orders = len(Order.session.scalars(select(Order)).all()) assert nb_all_orders == 2 # New order seems to be in closed status? 
# nb_open_orders = len(Order.get_open_orders()) # assert nb_open_orders == 1 - assert log_has_re( - f"{'Sell' if is_short else 'Buy'} order cancelled to be replaced*", caplog) + assert log_has_re(f"{'Sell' if is_short else 'Buy'} order cancelled to be replaced*", caplog) # Entry adjustment is called assert freqtrade.strategy.adjust_entry_price.call_count == 1 @pytest.mark.parametrize("is_short", [False, True]) def test_check_handle_cancelled_buy( - default_conf_usdt, ticker_usdt, limit_buy_order_old, open_trade, - limit_sell_order_old, fee, mocker, caplog, is_short + default_conf_usdt, + ticker_usdt, + limit_buy_order_old, + open_trade, + limit_sell_order_old, + fee, + mocker, + caplog, + is_short, ) -> None: - """ Handle Buy order cancelled on exchange""" + """Handle Buy order cancelled on exchange""" old_order = limit_sell_order_old if is_short else limit_buy_order_old rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock() patch_exchange(mocker) - old_order.update({"status": "canceled", 'filled': 0.0}) - old_order['side'] = 'buy' if is_short else 'sell' - old_order['id'] = open_trade.open_orders[0].order_id + old_order.update({"status": "canceled", "filled": 0.0}) + old_order["side"] = "buy" if is_short else "sell" + old_order["id"] = open_trade.open_orders[0].order_id mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=old_order), cancel_order=cancel_order_mock, - get_fee=fee + get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) open_trade.is_short = is_short @@ -2028,12 +2186,10 @@ def test_check_handle_cancelled_buy( assert cancel_order_mock.call_count == 0 assert rpc_mock.call_count == 2 trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_is_open.is_(True)) - .where(Order.ft_trade_id == Trade.id) - ).all() + select(Trade).where(Order.ft_is_open.is_(True)).where(Order.ft_trade_id == Trade.id) + ).all() assert len(trades) == 0 - exit_name = 'Buy' if is_short else 'Sell' + exit_name = "Buy" if is_short else "Sell" assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog) @@ -2050,7 +2206,7 @@ def test_manage_open_orders_buy_exception( fetch_ticker=ticker_usdt, fetch_order=MagicMock(side_effect=ExchangeError), cancel_order=cancel_order_mock, - get_fee=fee + get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -2067,30 +2223,30 @@ def test_manage_open_orders_buy_exception( @pytest.mark.parametrize("is_short", [False, True]) def test_manage_open_orders_exit_usercustom( - default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker, - is_short, open_trade_usdt, caplog + default_conf_usdt, ticker_usdt, limit_sell_order_old, mocker, is_short, open_trade_usdt, caplog ) -> None: default_conf_usdt["unfilledtimeout"] = {"entry": 1440, "exit": 1440, "exit_timeout_count": 1} - limit_sell_order_old['amount'] = open_trade_usdt.amount - limit_sell_order_old['remaining'] = open_trade_usdt.amount + limit_sell_order_old["amount"] = open_trade_usdt.amount + limit_sell_order_old["remaining"] = open_trade_usdt.amount if is_short: - limit_sell_order_old['side'] = 'buy' + limit_sell_order_old["side"] = "buy" open_trade_usdt.is_short = is_short - open_exit_order = Order.parse_from_ccxt_object(limit_sell_order_old, 'mocked', - 'buy' if is_short else 'sell') + open_exit_order = Order.parse_from_ccxt_object( + limit_sell_order_old, "mocked", "buy" if is_short else "sell" + ) open_trade_usdt.orders[-1] = open_exit_order rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock() patch_exchange(mocker) - 
mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0) - et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit') + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.0) + et_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit") mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=limit_sell_order_old), - cancel_order=cancel_order_mock + cancel_order=cancel_order_mock, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -2134,23 +2290,24 @@ def test_manage_open_orders_exit_usercustom( # 2nd canceled trade - Fail execute exit caplog.clear() - mocker.patch('freqtrade.persistence.Trade.get_canceled_exit_order_count', return_value=1) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit', - side_effect=DependencyException) + mocker.patch("freqtrade.persistence.Trade.get_canceled_exit_order_count", return_value=1) + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit", side_effect=DependencyException + ) freqtrade.manage_open_orders() - assert log_has_re('Unable to emergency exit .*', caplog) + assert log_has_re("Unable to emergency exit .*", caplog) - et_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit') + et_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.execute_trade_exit") caplog.clear() # 2nd canceled trade ... # If cancelling fails - no emergency exit! - with patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit', return_value=False): + with patch("freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit", return_value=False): freqtrade.manage_open_orders() assert et_mock.call_count == 0 freqtrade.manage_open_orders() - assert log_has_re('Emergency exiting trade.*', caplog) + assert log_has_re("Emergency exiting trade.*", caplog) assert et_mock.call_count == 1 @@ -2160,8 +2317,8 @@ def test_manage_open_orders_exit( ) -> None: rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock() - limit_sell_order_old['id'] = '123456789_exit' - limit_sell_order_old['side'] = 'buy' if is_short else 'sell' + limit_sell_order_old["id"] = "123456789_exit" + limit_sell_order_old["side"] = "buy" if is_short else "sell" patch_exchange(mocker) mocker.patch.multiple( EXMS, @@ -2194,22 +2351,21 @@ def test_manage_open_orders_exit( @pytest.mark.parametrize("is_short", [False, True]) def test_check_handle_cancelled_exit( - default_conf_usdt, ticker_usdt, limit_sell_order_old, open_trade_usdt, - is_short, mocker, caplog + default_conf_usdt, ticker_usdt, limit_sell_order_old, open_trade_usdt, is_short, mocker, caplog ) -> None: - """ Handle sell order cancelled on exchange""" + """Handle sell order cancelled on exchange""" rpc_mock = patch_RPCManager(mocker) cancel_order_mock = MagicMock() - limit_sell_order_old.update({"status": "canceled", 'filled': 0.0}) - limit_sell_order_old['side'] = 'buy' if is_short else 'sell' - limit_sell_order_old['id'] = open_trade_usdt.open_orders[0].order_id + limit_sell_order_old.update({"status": "canceled", "filled": 0.0}) + limit_sell_order_old["side"] = "buy" if is_short else "sell" + limit_sell_order_old["id"] = open_trade_usdt.open_orders[0].order_id patch_exchange(mocker) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=limit_sell_order_old), - cancel_order_with_result=cancel_order_mock + cancel_order_with_result=cancel_order_mock, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -2225,25 +2381,30 @@ def test_check_handle_cancelled_exit( 
assert cancel_order_mock.call_count == 0 assert rpc_mock.call_count == 2 assert open_trade_usdt.is_open is True - exit_name = 'Buy' if is_short else 'Sell' + exit_name = "Buy" if is_short else "Sell" assert log_has_re(f"{exit_name} order cancelled on exchange for Trade.*", caplog) @pytest.mark.parametrize("is_short", [False, True]) @pytest.mark.parametrize("leverage", [1, 3, 5, 10]) def test_manage_open_orders_partial( - default_conf_usdt, ticker_usdt, limit_buy_order_old_partial, is_short, leverage, - open_trade, mocker + default_conf_usdt, + ticker_usdt, + limit_buy_order_old_partial, + is_short, + leverage, + open_trade, + mocker, ) -> None: rpc_mock = patch_RPCManager(mocker) open_trade.is_short = is_short open_trade.leverage = leverage - open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy' + open_trade.orders[0].ft_order_side = "sell" if is_short else "buy" - limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id - limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy' + limit_buy_order_old_partial["id"] = open_trade.orders[0].order_id + limit_buy_order_old_partial["side"] = "sell" if is_short else "buy" limit_buy_canceled = deepcopy(limit_buy_order_old_partial) - limit_buy_canceled['status'] = 'canceled' + limit_buy_canceled["status"] = "canceled" cancel_order_mock = MagicMock(return_value=limit_buy_canceled) patch_exchange(mocker) @@ -2251,7 +2412,7 @@ def test_manage_open_orders_partial( EXMS, fetch_ticker=ticker_usdt, fetch_order=MagicMock(return_value=limit_buy_order_old_partial), - cancel_order_with_result=cancel_order_mock + cancel_order_with_result=cancel_order_mock, ) freqtrade = FreqtradeBot(default_conf_usdt) prior_stake = open_trade.stake_amount @@ -2263,9 +2424,7 @@ def test_manage_open_orders_partial( freqtrade.manage_open_orders() assert cancel_order_mock.call_count == 1 assert rpc_mock.call_count == 3 - trades = Trade.session.scalars( - select(Trade) - ).all() + trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 1 assert trades[0].amount == 23.0 assert trades[0].stake_amount == open_trade.open_rate * trades[0].amount / leverage @@ -2275,20 +2434,27 @@ def test_manage_open_orders_partial( @pytest.mark.parametrize("is_short", [False, True]) def test_manage_open_orders_partial_fee( - default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short, - limit_buy_order_old_partial, trades_for_order, - limit_buy_order_old_partial_canceled, mocker + default_conf_usdt, + ticker_usdt, + open_trade, + caplog, + fee, + is_short, + limit_buy_order_old_partial, + trades_for_order, + limit_buy_order_old_partial_canceled, + mocker, ) -> None: open_trade.is_short = is_short - open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy' + open_trade.orders[0].ft_order_side = "sell" if is_short else "buy" rpc_mock = patch_RPCManager(mocker) - limit_buy_order_old_partial['id'] = open_trade.orders[0].order_id - limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0] - limit_buy_order_old_partial['side'] = 'sell' if is_short else 'buy' - limit_buy_order_old_partial_canceled['side'] = 'sell' if is_short else 'buy' + limit_buy_order_old_partial["id"] = open_trade.orders[0].order_id + limit_buy_order_old_partial_canceled["id"] = open_trade.open_orders_ids[0] + limit_buy_order_old_partial["side"] = "sell" if is_short else "buy" + limit_buy_order_old_partial_canceled["side"] = "sell" if is_short else "buy" cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled) - 
mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=0)) + mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=0)) patch_exchange(mocker) mocker.patch.multiple( EXMS, @@ -2299,7 +2465,7 @@ def test_manage_open_orders_partial_fee( ) freqtrade = FreqtradeBot(default_conf_usdt) - assert open_trade.amount == limit_buy_order_old_partial['amount'] + assert open_trade.amount == limit_buy_order_old_partial["amount"] open_trade.fee_open = fee() open_trade.fee_close = fee() @@ -2313,14 +2479,14 @@ def test_manage_open_orders_partial_fee( assert cancel_order_mock.call_count == 1 assert rpc_mock.call_count == 3 - trades = Trade.session.scalars( - select(Trade) - .where(Order.ft_trade_id == Trade.id) - ).all() + trades = Trade.session.scalars(select(Trade).where(Order.ft_trade_id == Trade.id)).all() assert len(trades) == 1 # Verify that trade has been updated - assert trades[0].amount == (limit_buy_order_old_partial['amount'] - - limit_buy_order_old_partial['remaining']) - 0.023 + assert ( + trades[0].amount + == (limit_buy_order_old_partial["amount"] - limit_buy_order_old_partial["remaining"]) + - 0.023 + ) assert not trades[0].has_open_orders assert trades[0].fee_updated(open_trade.entry_side) assert pytest.approx(trades[0].fee_open) == 0.001 @@ -2328,17 +2494,24 @@ def test_manage_open_orders_partial_fee( @pytest.mark.parametrize("is_short", [False, True]) def test_manage_open_orders_partial_except( - default_conf_usdt, ticker_usdt, open_trade, caplog, fee, is_short, - limit_buy_order_old_partial, trades_for_order, - limit_buy_order_old_partial_canceled, mocker + default_conf_usdt, + ticker_usdt, + open_trade, + caplog, + fee, + is_short, + limit_buy_order_old_partial, + trades_for_order, + limit_buy_order_old_partial_canceled, + mocker, ) -> None: open_trade.is_short = is_short - open_trade.orders[0].ft_order_side = 'sell' if is_short else 'buy' + open_trade.orders[0].ft_order_side = "sell" if is_short else "buy" rpc_mock = patch_RPCManager(mocker) - limit_buy_order_old_partial_canceled['id'] = open_trade.open_orders_ids[0] - limit_buy_order_old_partial['id'] = open_trade.open_orders_ids[0] + limit_buy_order_old_partial_canceled["id"] = open_trade.open_orders_ids[0] + limit_buy_order_old_partial["id"] = open_trade.open_orders_ids[0] if is_short: - limit_buy_order_old_partial['side'] = 'sell' + limit_buy_order_old_partial["side"] = "sell" cancel_order_mock = MagicMock(return_value=limit_buy_order_old_partial_canceled) patch_exchange(mocker) mocker.patch.multiple( @@ -2348,11 +2521,13 @@ def test_manage_open_orders_partial_except( cancel_order_with_result=cancel_order_mock, get_trades_for_order=MagicMock(return_value=trades_for_order), ) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', - MagicMock(side_effect=DependencyException)) + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.get_real_amount", + MagicMock(side_effect=DependencyException), + ) freqtrade = FreqtradeBot(default_conf_usdt) - assert open_trade.amount == limit_buy_order_old_partial['amount'] + assert open_trade.amount == limit_buy_order_old_partial["amount"] open_trade.fee_open = fee() open_trade.fee_close = fee() @@ -2366,34 +2541,34 @@ def test_manage_open_orders_partial_except( assert cancel_order_mock.call_count == 1 assert rpc_mock.call_count == 3 - trades = Trade.session.scalars( - select(Trade) - ).all() + trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 1 # Verify that trade has been updated - assert trades[0].amount == 
(limit_buy_order_old_partial['amount'] - - limit_buy_order_old_partial['remaining']) + assert trades[0].amount == ( + limit_buy_order_old_partial["amount"] - limit_buy_order_old_partial["remaining"] + ) assert not trades[0].has_open_orders assert trades[0].fee_open == fee() -def test_manage_open_orders_exception(default_conf_usdt, ticker_usdt, open_trade_usdt, mocker, - caplog) -> None: +def test_manage_open_orders_exception( + default_conf_usdt, ticker_usdt, open_trade_usdt, mocker, caplog +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) cancel_order_mock = MagicMock() mocker.patch.multiple( - 'freqtrade.freqtradebot.FreqtradeBot', + "freqtrade.freqtradebot.FreqtradeBot", handle_cancel_enter=MagicMock(), handle_cancel_exit=MagicMock(), ) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, - fetch_order=MagicMock(side_effect=ExchangeError('Oh snap')), - cancel_order=cancel_order_mock + fetch_order=MagicMock(side_effect=ExchangeError("Oh snap")), + cancel_order=cancel_order_mock, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -2402,12 +2577,14 @@ def test_manage_open_orders_exception(default_conf_usdt, ticker_usdt, open_trade caplog.clear() freqtrade.manage_open_orders() - assert log_has_re(r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30.00000000, " - r"is_short=False, leverage=1.0, " - r"open_rate=2.00000000, open_since=" - f"{open_trade_usdt.open_date.strftime('%Y-%m-%d %H:%M:%S')}" - r"\) due to Traceback \(most recent call last\):\n*", - caplog) + assert log_has_re( + r"Cannot query order for Trade\(id=1, pair=ADA/USDT, amount=30.00000000, " + r"is_short=False, leverage=1.0, " + r"open_rate=2.00000000, open_since=" + f"{open_trade_usdt.open_date.strftime('%Y-%m-%d %H:%M:%S')}" + r"\) due to Traceback \(most recent call last\):\n*", + caplog, + ) @pytest.mark.parametrize("is_short", [False, True]) @@ -2416,11 +2593,11 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_ patch_exchange(mocker) l_order = deepcopy(limit_order[entry_side(is_short)]) cancel_entry_order = deepcopy(limit_order[entry_side(is_short)]) - cancel_entry_order['status'] = 'canceled' - del cancel_entry_order['filled'] + cancel_entry_order["status"] = "canceled" + del cancel_entry_order["filled"] cancel_order_mock = MagicMock(return_value=cancel_entry_order) - mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock) + mocker.patch(f"{EXMS}.cancel_order_with_result", cancel_order_mock) freqtrade = FreqtradeBot(default_conf_usdt) freqtrade._notify_enter_cancel = MagicMock() @@ -2429,34 +2606,34 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_ Trade.session.add(trade) Trade.commit() - l_order['filled'] = 0.0 - l_order['status'] = 'open' - reason = CANCEL_REASON['TIMEOUT'] + l_order["filled"] = 0.0 + l_order["status"] = "open" + reason = CANCEL_REASON["TIMEOUT"] assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert cancel_order_mock.call_count == 1 cancel_order_mock.reset_mock() caplog.clear() - l_order['filled'] = 0.01 + l_order["filled"] = 0.01 assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert cancel_order_mock.call_count == 0 assert log_has_re("Order .* for .* not cancelled, as the filled amount.* unexitable.*", caplog) caplog.clear() cancel_order_mock.reset_mock() - l_order['filled'] = 2 + l_order["filled"] = 2 assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert cancel_order_mock.call_count == 
1 # Order remained open for some reason (cancel failed) - cancel_entry_order['status'] = 'open' + cancel_entry_order["status"] = "open" cancel_order_mock = MagicMock(return_value=cancel_entry_order) - mocker.patch(f'{EXMS}.cancel_order_with_result', cancel_order_mock) + mocker.patch(f"{EXMS}.cancel_order_with_result", cancel_order_mock) assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert log_has_re(r"Order .* for .* not cancelled.", caplog) # min_pair_stake empty should not crash - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=None) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=None) assert not freqtrade.handle_cancel_enter( trade, limit_order[entry_side(is_short)], trade.open_orders[0], reason ) @@ -2464,10 +2641,10 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_ # Retry ... cbo = limit_order[entry_side(is_short)] - mocker.patch('freqtrade.freqtradebot.sleep') - cbo['status'] = 'open' - co_mock = mocker.patch(f'{EXMS}.cancel_order_with_result', return_value=cbo) - fo_mock = mocker.patch(f'{EXMS}.fetch_order', return_value=cbo) + mocker.patch("freqtrade.freqtradebot.sleep") + cbo["status"] = "open" + co_mock = mocker.patch(f"{EXMS}.cancel_order_with_result", return_value=cbo) + fo_mock = mocker.patch(f"{EXMS}.fetch_order", return_value=cbo) assert not freqtrade.handle_cancel_enter( trade, cbo, trade.open_orders[0], reason, replacing=True ) @@ -2476,19 +2653,23 @@ def test_handle_cancel_enter(mocker, caplog, default_conf_usdt, limit_order, is_ @pytest.mark.parametrize("is_short", [False, True]) -@pytest.mark.parametrize("limit_buy_order_canceled_empty", ['binance', 'kraken', 'bybit'], - indirect=['limit_buy_order_canceled_empty']) -def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_short, fee, - limit_buy_order_canceled_empty) -> None: +@pytest.mark.parametrize( + "limit_buy_order_canceled_empty", + ["binance", "kraken", "bybit"], + indirect=["limit_buy_order_canceled_empty"], +) +def test_handle_cancel_enter_exchanges( + mocker, caplog, default_conf_usdt, is_short, fee, limit_buy_order_canceled_empty +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) cancel_order_mock = mocker.patch( - f'{EXMS}.cancel_order_with_result', - return_value=limit_buy_order_canceled_empty) - notify_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot._notify_enter_cancel') + f"{EXMS}.cancel_order_with_result", return_value=limit_buy_order_canceled_empty + ) + notify_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot._notify_enter_cancel") freqtrade = FreqtradeBot(default_conf_usdt) - reason = CANCEL_REASON['TIMEOUT'] + reason = CANCEL_REASON["TIMEOUT"] trade = mock_trade_usdt_4(fee, is_short) Trade.session.add(trade) @@ -2498,22 +2679,17 @@ def test_handle_cancel_enter_exchanges(mocker, caplog, default_conf_usdt, is_sho ) assert cancel_order_mock.call_count == 0 assert log_has_re( - f'{trade.entry_side.capitalize()} order fully cancelled. ' - r'Removing .* from database\.', - caplog + f"{trade.entry_side.capitalize()} order fully cancelled. 
" r"Removing .* from database\.", + caplog, ) assert notify_mock.call_count == 1 @pytest.mark.parametrize("is_short", [False, True]) -@pytest.mark.parametrize('cancelorder', [ - {}, - {'remaining': None}, - 'String Return value', - 123 -]) -def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order, is_short, fee, - cancelorder) -> None: +@pytest.mark.parametrize("cancelorder", [{}, {"remaining": None}, "String Return value", 123]) +def test_handle_cancel_enter_corder_empty( + mocker, default_conf_usdt, limit_order, is_short, fee, cancelorder +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) l_order = limit_order[entry_side(is_short)] @@ -2521,7 +2697,7 @@ def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order mocker.patch.multiple( EXMS, cancel_order=cancel_order_mock, - fetch_order=MagicMock(side_effect=InvalidOrderException) + fetch_order=MagicMock(side_effect=InvalidOrderException), ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -2529,26 +2705,27 @@ def test_handle_cancel_enter_corder_empty(mocker, default_conf_usdt, limit_order trade = mock_trade_usdt_4(fee, is_short) Trade.session.add(trade) Trade.commit() - l_order['filled'] = 0.0 - l_order['status'] = 'open' - reason = CANCEL_REASON['TIMEOUT'] + l_order["filled"] = 0.0 + l_order["status"] = "open" + reason = CANCEL_REASON["TIMEOUT"] assert freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert cancel_order_mock.call_count == 1 cancel_order_mock.reset_mock() - l_order['filled'] = 1.0 + l_order["filled"] = 1.0 order = deepcopy(l_order) - order['status'] = 'canceled' - mocker.patch(f'{EXMS}.fetch_order', return_value=order) + order["status"] = "canceled" + mocker.patch(f"{EXMS}.fetch_order", return_value=order) assert not freqtrade.handle_cancel_enter(trade, l_order, trade.open_orders[0], reason) assert cancel_order_mock.call_count == 1 -@pytest.mark.parametrize('is_short', [True, False]) -@pytest.mark.parametrize('leverage', [1, 5]) -@pytest.mark.parametrize('amount', [2, 50]) -def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, - leverage, amount) -> None: +@pytest.mark.parametrize("is_short", [True, False]) +@pytest.mark.parametrize("leverage", [1, 5]) +@pytest.mark.parametrize("amount", [2, 50]) +def test_handle_cancel_exit_limit( + mocker, default_conf_usdt, fee, is_short, leverage, amount +) -> None: send_msg_mock = patch_RPCManager(mocker) patch_exchange(mocker) cancel_order_mock = MagicMock() @@ -2558,17 +2735,17 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, ) entry_price = 0.245441 - mocker.patch(f'{EXMS}.get_rate', return_value=entry_price) - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.2) + mocker.patch(f"{EXMS}.get_rate", return_value=entry_price) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.2) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee') + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_order_fee") freqtrade = FreqtradeBot(default_conf_usdt) trade = Trade( - pair='LTC/USDT', + pair="LTC/USDT", amount=amount * leverage, - exchange='binance', + exchange="binance", open_rate=entry_price, open_date=dt_now() - timedelta(days=2), fee_open=fee.return_value, @@ -2585,7 +2762,7 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, ft_order_side=entry_side(is_short), ft_pair=trade.pair, ft_is_open=False, - order_id='buy_123456', + order_id="buy_123456", status="closed", symbol=trade.pair, 
order_type="market", @@ -2597,12 +2774,12 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, cost=trade.open_rate * trade.amount, order_date=trade.open_date, order_filled_date=trade.open_date, - ), + ), Order( ft_order_side=exit_side(is_short), ft_pair=trade.pair, ft_is_open=True, - order_id='sell_123456', + order_id="sell_123456", status="open", symbol=trade.pair, order_type="limit", @@ -2614,13 +2791,10 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, cost=trade.open_rate * trade.amount, order_date=trade.open_date, order_filled_date=trade.open_date, - ), + ), ] - order = {'id': "sell_123456", - 'remaining': 1, - 'amount': 1, - 'status': "open"} - reason = CANCEL_REASON['TIMEOUT'] + order = {"id": "sell_123456", "remaining": 1, "amount": 1, "status": "open"} + reason = CANCEL_REASON["TIMEOUT"] order_obj = trade.open_orders[-1] send_msg_mock.reset_mock() assert freqtrade.handle_cancel_exit(trade, order, order_obj, reason) @@ -2633,62 +2807,74 @@ def test_handle_cancel_exit_limit(mocker, default_conf_usdt, fee, is_short, send_msg_mock.reset_mock() # Partial exit - below exit threshold - order['amount'] = amount * leverage - order['filled'] = amount * 0.99 * leverage + order["amount"] = amount * leverage + order["filled"] = amount * 0.99 * leverage assert not freqtrade.handle_cancel_exit(trade, order, order_obj, reason) # Assert cancel_order was not called (callcount remains unchanged) assert cancel_order_mock.call_count == 1 assert send_msg_mock.call_count == 1 - assert (send_msg_mock.call_args_list[0][0][0]['reason'] - == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']) + assert ( + send_msg_mock.call_args_list[0][0][0]["reason"] + == CANCEL_REASON["PARTIALLY_FILLED_KEEP_OPEN"] + ) assert not freqtrade.handle_cancel_exit(trade, order, order_obj, reason) - assert (send_msg_mock.call_args_list[0][0][0]['reason'] - == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN']) + assert ( + send_msg_mock.call_args_list[0][0][0]["reason"] + == CANCEL_REASON["PARTIALLY_FILLED_KEEP_OPEN"] + ) # Message should not be iterated again - assert trade.exit_order_status == CANCEL_REASON['PARTIALLY_FILLED_KEEP_OPEN'] + assert trade.exit_order_status == CANCEL_REASON["PARTIALLY_FILLED_KEEP_OPEN"] assert send_msg_mock.call_count == 1 send_msg_mock.reset_mock() - order['filled'] = amount * 0.5 * leverage + order["filled"] = amount * 0.5 * leverage assert freqtrade.handle_cancel_exit(trade, order, order_obj, reason) assert send_msg_mock.call_count == 1 - assert (send_msg_mock.call_args_list[0][0][0]['reason'] - == CANCEL_REASON['PARTIALLY_FILLED']) + assert send_msg_mock.call_args_list[0][0][0]["reason"] == CANCEL_REASON["PARTIALLY_FILLED"] def test_handle_cancel_exit_cancel_exception(mocker, default_conf_usdt) -> None: patch_RPCManager(mocker) patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.0) - mocker.patch(f'{EXMS}.cancel_order_with_result', side_effect=InvalidOrderException()) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.0) + mocker.patch(f"{EXMS}.cancel_order_with_result", side_effect=InvalidOrderException()) freqtrade = FreqtradeBot(default_conf_usdt) # TODO: should not be magicmock trade = MagicMock() order_obj = MagicMock() - order_obj.order_id = '125' - reason = CANCEL_REASON['TIMEOUT'] - order = {'remaining': 1, - 'id': '125', - 'amount': 1, - 'status': "open"} + order_obj.order_id = "125" + reason = CANCEL_REASON["TIMEOUT"] + order = {"remaining": 1, "id": "125", "amount": 1, "status": "open"} 
assert not freqtrade.handle_cancel_exit(trade, order, order_obj, reason) # mocker.patch(f'{EXMS}.cancel_order_with_result', return_value=order) # assert not freqtrade.handle_cancel_exit(trade, order, reason) -@pytest.mark.parametrize("is_short, open_rate, amt", [ - (False, 2.0, 30.0), - (True, 2.02, 29.70297029), -]) -def test_execute_trade_exit_up(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, mocker, - ticker_usdt_sell_down, is_short, open_rate, amt) -> None: +@pytest.mark.parametrize( + "is_short, open_rate, amt", + [ + (False, 2.0, 30.0), + (True, 2.02, 29.70297029), + ], +) +def test_execute_trade_exit_up( + default_conf_usdt, + ticker_usdt, + fee, + ticker_usdt_sell_up, + mocker, + ticker_usdt_sell_down, + is_short, + open_rate, + amt, +) -> None: rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -2713,68 +2899,74 @@ def test_execute_trade_exit_up(default_conf_usdt, ticker_usdt, fee, ticker_usdt_ # Increase the price and sell it mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_down if is_short else ticker_usdt_sell_up + EXMS, fetch_ticker=ticker_usdt_sell_down if is_short else ticker_usdt_sell_up ) # Prevented sell ... freqtrade.execute_trade_exit( trade=trade, - limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']), - exit_check=ExitCheckTuple(exit_type=ExitType.ROI) + limit=(ticker_usdt_sell_down()["ask"] if is_short else ticker_usdt_sell_up()["bid"]), + exit_check=ExitCheckTuple(exit_type=ExitType.ROI), ) assert rpc_mock.call_count == 0 assert freqtrade.strategy.confirm_trade_exit.call_count == 1 - assert id(freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade']) != id(trade) - assert freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]['trade'].id == trade.id + assert id(freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]["trade"]) != id(trade) + assert freqtrade.strategy.confirm_trade_exit.call_args_list[0][1]["trade"].id == trade.id # Repatch with true freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True) freqtrade.execute_trade_exit( trade=trade, - limit=(ticker_usdt_sell_down()['ask'] if is_short else ticker_usdt_sell_up()['bid']), - exit_check=ExitCheckTuple(exit_type=ExitType.ROI) + limit=(ticker_usdt_sell_down()["ask"] if is_short else ticker_usdt_sell_up()["bid"]), + exit_check=ExitCheckTuple(exit_type=ExitType.ROI), ) assert freqtrade.strategy.confirm_trade_exit.call_count == 1 assert rpc_mock.call_count == 1 last_msg = rpc_mock.call_args_list[-1][0][0] assert { - 'trade_id': 1, - 'type': RPCMessageType.EXIT, - 'exchange': 'Binance', - 'pair': 'ETH/USDT', - 'gain': 'profit', - 'limit': 2.0 if is_short else 2.2, - 'order_rate': 2.0 if is_short else 2.2, - 'amount': pytest.approx(amt), - 'order_type': 'limit', - 'buy_tag': None, - 'direction': 'Short' if trade.is_short else 'Long', - 'leverage': 1.0, - 'enter_tag': None, - 'open_rate': open_rate, - 'current_rate': 2.01 if is_short else 2.3, - 'profit_amount': 0.29554455 if is_short else 5.685, - 'profit_ratio': 0.00493809 if is_short else 0.09451372, - 'stake_currency': 'USDT', - 'quote_currency': 'USDT', - 'fiat_currency': 'USD', - 'base_currency': 'ETH', - 'exit_reason': ExitType.ROI.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'stake_amount': pytest.approx(60), - 'is_final_exit': False, - 'final_profit_ratio': None, + "trade_id": 1, + "type": RPCMessageType.EXIT, + "exchange": "Binance", + "pair": "ETH/USDT", + 
"gain": "profit", + "limit": 2.0 if is_short else 2.2, + "order_rate": 2.0 if is_short else 2.2, + "amount": pytest.approx(amt), + "order_type": "limit", + "buy_tag": None, + "direction": "Short" if trade.is_short else "Long", + "leverage": 1.0, + "enter_tag": None, + "open_rate": open_rate, + "current_rate": 2.01 if is_short else 2.3, + "profit_amount": 0.29554455 if is_short else 5.685, + "profit_ratio": 0.00493809 if is_short else 0.09451372, + "stake_currency": "USDT", + "quote_currency": "USDT", + "fiat_currency": "USD", + "base_currency": "ETH", + "exit_reason": ExitType.ROI.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "sub_trade": False, + "cumulative_profit": 0.0, + "stake_amount": pytest.approx(60), + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg @pytest.mark.parametrize("is_short", [False, True]) -def test_execute_trade_exit_down(default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_down, - ticker_usdt_sell_up, mocker, is_short) -> None: +def test_execute_trade_exit_down( + default_conf_usdt, + ticker_usdt, + fee, + ticker_usdt_sell_down, + ticker_usdt_sell_up, + mocker, + is_short, +) -> None: rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -2796,57 +2988,72 @@ def test_execute_trade_exit_down(default_conf_usdt, ticker_usdt, fee, ticker_usd # Decrease the price and sell it mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down + EXMS, fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down ) freqtrade.execute_trade_exit( - trade=trade, limit=(ticker_usdt_sell_up if is_short else ticker_usdt_sell_down)()['bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)) + trade=trade, + limit=(ticker_usdt_sell_up if is_short else ticker_usdt_sell_down)()["bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS), + ) assert rpc_mock.call_count == 2 last_msg = rpc_mock.call_args_list[-1][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/USDT', - 'direction': 'Short' if trade.is_short else 'Long', - 'leverage': 1.0, - 'gain': 'loss', - 'limit': 2.2 if is_short else 2.01, - 'order_rate': 2.2 if is_short else 2.01, - 'amount': pytest.approx(29.70297029) if is_short else 30.0, - 'order_type': 'limit', - 'buy_tag': None, - 'enter_tag': None, - 'open_rate': 2.02 if is_short else 2.0, - 'current_rate': 2.2 if is_short else 2.0, - 'profit_amount': -5.65990099 if is_short else -0.00075, - 'profit_ratio': -0.0945681 if is_short else -1.247e-05, - 'stake_currency': 'USDT', - 'quote_currency': 'USDT', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'stake_amount': pytest.approx(60), - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/USDT", + "direction": "Short" if trade.is_short else "Long", + "leverage": 1.0, + "gain": "loss", + "limit": 2.2 if is_short else 2.01, + "order_rate": 2.2 if is_short else 2.01, + "amount": pytest.approx(29.70297029) if is_short else 30.0, + "order_type": "limit", + "buy_tag": None, + "enter_tag": None, + "open_rate": 2.02 if is_short else 2.0, + "current_rate": 2.2 if is_short else 2.0, + "profit_amount": -5.65990099 if is_short else -0.00075, + "profit_ratio": -0.0945681 if is_short else -1.247e-05, + 
"stake_currency": "USDT", + "quote_currency": "USDT", + "base_currency": "ETH", + "fiat_currency": "USD", + "exit_reason": ExitType.STOP_LOSS.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "sub_trade": False, + "cumulative_profit": 0.0, + "stake_amount": pytest.approx(60), + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg @pytest.mark.parametrize( - "is_short,amount,open_rate,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [ - (False, 30, 2.0, 2.3, 2.25, 7.18125, 0.11938903, 'profit'), - (True, 29.70297029, 2.02, 2.2, 2.25, -7.14876237, -0.11944465, 'loss'), - ]) + "is_short,amount,open_rate,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", + [ + (False, 30, 2.0, 2.3, 2.25, 7.18125, 0.11938903, "profit"), + (True, 29.70297029, 2.02, 2.2, 2.25, -7.14876237, -0.11944465, "loss"), + ], +) def test_execute_trade_exit_custom_exit_price( - default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, amount, open_rate, - current_rate, limit, profit_amount, profit_ratio, profit_or_loss, mocker) -> None: + default_conf_usdt, + ticker_usdt, + fee, + ticker_usdt_sell_up, + is_short, + amount, + open_rate, + current_rate, + limit, + profit_amount, + profit_ratio, + profit_or_loss, + mocker, +) -> None: rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -2856,7 +3063,7 @@ def test_execute_trade_exit_custom_exit_price( _dry_is_price_crossed=MagicMock(return_value=False), ) config = deepcopy(default_conf_usdt) - config['custom_price_max_distance_ratio'] = 0.1 + config["custom_price_max_distance_ratio"] = 0.1 patch_whitelist(mocker, config) freqtrade = FreqtradeBot(config) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -2872,10 +3079,7 @@ def test_execute_trade_exit_custom_exit_price( assert freqtrade.strategy.confirm_trade_exit.call_count == 0 # Increase the price and sell it - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_up - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker_usdt_sell_up) freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True) @@ -2883,8 +3087,8 @@ def test_execute_trade_exit_custom_exit_price( freqtrade.strategy.custom_exit_price = lambda **kwargs: 2.25 freqtrade.execute_trade_exit( trade=trade, - limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason='foo') + limit=ticker_usdt_sell_up()["ask" if is_short else "bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL, exit_reason="foo"), ) # Sell price must be different to default bid price @@ -2894,47 +3098,60 @@ def test_execute_trade_exit_custom_exit_price( assert rpc_mock.call_count == 1 last_msg = rpc_mock.call_args_list[-1][0][0] assert { - 'trade_id': 1, - 'type': RPCMessageType.EXIT, - 'exchange': 'Binance', - 'pair': 'ETH/USDT', - 'direction': 'Short' if trade.is_short else 'Long', - 'leverage': 1.0, - 'gain': profit_or_loss, - 'limit': limit, - 'order_rate': limit, - 'amount': pytest.approx(amount), - 'order_type': 'limit', - 'buy_tag': None, - 'enter_tag': None, - 'open_rate': open_rate, - 'current_rate': current_rate, - 'profit_amount': pytest.approx(profit_amount), - 'profit_ratio': profit_ratio, - 'stake_currency': 'USDT', - 'quote_currency': 'USDT', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'exit_reason': 'foo', - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'stake_amount': 
pytest.approx(60), - 'is_final_exit': False, - 'final_profit_ratio': None, + "trade_id": 1, + "type": RPCMessageType.EXIT, + "exchange": "Binance", + "pair": "ETH/USDT", + "direction": "Short" if trade.is_short else "Long", + "leverage": 1.0, + "gain": profit_or_loss, + "limit": limit, + "order_rate": limit, + "amount": pytest.approx(amount), + "order_type": "limit", + "buy_tag": None, + "enter_tag": None, + "open_rate": open_rate, + "current_rate": current_rate, + "profit_amount": pytest.approx(profit_amount), + "profit_ratio": profit_ratio, + "stake_currency": "USDT", + "quote_currency": "USDT", + "base_currency": "ETH", + "fiat_currency": "USD", + "exit_reason": "foo", + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "sub_trade": False, + "cumulative_profit": 0.0, + "stake_amount": pytest.approx(60), + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg @pytest.mark.parametrize( - "is_short,amount,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", [ - (False, 30, 2.3, 2.2, 5.685, 0.09451372, 'profit'), - (True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, 'loss'), - ]) + "is_short,amount,current_rate,limit,profit_amount,profit_ratio,profit_or_loss", + [ + (False, 30, 2.3, 2.2, 5.685, 0.09451372, "profit"), + (True, 29.70297029, 2.2, 2.3, -8.63762376, -0.1443212, "loss"), + ], +) def test_execute_trade_exit_market_order( - default_conf_usdt, ticker_usdt, fee, is_short, current_rate, amount, caplog, - limit, profit_amount, profit_ratio, profit_or_loss, ticker_usdt_sell_up, mocker + default_conf_usdt, + ticker_usdt, + fee, + is_short, + current_rate, + amount, + caplog, + limit, + profit_amount, + profit_ratio, + profit_or_loss, + ticker_usdt_sell_up, + mocker, ) -> None: """ amount @@ -2953,7 +3170,7 @@ def test_execute_trade_exit_market_order( long: (65.835/60.15) - 1 = 0.0945137157107232 short: 1 - (68.48762376237624/59.85) = -0.1443211990371971 """ - open_rate = ticker_usdt.return_value['ask' if is_short else 'bid'] + open_rate = ticker_usdt.return_value["ask" if is_short else "bid"] rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -2979,12 +3196,12 @@ def test_execute_trade_exit_market_order( fetch_ticker=ticker_usdt_sell_up, _dry_is_price_crossed=MagicMock(return_value=False), ) - freqtrade.config['order_types']['exit'] = 'market' + freqtrade.config["order_types"]["exit"] = "market" freqtrade.execute_trade_exit( trade=trade, - limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.ROI) + limit=ticker_usdt_sell_up()["ask" if is_short else "bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.ROI), ) assert not trade.is_open @@ -2993,52 +3210,55 @@ def test_execute_trade_exit_market_order( assert rpc_mock.call_count == 4 last_msg = rpc_mock.call_args_list[-2][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/USDT', - 'direction': 'Short' if trade.is_short else 'Long', - 'leverage': 1.0, - 'gain': profit_or_loss, - 'limit': limit, - 'order_rate': limit, - 'amount': pytest.approx(amount), - 'order_type': 'market', - 'buy_tag': None, - 'enter_tag': None, - 'open_rate': open_rate, - 'current_rate': current_rate, - 'profit_amount': pytest.approx(profit_amount), - 'profit_ratio': profit_ratio, - 'stake_currency': 'USDT', - 'quote_currency': 'USDT', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'exit_reason': ExitType.ROI.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 
'sub_trade': False, - 'cumulative_profit': 0.0, - 'stake_amount': pytest.approx(60), - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/USDT", + "direction": "Short" if trade.is_short else "Long", + "leverage": 1.0, + "gain": profit_or_loss, + "limit": limit, + "order_rate": limit, + "amount": pytest.approx(amount), + "order_type": "market", + "buy_tag": None, + "enter_tag": None, + "open_rate": open_rate, + "current_rate": current_rate, + "profit_amount": pytest.approx(profit_amount), + "profit_ratio": profit_ratio, + "stake_currency": "USDT", + "quote_currency": "USDT", + "base_currency": "ETH", + "fiat_currency": "USD", + "exit_reason": ExitType.ROI.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "sub_trade": False, + "cumulative_profit": 0.0, + "stake_amount": pytest.approx(60), + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg @pytest.mark.parametrize("is_short", [False, True]) -def test_execute_trade_exit_insufficient_funds_error(default_conf_usdt, ticker_usdt, fee, is_short, - ticker_usdt_sell_up, mocker) -> None: +def test_execute_trade_exit_insufficient_funds_error( + default_conf_usdt, ticker_usdt, fee, is_short, ticker_usdt_sell_up, mocker +) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds') + mock_insuf = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds") mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, get_fee=fee, - create_order=MagicMock(side_effect=[ - {'id': 1234553382}, - InsufficientFundsError(), - ]), + create_order=MagicMock( + side_effect=[ + {"id": 1234553382}, + InsufficientFundsError(), + ] + ), ) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -3050,72 +3270,84 @@ def test_execute_trade_exit_insufficient_funds_error(default_conf_usdt, ticker_u assert trade # Increase the price and sell it - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_up - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker_usdt_sell_up) sell_reason = ExitCheckTuple(exit_type=ExitType.ROI) assert not freqtrade.execute_trade_exit( trade=trade, - limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'], - exit_check=sell_reason + limit=ticker_usdt_sell_up()["ask" if is_short else "bid"], + exit_check=sell_reason, ) assert mock_insuf.call_count == 1 -@pytest.mark.parametrize('profit_only,bid,ask,handle_first,handle_second,exit_type,is_short', [ - # Enable profit - (True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, False), - (True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, True), - # # Disable profit - (False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, False), - (False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, True), - # # Enable loss - # # * Shouldn't this be ExitType.STOP_LOSS.value - (True, 0.21, 0.22, False, False, None, False), - (True, 2.41, 2.42, False, False, None, True), - # Disable loss - (False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, False), - (False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, True), -]) +@pytest.mark.parametrize( + "profit_only,bid,ask,handle_first,handle_second,exit_type,is_short", + [ + # Enable profit + (True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, False), + (True, 2.18, 2.2, False, True, ExitType.EXIT_SIGNAL.value, True), + # # Disable profit + (False, 3.19, 3.2, 
True, False, ExitType.EXIT_SIGNAL.value, False), + (False, 3.19, 3.2, True, False, ExitType.EXIT_SIGNAL.value, True), + # # Enable loss + # # * Shouldn't this be ExitType.STOP_LOSS.value + (True, 0.21, 0.22, False, False, None, False), + (True, 2.41, 2.42, False, False, None, True), + # Disable loss + (False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, False), + (False, 0.10, 0.22, True, False, ExitType.EXIT_SIGNAL.value, True), + ], +) def test_exit_profit_only( - default_conf_usdt, limit_order, limit_order_open, is_short, - fee, mocker, profit_only, bid, ask, handle_first, handle_second, exit_type) -> None: + default_conf_usdt, + limit_order, + limit_order_open, + is_short, + fee, + mocker, + profit_only, + bid, + ask, + handle_first, + handle_second, + exit_type, +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) eside = entry_side(is_short) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': bid, - 'ask': ask, - 'last': bid - }), - create_order=MagicMock(side_effect=[ - limit_order[eside], - {'id': 1234553382}, - ]), + fetch_ticker=MagicMock(return_value={"bid": bid, "ask": ask, "last": bid}), + create_order=MagicMock( + side_effect=[ + limit_order[eside], + {"id": 1234553382}, + ] + ), get_fee=fee, ) - default_conf_usdt.update({ - 'use_exit_signal': True, - 'exit_profit_only': profit_only, - 'exit_profit_offset': 0.1, - }) + default_conf_usdt.update( + { + "use_exit_signal": True, + "exit_profit_only": profit_only, + "exit_profit_offset": 0.1, + } + ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) freqtrade.strategy.custom_exit = MagicMock(return_value=None) if exit_type == ExitType.EXIT_SIGNAL.value: freqtrade.strategy.min_roi_reached = MagicMock(return_value=False) else: - freqtrade.strategy.ft_stoploss_reached = MagicMock(return_value=ExitCheckTuple( - exit_type=ExitType.NONE)) + freqtrade.strategy.ft_stoploss_reached = MagicMock( + return_value=ExitCheckTuple(exit_type=ExitType.NONE) + ) freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() assert trade.is_short == is_short - oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside) + oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]["symbol"], eside) trade.update_order(limit_order[eside]) trade.update_trade(oobj) freqtrade.wallets.update() @@ -3132,21 +3364,22 @@ def test_exit_profit_only( assert freqtrade.handle_trade(trade) is True -def test_sell_not_enough_balance(default_conf_usdt, limit_order, limit_order_open, - fee, mocker, caplog) -> None: +def test_sell_not_enough_balance( + default_conf_usdt, limit_order, limit_order_open, fee, mocker, caplog +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 0.00002172, - 'ask': 0.00002173, - 'last': 0.00002172 - }), - create_order=MagicMock(side_effect=[ - limit_order_open['buy'], - {'id': 1234553382}, - ]), + fetch_ticker=MagicMock( + return_value={"bid": 0.00002172, "ask": 0.00002173, "last": 0.00002172} + ), + create_order=MagicMock( + side_effect=[ + limit_order_open["buy"], + {"id": 1234553382}, + ] + ), get_fee=fee, ) @@ -3159,31 +3392,28 @@ def test_sell_not_enough_balance(default_conf_usdt, limit_order, limit_order_ope trade = Trade.session.scalars(select(Trade)).first() amnt = trade.amount - oobj = Order.parse_from_ccxt_object(limit_order['buy'], limit_order['buy']['symbol'], 'buy') + oobj = 
Order.parse_from_ccxt_object(limit_order["buy"], limit_order["buy"]["symbol"], "buy") trade.update_trade(oobj) patch_get_signal(freqtrade, enter_long=False, exit_long=True) - mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=trade.amount * 0.985)) + mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=trade.amount * 0.985)) assert freqtrade.handle_trade(trade) is True - assert log_has_re(r'.*Falling back to wallet-amount.', caplog) + assert log_has_re(r".*Falling back to wallet-amount.", caplog) assert trade.amount != amnt -@pytest.mark.parametrize('amount_wallet,has_err', [ - (95.29, False), - (91.29, True) -]) +@pytest.mark.parametrize("amount_wallet,has_err", [(95.29, False), (91.29, True)]) def test__safe_exit_amount(default_conf_usdt, fee, caplog, mocker, amount_wallet, has_err): patch_RPCManager(mocker) patch_exchange(mocker) amount = 95.33 amount_wallet = amount_wallet - mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=amount_wallet)) - wallet_update = mocker.patch('freqtrade.wallets.Wallets.update') + mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=amount_wallet)) + wallet_update = mocker.patch("freqtrade.wallets.Wallets.update") trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, @@ -3197,19 +3427,20 @@ def test__safe_exit_amount(default_conf_usdt, fee, caplog, mocker, amount_wallet wallet_update.reset_mock() assert trade.amount != amount_wallet assert freqtrade._safe_exit_amount(trade, trade.pair, trade.amount) == amount_wallet - assert log_has_re(r'.*Falling back to wallet-amount.', caplog) + assert log_has_re(r".*Falling back to wallet-amount.", caplog) assert trade.amount == amount_wallet assert wallet_update.call_count == 1 caplog.clear() wallet_update.reset_mock() assert freqtrade._safe_exit_amount(trade, trade.pair, amount_wallet) == amount_wallet - assert not log_has_re(r'.*Falling back to wallet-amount.', caplog) + assert not log_has_re(r".*Falling back to wallet-amount.", caplog) assert wallet_update.call_count == 1 @pytest.mark.parametrize("is_short", [False, True]) -def test_locked_pairs(default_conf_usdt, ticker_usdt, fee, - ticker_usdt_sell_down, mocker, caplog, is_short) -> None: +def test_locked_pairs( + default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_down, mocker, caplog, is_short +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -3228,49 +3459,45 @@ def test_locked_pairs(default_conf_usdt, ticker_usdt, fee, assert trade # Decrease the price and sell it - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_down - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker_usdt_sell_down) freqtrade.execute_trade_exit( trade=trade, - limit=ticker_usdt_sell_down()['ask' if is_short else 'bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS) + limit=ticker_usdt_sell_down()["ask" if is_short else "bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS), ) - trade.close(ticker_usdt_sell_down()['bid']) - assert freqtrade.strategy.is_pair_locked(trade.pair, side='*') + trade.close(ticker_usdt_sell_down()["bid"]) + assert freqtrade.strategy.is_pair_locked(trade.pair, side="*") # Both sides are locked - assert freqtrade.strategy.is_pair_locked(trade.pair, side='long') - assert freqtrade.strategy.is_pair_locked(trade.pair, side='short') + assert 
freqtrade.strategy.is_pair_locked(trade.pair, side="long") + assert freqtrade.strategy.is_pair_locked(trade.pair, side="short") # reinit - should buy other pair. caplog.clear() freqtrade.enter_positions() - assert log_has_re(fr"Pair {trade.pair} \* is locked.*", caplog) + assert log_has_re(rf"Pair {trade.pair} \* is locked.*", caplog) @pytest.mark.parametrize("is_short", [False, True]) -def test_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open, is_short, - fee, mocker) -> None: +def test_ignore_roi_if_entry_signal( + default_conf_usdt, limit_order, limit_order_open, is_short, fee, mocker +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) eside = entry_side(is_short) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.19, - 'ask': 2.2, - 'last': 2.19 - }), - create_order=MagicMock(side_effect=[ - limit_order_open[eside], - {'id': 1234553382}, - ]), + fetch_ticker=MagicMock(return_value={"bid": 2.19, "ask": 2.2, "last": 2.19}), + create_order=MagicMock( + side_effect=[ + limit_order_open[eside], + {"id": 1234553382}, + ] + ), get_fee=fee, ) - default_conf_usdt['ignore_roi_if_entry_signal'] = True + default_conf_usdt["ignore_roi_if_entry_signal"] = True freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -3280,8 +3507,7 @@ def test_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_ trade = Trade.session.scalars(select(Trade)).first() trade.is_short = is_short - oobj = Order.parse_from_ccxt_object( - limit_order[eside], limit_order[eside]['symbol'], eside) + oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]["symbol"], eside) trade.update_trade(oobj) freqtrade.wallets.update() if is_short: @@ -3293,35 +3519,31 @@ def test_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_ # Test if entry-signal is absent (should sell due to roi = true) if is_short: - patch_get_signal(freqtrade, enter_long=False, exit_short=False, exit_tag='something') + patch_get_signal(freqtrade, enter_long=False, exit_short=False, exit_tag="something") else: - patch_get_signal(freqtrade, enter_long=False, exit_long=False, exit_tag='something') + patch_get_signal(freqtrade, enter_long=False, exit_long=False, exit_tag="something") assert freqtrade.handle_trade(trade) is True assert trade.exit_reason == ExitType.ROI.value -@pytest.mark.parametrize("is_short,val1,val2", [ - (False, 1.5, 1.1), - (True, 0.5, 0.9) -]) -def test_trailing_stop_loss(default_conf_usdt, limit_order_open, - is_short, val1, val2, fee, caplog, mocker) -> None: +@pytest.mark.parametrize("is_short,val1,val2", [(False, 1.5, 1.1), (True, 0.5, 0.9)]) +def test_trailing_stop_loss( + default_conf_usdt, limit_order_open, is_short, val1, val2, fee, caplog, mocker +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.0, - 'ask': 2.0, - 'last': 2.0 - }), - create_order=MagicMock(side_effect=[ - limit_order_open[entry_side(is_short)], - {'id': 1234553382}, - ]), + fetch_ticker=MagicMock(return_value={"bid": 2.0, "ask": 2.0, "last": 2.0}), + create_order=MagicMock( + side_effect=[ + limit_order_open[entry_side(is_short)], + {"id": 1234553382}, + ] + ), get_fee=fee, ) - default_conf_usdt['trailing_stop'] = True + default_conf_usdt["trailing_stop"] = True patch_whitelist(mocker, default_conf_usdt) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, 
enter_long=not is_short) @@ -3333,69 +3555,82 @@ def test_trailing_stop_loss(default_conf_usdt, limit_order_open, assert freqtrade.handle_trade(trade) is False # Raise praise into profits - mocker.patch(f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': 2.0 * val1, - 'ask': 2.0 * val1, - 'last': 2.0 * val1 - })) + mocker.patch( + f"{EXMS}.fetch_ticker", + MagicMock(return_value={"bid": 2.0 * val1, "ask": 2.0 * val1, "last": 2.0 * val1}), + ) # Stoploss should be adjusted assert freqtrade.handle_trade(trade) is False caplog.clear() # Price fell - mocker.patch(f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': 2.0 * val2, - 'ask': 2.0 * val2, - 'last': 2.0 * val2 - })) + mocker.patch( + f"{EXMS}.fetch_ticker", + MagicMock(return_value={"bid": 2.0 * val2, "ask": 2.0 * val2, "last": 2.0 * val2}), + ) caplog.set_level(logging.DEBUG) # Sell as trailing-stop is reached assert freqtrade.handle_trade(trade) is True stop_multi = 1.1 if is_short else 0.9 - assert log_has(f"ETH/USDT - HIT STOP: current price at {(2.0 * val2):6f}, " - f"stoploss is {(2.0 * val1 * stop_multi):6f}, " - f"initial stoploss was at {(2.0 * stop_multi):6f}, trade opened at 2.000000", - caplog) + assert log_has( + f"ETH/USDT - HIT STOP: current price at {(2.0 * val2):6f}, " + f"stoploss is {(2.0 * val1 * stop_multi):6f}, " + f"initial stoploss was at {(2.0 * stop_multi):6f}, trade opened at 2.000000", + caplog, + ) assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value -@pytest.mark.parametrize('offset,trail_if_reached,second_sl,is_short', [ - (0, False, 2.0394, False), - (0.011, False, 2.0394, False), - (0.055, True, 1.8, False), - (0, False, 2.1614, True), - (0.011, False, 2.1614, True), - (0.055, True, 2.42, True), -]) +@pytest.mark.parametrize( + "offset,trail_if_reached,second_sl,is_short", + [ + (0, False, 2.0394, False), + (0.011, False, 2.0394, False), + (0.055, True, 1.8, False), + (0, False, 2.1614, True), + (0.011, False, 2.1614, True), + (0.055, True, 2.42, True), + ], +) def test_trailing_stop_loss_positive( - default_conf_usdt, limit_order, limit_order_open, - offset, fee, caplog, mocker, trail_if_reached, second_sl, is_short + default_conf_usdt, + limit_order, + limit_order_open, + offset, + fee, + caplog, + mocker, + trail_if_reached, + second_sl, + is_short, ) -> None: - enter_price = limit_order[entry_side(is_short)]['price'] + enter_price = limit_order[entry_side(is_short)]["price"] patch_RPCManager(mocker) patch_exchange(mocker) eside = entry_side(is_short) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': enter_price - (-0.01 if is_short else 0.01), - 'ask': enter_price - (-0.01 if is_short else 0.01), - 'last': enter_price - (-0.01 if is_short else 0.01), - }), - create_order=MagicMock(side_effect=[ - limit_order[eside], - {'id': 1234553382}, - ]), + fetch_ticker=MagicMock( + return_value={ + "bid": enter_price - (-0.01 if is_short else 0.01), + "ask": enter_price - (-0.01 if is_short else 0.01), + "last": enter_price - (-0.01 if is_short else 0.01), + } + ), + create_order=MagicMock( + side_effect=[ + limit_order[eside], + {"id": 1234553382}, + ] + ), get_fee=fee, ) - default_conf_usdt['trailing_stop'] = True - default_conf_usdt['trailing_stop_positive'] = 0.01 + default_conf_usdt["trailing_stop"] = True + default_conf_usdt["trailing_stop_positive"] = 0.01 if offset: - default_conf_usdt['trailing_stop_positive_offset'] = offset - default_conf_usdt['trailing_only_offset_is_reached'] = trail_if_reached + default_conf_usdt["trailing_stop_positive_offset"] 
= offset + default_conf_usdt["trailing_only_offset_is_reached"] = trail_if_reached patch_whitelist(mocker, default_conf_usdt) freqtrade = FreqtradeBot(default_conf_usdt) @@ -3405,7 +3640,7 @@ def test_trailing_stop_loss_positive( trade = Trade.session.scalars(select(Trade)).first() assert trade.is_short == is_short - oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]['symbol'], eside) + oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]["symbol"], eside) trade.update_order(limit_order[eside]) trade.update_trade(oobj) caplog.set_level(logging.DEBUG) @@ -3414,18 +3649,22 @@ def test_trailing_stop_loss_positive( # Raise ticker_usdt above buy price mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': enter_price + (-0.06 if is_short else 0.06), - 'ask': enter_price + (-0.06 if is_short else 0.06), - 'last': enter_price + (-0.06 if is_short else 0.06), - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": enter_price + (-0.06 if is_short else 0.06), + "ask": enter_price + (-0.06 if is_short else 0.06), + "last": enter_price + (-0.06 if is_short else 0.06), + } + ), ) caplog.clear() # stop-loss not reached, adjusted stoploss assert freqtrade.handle_trade(trade) is False - caplog_text = (f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: " - f"{'2.49' if not is_short else '2.24'}%") + caplog_text = ( + f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: " + f"{'2.49' if not is_short else '2.24'}%" + ) if trail_if_reached: assert not log_has(caplog_text, caplog) assert not log_has("ETH/USDT - Adjusting stoploss...", caplog) @@ -3436,28 +3675,32 @@ def test_trailing_stop_loss_positive( caplog.clear() mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': enter_price + (-0.135 if is_short else 0.125), - 'ask': enter_price + (-0.135 if is_short else 0.125), - 'last': enter_price + (-0.135 if is_short else 0.125), - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": enter_price + (-0.135 if is_short else 0.125), + "ask": enter_price + (-0.135 if is_short else 0.125), + "last": enter_price + (-0.135 if is_short else 0.125), + } + ), ) assert freqtrade.handle_trade(trade) is False assert log_has( f"ETH/USDT - Using positive stoploss: 0.01 offset: {offset} profit: " f"{'5.72' if not is_short else '5.67'}%", - caplog + caplog, ) assert log_has("ETH/USDT - Adjusting stoploss...", caplog) mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': enter_price + (-0.02 if is_short else 0.02), - 'ask': enter_price + (-0.02 if is_short else 0.02), - 'last': enter_price + (-0.02 if is_short else 0.02), - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": enter_price + (-0.02 if is_short else 0.02), + "ask": enter_price + (-0.02 if is_short else 0.02), + "last": enter_price + (-0.02 if is_short else 0.02), + } + ), ) # Lower price again (but still positive) assert freqtrade.handle_trade(trade) is True @@ -3466,34 +3709,28 @@ def test_trailing_stop_loss_positive( f"stoploss is {trade.stop_loss:.6f}, " f"initial stoploss was at {'2.42' if is_short else '1.80'}0000, " f"trade opened at {2.2 if is_short else 2.0}00000", - caplog) + caplog, + ) assert trade.exit_reason == ExitType.TRAILING_STOP_LOSS.value @pytest.mark.parametrize("is_short", [False, True]) -def test_disable_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limit_order_open, - is_short, fee, mocker) -> None: +def test_disable_ignore_roi_if_entry_signal( + 
default_conf_usdt, limit_order, limit_order_open, is_short, fee, mocker +) -> None: patch_RPCManager(mocker) patch_exchange(mocker) eside = entry_side(is_short) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.0, - 'ask': 2.0, - 'last': 2.0 - }), - create_order=MagicMock(side_effect=[ - limit_order_open[eside], - {'id': 1234553382}, - {'id': 1234553383} - ]), + fetch_ticker=MagicMock(return_value={"bid": 2.0, "ask": 2.0, "last": 2.0}), + create_order=MagicMock( + side_effect=[limit_order_open[eside], {"id": 1234553382}, {"id": 1234553383}] + ), get_fee=fee, _dry_is_price_crossed=MagicMock(return_value=False), ) - default_conf_usdt['exit_pricing'] = { - 'ignore_roi_if_entry_signal': False - } + default_conf_usdt["exit_pricing"] = {"ignore_roi_if_entry_signal": False} freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) freqtrade.strategy.min_roi_reached = MagicMock(return_value=True) @@ -3503,8 +3740,7 @@ def test_disable_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limi trade = Trade.session.scalars(select(Trade)).first() trade.is_short = is_short - oobj = Order.parse_from_ccxt_object( - limit_order[eside], limit_order[eside]['symbol'], eside) + oobj = Order.parse_from_ccxt_object(limit_order[eside], limit_order[eside]["symbol"], eside) trade.update_trade(oobj) # Sell due to min_roi_reached patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short, exit_short=is_short) @@ -3516,14 +3752,15 @@ def test_disable_ignore_roi_if_entry_signal(default_conf_usdt, limit_order, limi assert trade.exit_reason == ExitType.ROI.value -def test_get_real_amount_quote(default_conf_usdt, trades_for_order, buy_order_fee, fee, caplog, - mocker): - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - amount = sum(x['amount'] for x in trades_for_order) +def test_get_real_amount_quote( + default_conf_usdt, trades_for_order, buy_order_fee, fee, caplog, mocker +): + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + amount = sum(x["amount"] for x in trades_for_order) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, @@ -3531,26 +3768,27 @@ def test_get_real_amount_quote(default_conf_usdt, trades_for_order, buy_order_fe freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) caplog.clear() - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount is reduced by "fee" assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == (amount * 0.001) assert log_has( - 'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False,' - ' leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.', - caplog + "Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False," + " leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.008.", + caplog, ) -def test_get_real_amount_quote_dust(default_conf_usdt, trades_for_order, buy_order_fee, fee, - caplog, mocker): - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - walletmock = mocker.patch('freqtrade.wallets.Wallets.update') - mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=8.1122) - amount = sum(x['amount'] for x in trades_for_order) +def 
test_get_real_amount_quote_dust( + default_conf_usdt, trades_for_order, buy_order_fee, fee, caplog, mocker +): + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + walletmock = mocker.patch("freqtrade.wallets.Wallets.update") + mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=8.1122) + amount = sum(x["amount"] for x in trades_for_order) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, @@ -3558,74 +3796,94 @@ def test_get_real_amount_quote_dust(default_conf_usdt, trades_for_order, buy_ord freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) walletmock.reset_mock() - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount is kept as is assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None assert walletmock.call_count == 1 - assert log_has_re(r'Fee amount for Trade.* was in base currency ' - '- Eating Fee 0.008 into dust', caplog) + assert log_has_re( + r"Fee amount for Trade.* was in base currency - Eating Fee 0.008 into dust", caplog + ) def test_get_real_amount_no_trade(default_conf_usdt, buy_order_fee, caplog, mocker, fee): - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) - amount = buy_order_fee['amount'] + amount = buy_order_fee["amount"] trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount is reduced by "fee" assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None assert log_has( - 'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, ' - 'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: ' - 'myTrade-Dict empty found', - caplog + "Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, " + "is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed) failed: " + "myTrade-Dict empty found", + caplog, ) @pytest.mark.parametrize( - 'fee_par,fee_reduction_amount,use_ticker_usdt_rate,expected_log', [ + "fee_par,fee_reduction_amount,use_ticker_usdt_rate,expected_log", + [ # basic, amount does not change - ({'cost': 0.008, 'currency': 'ETH'}, 0, False, None), + ({"cost": 0.008, "currency": "ETH"}, 0, False, None), # no currency in fee - ({'cost': 0.004, 'currency': None}, 0, True, None), + ({"cost": 0.004, "currency": None}, 0, True, None), # BNB no rate - ({'cost': 0.00094518, 'currency': 'BNB'}, 0, True, ( - 'Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, ' - 'leverage=1.0, open_rate=0.24544100, open_since=closed) [buy]: 0.00094518 BNB -' - ' rate: None' - )), + ( + {"cost": 0.00094518, "currency": "BNB"}, + 0, + True, + ( + "Fee for Trade Trade(id=None, pair=LTC/ETH, amount=8.00000000, is_short=False, " + "leverage=1.0, open_rate=0.24544100, open_since=closed) [buy]: 0.00094518 BNB -" + " rate: None" + ), + ), # from order - ({'cost': 0.004, 'currency': 'LTC'}, 0.004, False, ( - 'Applying fee on amount for Trade(id=None, 
pair=LTC/ETH, amount=8.00000000, ' - 'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004.' - )), + ( + {"cost": 0.004, "currency": "LTC"}, + 0.004, + False, + ( + "Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, " + "is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), fee=0.004." + ), + ), # invalid, no currency in from fee dict - ({'cost': 0.008, 'currency': None}, 0, True, None), - ]) + ({"cost": 0.008, "currency": None}, 0, True, None), + ], +) def test_get_real_amount( - default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker, caplog, - fee_par, fee_reduction_amount, use_ticker_usdt_rate, expected_log + default_conf_usdt, + trades_for_order, + buy_order_fee, + fee, + mocker, + caplog, + fee_par, + fee_reduction_amount, + use_ticker_usdt_rate, + expected_log, ): - buy_order = deepcopy(buy_order_fee) - buy_order['fee'] = fee_par - trades_for_order[0]['fee'] = fee_par + buy_order["fee"] = fee_par + trades_for_order[0]["fee"] = fee_par - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - amount = sum(x['amount'] for x in trades_for_order) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + amount = sum(x["amount"] for x in trades_for_order) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, open_rate=0.245441, @@ -3633,10 +3891,10 @@ def test_get_real_amount( freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) if not use_ticker_usdt_rate: - mocker.patch(f'{EXMS}.fetch_ticker', side_effect=ExchangeError) + mocker.patch(f"{EXMS}.fetch_ticker", side_effect=ExchangeError) caplog.clear() - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") res = freqtrade.get_real_amount(trade, buy_order, order_obj) if fee_reduction_amount == 0: assert res is None @@ -3648,54 +3906,64 @@ def test_get_real_amount( @pytest.mark.parametrize( - 'fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount', [ + "fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount", + [ # basic, amount is reduced by fee (None, None, 0.001, 0.001, 7.992), # different fee currency on both trades, fee is average of both trade's fee - (0.02, 'BNB', 0.0005, 0.001518575, 7.996), - ]) + (0.02, "BNB", 0.0005, 0.001518575, 7.996), + ], +) def test_get_real_amount_multi( - default_conf_usdt, trades_for_order2, buy_order_fee, caplog, fee, mocker, markets, - fee_cost, fee_currency, fee_reduction_amount, expected_fee, expected_log_amount, + default_conf_usdt, + trades_for_order2, + buy_order_fee, + caplog, + fee, + mocker, + markets, + fee_cost, + fee_currency, + fee_reduction_amount, + expected_fee, + expected_log_amount, ): - trades_for_order = deepcopy(trades_for_order2) if fee_cost: - trades_for_order[0]['fee']['cost'] = fee_cost + trades_for_order[0]["fee"]["cost"] = fee_cost if fee_currency: - trades_for_order[0]['fee']['currency'] = fee_currency + trades_for_order[0]["fee"]["currency"] = fee_currency - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - amount = float(sum(x['amount'] for x in trades_for_order)) - default_conf_usdt['stake_currency'] = "ETH" + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + amount = float(sum(x["amount"] for x in trades_for_order)) + 
default_conf_usdt["stake_currency"] = "ETH" trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, - open_rate=0.245441 + open_rate=0.245441, ) # Fake markets entry to enable fee parsing - markets['BNB/ETH'] = markets['ETH/USDT'] + markets["BNB/ETH"] = markets["ETH/USDT"] freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) - mocker.patch(f'{EXMS}.fetch_ticker', - return_value={'ask': 0.19, 'last': 0.2}) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"ask": 0.19, "last": 0.2}) # Amount is reduced by "fee" expected_amount = amount * fee_reduction_amount - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) == expected_amount assert log_has( ( - 'Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, ' - 'is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), ' - f'fee={expected_amount}.' + "Applying fee on amount for Trade(id=None, pair=LTC/ETH, amount=8.00000000, " + "is_short=False, leverage=1.0, open_rate=0.24544100, open_since=closed), " + f"fee={expected_amount}." ), - caplog + caplog, ) assert trade.fee_open == expected_fee @@ -3706,38 +3974,39 @@ def test_get_real_amount_multi( assert trade.fee_close_currency is None -def test_get_real_amount_invalid_order(default_conf_usdt, trades_for_order, buy_order_fee, fee, - mocker): +def test_get_real_amount_invalid_order( + default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker +): limit_buy_order_usdt = deepcopy(buy_order_fee) - limit_buy_order_usdt['fee'] = {'cost': 0.004} + limit_buy_order_usdt["fee"] = {"cost": 0.004} - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) - amount = float(sum(x['amount'] for x in trades_for_order)) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) + amount = float(sum(x["amount"] for x in trades_for_order)) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, open_rate=0.245441, ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount does not change assert freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) is None -def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_doublefee, - fee, mocker): - - tfo_mock = mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) - mocker.patch(f'{EXMS}.get_valid_pair_combination', return_value='BNB/USDT') - mocker.patch(f'{EXMS}.fetch_ticker', return_value={'last': 200}) +def test_get_real_amount_fees_order( + default_conf_usdt, market_buy_order_usdt_doublefee, fee, mocker +): + tfo_mock = mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) + mocker.patch(f"{EXMS}.get_valid_pair_combination", return_value="BNB/USDT") + mocker.patch(f"{EXMS}.fetch_ticker", return_value={"last": 200}) trade = Trade( - pair='LTC/USDT', + pair="LTC/USDT", amount=30.0, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, 
open_rate=0.245441, @@ -3746,79 +4015,82 @@ def test_get_real_amount_fees_order(default_conf_usdt, market_buy_order_usdt_dou # Amount does not change assert trade.fee_open == 0.0025 - order_obj = Order.parse_from_ccxt_object(market_buy_order_usdt_doublefee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(market_buy_order_usdt_doublefee, "LTC/ETH", "buy") assert freqtrade.get_real_amount(trade, market_buy_order_usdt_doublefee, order_obj) is None assert tfo_mock.call_count == 0 # Fetch fees from trades dict if available to get "proper" values assert round(trade.fee_open, 4) == 0.001 -def test_get_real_amount_wrong_amount(default_conf_usdt, trades_for_order, buy_order_fee, fee, - mocker): +def test_get_real_amount_wrong_amount( + default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker +): limit_buy_order_usdt = deepcopy(buy_order_fee) - limit_buy_order_usdt['amount'] = limit_buy_order_usdt['amount'] - 0.001 + limit_buy_order_usdt["amount"] = limit_buy_order_usdt["amount"] - 0.001 - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - amount = float(sum(x['amount'] for x in trades_for_order)) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + amount = float(sum(x["amount"] for x in trades_for_order)) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount does not change with pytest.raises(DependencyException, match=r"Half bought\? Amounts don't match"): freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) -def test_get_real_amount_wrong_amount_rounding(default_conf_usdt, trades_for_order, buy_order_fee, - fee, mocker): +def test_get_real_amount_wrong_amount_rounding( + default_conf_usdt, trades_for_order, buy_order_fee, fee, mocker +): # Floats should not be compared directly. limit_buy_order_usdt = deepcopy(buy_order_fee) - trades_for_order[0]['amount'] = trades_for_order[0]['amount'] + 1e-15 + trades_for_order[0]["amount"] = trades_for_order[0]["amount"] + 1e-15 - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades_for_order) - amount = float(sum(x['amount'] for x in trades_for_order)) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades_for_order) + amount = float(sum(x["amount"] for x in trades_for_order)) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, open_rate=0.245441, ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") # Amount changes by fee amount. 
- assert pytest.approx(freqtrade.get_real_amount( - trade, limit_buy_order_usdt, order_obj)) == (amount * 0.001) + assert pytest.approx(freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj)) == ( + amount * 0.001 + ) def test_get_real_amount_open_trade_usdt(default_conf_usdt, fee, mocker): amount = 12345 trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, ) order = { - 'id': 'mocked_order', - 'amount': amount, - 'status': 'open', - 'side': 'buy', - 'price': 0.245441, + "id": "mocked_order", + "amount": amount, + "status": "open", + "side": "buy", + "price": 0.245441, } freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(order, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(order, "LTC/ETH", "buy") assert freqtrade.get_real_amount(trade, order, order_obj) is None @@ -3826,54 +4098,43 @@ def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker, limit_buy_order_usdt = deepcopy(buy_order_fee) # Fees amount in "POINT" - trades = [{ - "info": { - }, - "id": "some_trade_id", - "timestamp": 1660092505903, - "datetime": "2022-08-10T00:48:25.903Z", - "symbol": "CEL/USDT", - "order": "some_order_id", - "type": None, - "side": "sell", - "takerOrMaker": "taker", - "price": 1.83255, - "amount": 83.126, - "cost": 152.3325513, - "fee": { - "currency": "POINT", - "cost": 0.3046651026 - }, - "fees": [ - { - "cost": "0", - "currency": "USDT" - }, - { - "cost": "0", - "currency": "GT" - }, - { - "cost": "0.3046651026", - "currency": "POINT" - } - ] - }] + trades = [ + { + "info": {}, + "id": "some_trade_id", + "timestamp": 1660092505903, + "datetime": "2022-08-10T00:48:25.903Z", + "symbol": "CEL/USDT", + "order": "some_order_id", + "type": None, + "side": "sell", + "takerOrMaker": "taker", + "price": 1.83255, + "amount": 83.126, + "cost": 152.3325513, + "fee": {"currency": "POINT", "cost": 0.3046651026}, + "fees": [ + {"cost": "0", "currency": "USDT"}, + {"cost": "0", "currency": "GT"}, + {"cost": "0.3046651026", "currency": "POINT"}, + ], + } + ] - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=trades) - amount = float(sum(x['amount'] for x in trades)) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=trades) + amount = float(sum(x["amount"] for x in trades)) trade = Trade( - pair='CEL/USDT', + pair="CEL/USDT", amount=amount, - exchange='binance', + exchange="binance", fee_open=fee.return_value, fee_close=fee.return_value, - open_rate=0.245441 + open_rate=0.245441, ) - limit_buy_order_usdt['amount'] = amount + limit_buy_order_usdt["amount"] = amount freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - order_obj = Order.parse_from_ccxt_object(buy_order_fee, 'LTC/ETH', 'buy') + order_obj = Order.parse_from_ccxt_object(buy_order_fee, "LTC/ETH", "buy") res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) assert res is None assert trade.fee_open_currency is None @@ -3881,39 +4142,43 @@ def test_get_real_amount_in_point(default_conf_usdt, buy_order_fee, fee, mocker, message = "Not updating buy-fee - rate: None, POINT." 
assert log_has(message, caplog) caplog.clear() - freqtrade.config['exchange']['unknown_fee_rate'] = 1 + freqtrade.config["exchange"]["unknown_fee_rate"] = 1 res = freqtrade.get_real_amount(trade, limit_buy_order_usdt, order_obj) assert res is None - assert trade.fee_open_currency == 'POINT' + assert trade.fee_open_currency == "POINT" assert pytest.approx(trade.fee_open_cost) == 0.3046651026 assert trade.fee_open == 0.002 assert trade.fee_open != fee.return_value assert not log_has(message, caplog) -@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [ - (8.0, 0.0, 10, None), - (8.0, 0.0, 0, None), - (8.0, 0.1, 0, 0.1), - (8.0, 0.1, 10, None), - (8.0, 0.1, 8.0, None), - (8.0, 0.1, 7.9, 0.1), -]) -def test_apply_fee_conditional(default_conf_usdt, fee, mocker, caplog, - amount, fee_abs, wallet, amount_exp): - walletmock = mocker.patch('freqtrade.wallets.Wallets.update') - mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet) +@pytest.mark.parametrize( + "amount,fee_abs,wallet,amount_exp", + [ + (8.0, 0.0, 10, None), + (8.0, 0.0, 0, None), + (8.0, 0.1, 0, 0.1), + (8.0, 0.1, 10, None), + (8.0, 0.1, 8.0, None), + (8.0, 0.1, 7.9, 0.1), + ], +) +def test_apply_fee_conditional( + default_conf_usdt, fee, mocker, caplog, amount, fee_abs, wallet, amount_exp +): + walletmock = mocker.patch("freqtrade.wallets.Wallets.update") + mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=wallet) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, fee_close=fee.return_value, ) order = Order( - ft_order_side='buy', - order_id='100', + ft_order_side="buy", + order_id="100", ft_pair=trade.pair, ft_is_open=True, ) @@ -3921,48 +4186,52 @@ def test_apply_fee_conditional(default_conf_usdt, fee, mocker, caplog, walletmock.reset_mock() # Amount is kept as is - assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order) == amount_exp + assert freqtrade.apply_fee_conditional(trade, "LTC", amount, fee_abs, order) == amount_exp assert walletmock.call_count == 1 if fee_abs != 0 and amount_exp is None: assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog) -@pytest.mark.parametrize('amount,fee_abs,wallet,amount_exp', [ - (8.0, 0.0, 16, None), - (8.0, 0.0, 0, None), - (8.0, 0.1, 8, 0.1), - (8.0, 0.1, 20, None), - (8.0, 0.1, 16.0, None), - (8.0, 0.1, 7.9, 0.1), - (8.0, 0.1, 12, 0.1), - (8.0, 0.1, 15.9, 0.1), -]) -def test_apply_fee_conditional_multibuy(default_conf_usdt, fee, mocker, caplog, - amount, fee_abs, wallet, amount_exp): - walletmock = mocker.patch('freqtrade.wallets.Wallets.update') - mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=wallet) +@pytest.mark.parametrize( + "amount,fee_abs,wallet,amount_exp", + [ + (8.0, 0.0, 16, None), + (8.0, 0.0, 0, None), + (8.0, 0.1, 8, 0.1), + (8.0, 0.1, 20, None), + (8.0, 0.1, 16.0, None), + (8.0, 0.1, 7.9, 0.1), + (8.0, 0.1, 12, 0.1), + (8.0, 0.1, 15.9, 0.1), + ], +) +def test_apply_fee_conditional_multibuy( + default_conf_usdt, fee, mocker, caplog, amount, fee_abs, wallet, amount_exp +): + walletmock = mocker.patch("freqtrade.wallets.Wallets.update") + mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=wallet) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=amount, - exchange='binance', + exchange="binance", open_rate=0.245441, fee_open=fee.return_value, - fee_close=fee.return_value + fee_close=fee.return_value, ) # One closed order order = Order( - ft_order_side='buy', - 
order_id='10', + ft_order_side="buy", + order_id="10", ft_pair=trade.pair, ft_is_open=False, filled=amount, - status="closed" + status="closed", ) trade.orders.append(order) # Add additional order - this should NOT eat into dust unless the wallet was bigger already. order1 = Order( - ft_order_side='buy', - order_id='100', + ft_order_side="buy", + order_id="100", ft_pair=trade.pair, ft_is_open=True, ) @@ -3972,28 +4241,38 @@ def test_apply_fee_conditional_multibuy(default_conf_usdt, fee, mocker, caplog, walletmock.reset_mock() # The new trade amount will be 2x amount - fee / wallet will have to be adapted to this. - assert freqtrade.apply_fee_conditional(trade, 'LTC', amount, fee_abs, order1) == amount_exp + assert freqtrade.apply_fee_conditional(trade, "LTC", amount, fee_abs, order1) == amount_exp assert walletmock.call_count == 1 if fee_abs != 0 and amount_exp is None: assert log_has_re(r"Fee amount.*Eating.*dust\.", caplog) -@pytest.mark.parametrize("delta, is_high_delta", [ - (0.1, False), - (100, True), -]) -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize( + "delta, is_high_delta", + [ + (0.1, False), + (100, True), + ], +) +@pytest.mark.parametrize("is_short", [False, True]) def test_order_book_depth_of_market( - default_conf_usdt, ticker_usdt, limit_order_open, - fee, mocker, order_book_l2, delta, is_high_delta, is_short + default_conf_usdt, + ticker_usdt, + limit_order_open, + fee, + mocker, + order_book_l2, + delta, + is_high_delta, + is_short, ): - ticker_side = 'ask' if is_short else 'bid' + ticker_side = "ask" if is_short else "bid" - default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True - default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = delta + default_conf_usdt["entry_pricing"]["check_depth_of_market"]["enabled"] = True + default_conf_usdt["entry_pricing"]["check_depth_of_market"]["bids_to_ask_delta"] = delta patch_RPCManager(mocker) patch_exchange(mocker) - mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2) + mocker.patch(f"{EXMS}.fetch_l2_order_book", order_book_l2) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, @@ -4002,7 +4281,7 @@ def test_order_book_depth_of_market( ) # Save state of current whitelist - whitelist = deepcopy(default_conf_usdt['exchange']['pair_whitelist']) + whitelist = deepcopy(default_conf_usdt["exchange"]["pair_whitelist"]) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) freqtrade.enter_positions() @@ -4016,50 +4295,63 @@ def test_order_book_depth_of_market( assert pytest.approx(trade.stake_amount) == 60.0 assert trade.is_open assert trade.open_date is not None - assert trade.exchange == 'binance' + assert trade.exchange == "binance" assert len(Trade.session.scalars(select(Trade)).all()) == 1 # Simulate fulfilled LIMIT_BUY order for trade oobj = Order.parse_from_ccxt_object( - limit_order_open[entry_side(is_short)], 'ADA/USDT', entry_side(is_short)) + limit_order_open[entry_side(is_short)], "ADA/USDT", entry_side(is_short) + ) trade.update_trade(oobj) assert trade.open_rate == ticker_usdt.return_value[ticker_side] - assert whitelist == default_conf_usdt['exchange']['pair_whitelist'] + assert whitelist == default_conf_usdt["exchange"]["pair_whitelist"] -@pytest.mark.parametrize('exception_thrown,ask,last,order_book_top,order_book', [ - (False, 0.045, 0.046, 2, None), - (True, 0.042, 0.046, 1, {'bids': [[]], 'asks': [[]]}) -]) -def test_order_book_entry_pricing1(mocker, 
default_conf_usdt, order_book_l2, exception_thrown, - ask, last, order_book_top, order_book, caplog) -> None: +@pytest.mark.parametrize( + "exception_thrown,ask,last,order_book_top,order_book", + [(False, 0.045, 0.046, 2, None), (True, 0.042, 0.046, 1, {"bids": [[]], "asks": [[]]})], +) +def test_order_book_entry_pricing1( + mocker, + default_conf_usdt, + order_book_l2, + exception_thrown, + ask, + last, + order_book_top, + order_book, + caplog, +) -> None: """ test if function get_rate will return the order book price instead of the ask rate """ patch_exchange(mocker) - ticker_usdt_mock = MagicMock(return_value={'ask': ask, 'last': last}) + ticker_usdt_mock = MagicMock(return_value={"ask": ask, "last": last}) mocker.patch.multiple( EXMS, fetch_l2_order_book=MagicMock(return_value=order_book) if order_book else order_book_l2, fetch_ticker=ticker_usdt_mock, ) - default_conf_usdt['exchange']['name'] = 'binance' - default_conf_usdt['entry_pricing']['use_order_book'] = True - default_conf_usdt['entry_pricing']['order_book_top'] = order_book_top - default_conf_usdt['entry_pricing']['price_last_balance'] = 0 - default_conf_usdt['telegram']['enabled'] = False + default_conf_usdt["exchange"]["name"] = "binance" + default_conf_usdt["entry_pricing"]["use_order_book"] = True + default_conf_usdt["entry_pricing"]["order_book_top"] = order_book_top + default_conf_usdt["entry_pricing"]["price_last_balance"] = 0 + default_conf_usdt["telegram"]["enabled"] = False freqtrade = FreqtradeBot(default_conf_usdt) if exception_thrown: with pytest.raises(PricingError): - freqtrade.exchange.get_rate('ETH/USDT', side="entry", is_short=False, refresh=True) + freqtrade.exchange.get_rate("ETH/USDT", side="entry", is_short=False, refresh=True) assert log_has_re( - r'ETH/USDT - Entry Price at location 1 from orderbook could not be determined.', caplog) + r"ETH/USDT - Entry Price at location 1 from orderbook could not be determined.", caplog + ) else: - assert freqtrade.exchange.get_rate( - 'ETH/USDT', side="entry", is_short=False, refresh=True) == 0.043935 + assert ( + freqtrade.exchange.get_rate("ETH/USDT", side="entry", is_short=False, refresh=True) + == 0.043935 + ) assert ticker_usdt_mock.call_count == 0 @@ -4068,46 +4360,49 @@ def test_check_depth_of_market(default_conf_usdt, mocker, order_book_l2) -> None test check depth of market """ patch_exchange(mocker) - mocker.patch.multiple( - EXMS, - fetch_l2_order_book=order_book_l2 - ) - default_conf_usdt['telegram']['enabled'] = False - default_conf_usdt['exchange']['name'] = 'binance' - default_conf_usdt['entry_pricing']['check_depth_of_market']['enabled'] = True + mocker.patch.multiple(EXMS, fetch_l2_order_book=order_book_l2) + default_conf_usdt["telegram"]["enabled"] = False + default_conf_usdt["exchange"]["name"] = "binance" + default_conf_usdt["entry_pricing"]["check_depth_of_market"]["enabled"] = True # delta is 100 which is impossible to reach. 
hence function will return false - default_conf_usdt['entry_pricing']['check_depth_of_market']['bids_to_ask_delta'] = 100 + default_conf_usdt["entry_pricing"]["check_depth_of_market"]["bids_to_ask_delta"] = 100 freqtrade = FreqtradeBot(default_conf_usdt) - conf = default_conf_usdt['entry_pricing']['check_depth_of_market'] - assert freqtrade._check_depth_of_market('ETH/BTC', conf, side=SignalDirection.LONG) is False + conf = default_conf_usdt["entry_pricing"]["check_depth_of_market"] + assert freqtrade._check_depth_of_market("ETH/BTC", conf, side=SignalDirection.LONG) is False -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize("is_short", [False, True]) def test_order_book_exit_pricing( - default_conf_usdt, limit_buy_order_usdt_open, limit_buy_order_usdt, fee, is_short, - limit_sell_order_usdt_open, mocker, order_book_l2, caplog) -> None: + default_conf_usdt, + limit_buy_order_usdt_open, + limit_buy_order_usdt, + fee, + is_short, + limit_sell_order_usdt_open, + mocker, + order_book_l2, + caplog, +) -> None: """ test order book ask strategy """ - mocker.patch(f'{EXMS}.fetch_l2_order_book', order_book_l2) - default_conf_usdt['exchange']['name'] = 'binance' - default_conf_usdt['exit_pricing']['use_order_book'] = True - default_conf_usdt['exit_pricing']['order_book_top'] = 1 - default_conf_usdt['telegram']['enabled'] = False + mocker.patch(f"{EXMS}.fetch_l2_order_book", order_book_l2) + default_conf_usdt["exchange"]["name"] = "binance" + default_conf_usdt["exit_pricing"]["use_order_book"] = True + default_conf_usdt["exit_pricing"]["order_book_top"] = 1 + default_conf_usdt["telegram"]["enabled"] = False patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - limit_buy_order_usdt_open, - limit_sell_order_usdt_open, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + limit_buy_order_usdt_open, + limit_sell_order_usdt_open, + ] + ), get_fee=fee, ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -4119,40 +4414,37 @@ def test_order_book_exit_pricing( assert trade time.sleep(0.01) # Race condition fix - oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, limit_buy_order_usdt['symbol'], 'buy') + oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, limit_buy_order_usdt["symbol"], "buy") trade.update_trade(oobj) freqtrade.wallets.update() assert trade.is_open is True if is_short: - patch_get_signal(freqtrade, enter_long=False, exit_short=True) + patch_get_signal(freqtrade, enter_long=False, exit_short=True) else: patch_get_signal(freqtrade, enter_long=False, exit_long=True) assert freqtrade.handle_trade(trade) is True - assert trade.close_rate_requested == order_book_l2.return_value['asks'][0][0] + assert trade.close_rate_requested == order_book_l2.return_value["asks"][0][0] - mocker.patch(f'{EXMS}.fetch_l2_order_book', return_value={'bids': [[]], 'asks': [[]]}) + mocker.patch(f"{EXMS}.fetch_l2_order_book", return_value={"bids": [[]], "asks": [[]]}) with pytest.raises(PricingError): freqtrade.handle_trade(trade) assert log_has_re( - r"ETH/USDT - Exit Price at location 1 from orderbook could not be determined\..*", - caplog) + r"ETH/USDT - Exit Price at location 1 from orderbook could not be determined\..*", caplog + ) def test_startup_state(default_conf_usdt, mocker): - default_conf_usdt['pairlist'] = {'method': 'VolumePairList', - 'config': 
{'number_assets': 20} - } - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + default_conf_usdt["pairlist"] = {"method": "VolumePairList", "config": {"number_assets": 20}} + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) worker = get_patched_worker(mocker, default_conf_usdt) assert worker.freqtrade.state is State.RUNNING def test_startup_trade_reinit(default_conf_usdt, edge_conf, mocker): - - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) reinit_mock = MagicMock() - mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', reinit_mock) + mocker.patch("freqtrade.persistence.Trade.stoploss_reinitialization", reinit_mock) ftbot = get_patched_freqtradebot(mocker, default_conf_usdt) ftbot.startup() @@ -4166,13 +4458,14 @@ def test_startup_trade_reinit(default_conf_usdt, edge_conf, mocker): @pytest.mark.usefixtures("init_persistence") -def test_sync_wallet_dry_run(mocker, default_conf_usdt, ticker_usdt, fee, limit_buy_order_usdt_open, - caplog): - default_conf_usdt['dry_run'] = True +def test_sync_wallet_dry_run( + mocker, default_conf_usdt, ticker_usdt, fee, limit_buy_order_usdt_open, caplog +): + default_conf_usdt["dry_run"] = True # Initialize to 2 times stake amount - default_conf_usdt['dry_run_wallet'] = 120.0 - default_conf_usdt['max_open_trades'] = 2 - default_conf_usdt['tradable_balance_ratio'] = 1.0 + default_conf_usdt["dry_run_wallet"] = 120.0 + default_conf_usdt["max_open_trades"] = 2 + default_conf_usdt["tradable_balance_ratio"] = 1.0 patch_exchange(mocker) mocker.patch.multiple( EXMS, @@ -4183,40 +4476,46 @@ def test_sync_wallet_dry_run(mocker, default_conf_usdt, ticker_usdt, fee, limit_ bot = get_patched_freqtradebot(mocker, default_conf_usdt) patch_get_signal(bot) - assert bot.wallets.get_free('USDT') == 120.0 + assert bot.wallets.get_free("USDT") == 120.0 n = bot.enter_positions() assert n == 2 trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 2 - bot.config['max_open_trades'] = 3 + bot.config["max_open_trades"] = 3 n = bot.enter_positions() assert n == 0 - assert log_has_re(r"Unable to create trade for XRP/USDT: " - r"Available balance \(0.0 USDT\) is lower than stake amount \(60.0 USDT\)", - caplog) + assert log_has_re( + r"Unable to create trade for XRP/USDT: " + r"Available balance \(0.0 USDT\) is lower than stake amount \(60.0 USDT\)", + caplog, + ) @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize("is_short,buy_calls,sell_calls", [ - (False, 1, 1), - (True, 1, 1), -]) -def test_cancel_all_open_orders(mocker, default_conf_usdt, fee, limit_order, limit_order_open, - is_short, buy_calls, sell_calls): - default_conf_usdt['cancel_open_orders_on_exit'] = True +@pytest.mark.parametrize( + "is_short,buy_calls,sell_calls", + [ + (False, 1, 1), + (True, 1, 1), + ], +) +def test_cancel_all_open_orders( + mocker, default_conf_usdt, fee, limit_order, limit_order_open, is_short, buy_calls, sell_calls +): + default_conf_usdt["cancel_open_orders_on_exit"] = True mocker.patch( - f'{EXMS}.fetch_order', + f"{EXMS}.fetch_order", side_effect=[ ExchangeError(), limit_order[exit_side(is_short)], limit_order_open[entry_side(is_short)], limit_order_open[exit_side(is_short)], - ] + ], ) - buy_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter') - sell_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit') + buy_mock = 
mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_cancel_enter") + sell_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_cancel_exit") freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) create_mock_trades(fee, is_short=is_short) @@ -4242,7 +4541,7 @@ def test_check_for_open_trades(mocker, default_conf_usdt, fee, is_short): freqtrade.check_for_open_trades() assert freqtrade.rpc.send_msg.call_count == 1 - assert 'Handle these trades manually' in freqtrade.rpc.send_msg.call_args[0][0]['status'] + assert "Handle these trades manually" in freqtrade.rpc.send_msg.call_args[0][0]["status"] @pytest.mark.parametrize("is_short", [False, True]) @@ -4255,32 +4554,34 @@ def test_startup_update_open_orders(mocker, default_conf_usdt, fee, caplog, is_s assert not log_has_re(r"Error updating Order .*", caplog) caplog.clear() - freqtrade.config['dry_run'] = False + freqtrade.config["dry_run"] = False freqtrade.startup_update_open_orders() assert len(Order.get_open_orders()) == 4 matching_buy_order = mock_order_4(is_short=is_short) - matching_buy_order.update({ - 'status': 'closed', - }) - mocker.patch(f'{EXMS}.fetch_order', return_value=matching_buy_order) + matching_buy_order.update( + { + "status": "closed", + } + ) + mocker.patch(f"{EXMS}.fetch_order", return_value=matching_buy_order) freqtrade.startup_update_open_orders() # Only stoploss and sell orders are kept open assert len(Order.get_open_orders()) == 3 caplog.clear() - mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError) + mocker.patch(f"{EXMS}.fetch_order", side_effect=ExchangeError) freqtrade.startup_update_open_orders() assert log_has_re(r"Error updating Order .*", caplog) - mocker.patch(f'{EXMS}.fetch_order', side_effect=InvalidOrderException) - hto_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order') + mocker.patch(f"{EXMS}.fetch_order", side_effect=InvalidOrderException) + hto_mock = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_cancel_order") # Orders which are no longer found after X days should be assumed as canceled. 
freqtrade.startup_update_open_orders() assert log_has_re(r"Order is older than \d days.*", caplog) assert hto_mock.call_count == 3 - assert hto_mock.call_args_list[0][0][0]['status'] == 'canceled' - assert hto_mock.call_args_list[1][0][0]['status'] == 'canceled' + assert hto_mock.call_args_list[0][0][0]["status"] == "canceled" + assert hto_mock.call_args_list[1][0][0]["status"] == "canceled" @pytest.mark.usefixtures("init_persistence") @@ -4289,7 +4590,7 @@ def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog): create_mock_trades_usdt(fee) trades = Trade.get_trades().all() - trades[-1].exchange = 'some_other_exchange' + trades[-1].exchange = "some_other_exchange" for trade in trades: assert trade.price_precision is None assert trade.amount_precision is None @@ -4298,7 +4599,7 @@ def test_startup_backpopulate_precision(mocker, default_conf_usdt, fee, caplog): freqtrade.startup_backpopulate_precision() trades = Trade.get_trades().all() for trade in trades: - if trade.exchange == 'some_other_exchange': + if trade.exchange == "some_other_exchange": assert trade.price_precision is None assert trade.amount_precision is None assert trade.precision_mode is None @@ -4314,19 +4615,21 @@ def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_ freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) def patch_with_fee(order): - order.update({'fee': {'cost': 0.1, 'rate': 0.01, - 'currency': order['symbol'].split('/')[0]}}) + order.update( + {"fee": {"cost": 0.1, "rate": 0.01, "currency": order["symbol"].split("/")[0]}} + ) return order - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - side_effect=[ - patch_with_fee(mock_order_2_sell(is_short=is_short)), - patch_with_fee(mock_order_3_sell(is_short=is_short)), - patch_with_fee(mock_order_2(is_short=is_short)), - patch_with_fee(mock_order_3(is_short=is_short)), - patch_with_fee(mock_order_4(is_short=is_short)), - ] - ) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", + side_effect=[ + patch_with_fee(mock_order_2_sell(is_short=is_short)), + patch_with_fee(mock_order_3_sell(is_short=is_short)), + patch_with_fee(mock_order_2(is_short=is_short)), + patch_with_fee(mock_order_3(is_short=is_short)), + patch_with_fee(mock_order_4(is_short=is_short)), + ], + ) create_mock_trades(fee, is_short=is_short) trades = Trade.get_trades().all() @@ -4349,7 +4652,7 @@ def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_ assert trade.fee_close_cost is None assert trade.fee_close_currency is None - freqtrade.config['dry_run'] = False + freqtrade.config["dry_run"] = False freqtrade.update_trades_without_assigned_fees() @@ -4375,9 +4678,9 @@ def test_update_trades_without_assigned_fees(mocker, default_conf_usdt, fee, is_ @pytest.mark.parametrize("is_short", [False, True]) def test_reupdate_enter_order_fees(mocker, default_conf_usdt, fee, caplog, is_short): freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_uts = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.update_trade_state') + mock_uts = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.update_trade_state") - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', return_value={'status': 'open'}) + mocker.patch(f"{EXMS}.fetch_order_or_stoploss_order", return_value={"status": "open"}) create_mock_trades(fee, is_short) trades = Trade.get_trades().all() @@ -4385,14 +4688,14 @@ def test_reupdate_enter_order_fees(mocker, default_conf_usdt, fee, caplog, is_sh # assert log_has_re(r"Trying to reupdate buy fees 
for .*", caplog) assert mock_uts.call_count == 1 assert mock_uts.call_args_list[0][0][0] == trades[3] - assert mock_uts.call_args_list[0][0][1] == mock_order_4(is_short)['id'] + assert mock_uts.call_args_list[0][0][1] == mock_order_4(is_short)["id"] assert log_has_re(r"Trying to refind lost order for .*", caplog) mock_uts.reset_mock() caplog.clear() # Test with trade without orders trade = Trade( - pair='XRP/ETH', + pair="XRP/ETH", stake_amount=60.0, fee_open=fee.return_value, fee_close=fee.return_value, @@ -4400,8 +4703,8 @@ def test_reupdate_enter_order_fees(mocker, default_conf_usdt, fee, caplog, is_sh is_open=True, amount=30, open_rate=2.0, - exchange='binance', - is_short=is_short + exchange="binance", + is_short=is_short, ) Trade.session.add(trade) @@ -4415,13 +4718,11 @@ def test_reupdate_enter_order_fees(mocker, default_conf_usdt, fee, caplog, is_sh def test_handle_insufficient_funds(mocker, default_conf_usdt, fee, is_short, caplog): caplog.set_level(logging.DEBUG) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_uts = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.update_trade_state') + mock_uts = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.update_trade_state") - mock_fo = mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - return_value={'status': 'open'}) + mock_fo = mocker.patch(f"{EXMS}.fetch_order_or_stoploss_order", return_value={"status": "open"}) def reset_open_orders(trade): - trade.is_short = is_short create_mock_trades(fee, is_short=is_short) @@ -4437,8 +4738,9 @@ def test_handle_insufficient_funds(mocker, default_conf_usdt, fee, is_short, cap freqtrade.handle_insufficient_funds(trade) order = trade.orders[0] - assert log_has_re(r"Order Order(.*order_id=" + order.order_id + ".*) is no longer open.", - caplog) + assert log_has_re( + r"Order Order(.*order_id=" + order.order_id + ".*) is no longer open.", caplog + ) assert mock_fo.call_count == 0 assert mock_uts.call_count == 0 # No change to orderid - as update_trade_state is mocked @@ -4500,14 +4802,13 @@ def test_handle_insufficient_funds(mocker, default_conf_usdt, fee, is_short, cap assert mock_fo.call_count == 1 assert mock_uts.call_count == 1 # sell-orderid is "refound" and added to the trade - assert trade.open_orders_ids[0] == order['id'] + assert trade.open_orders_ids[0] == order["id"] assert trade.has_open_sl_orders is False caplog.clear() # Test error case - mock_fo = mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - side_effect=ExchangeError()) + mock_fo = mocker.patch(f"{EXMS}.fetch_order_or_stoploss_order", side_effect=ExchangeError()) order = mock_order_5_stoploss(is_short=is_short) freqtrade.handle_insufficient_funds(trades[4]) @@ -4517,33 +4818,34 @@ def test_handle_insufficient_funds(mocker, default_conf_usdt, fee, is_short, cap @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("is_short", [False, True]) def test_handle_onexchange_order(mocker, default_conf_usdt, limit_order, is_short, caplog): - default_conf_usdt['dry_run'] = False + default_conf_usdt["dry_run"] = False freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_uts = mocker.spy(freqtrade, 'update_trade_state') + mock_uts = mocker.spy(freqtrade, "update_trade_state") entry_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] - mock_fo = mocker.patch(f'{EXMS}.fetch_orders', return_value=[ - entry_order, - exit_order, - ]) + mock_fo = mocker.patch( + f"{EXMS}.fetch_orders", + return_value=[ + entry_order, + exit_order, + ], + ) trade = 
Trade( - pair='ETH/USDT', + pair="ETH/USDT", fee_open=0.001, fee_close=0.001, - open_rate=entry_order['price'], + open_rate=entry_order["price"], open_date=dt_now(), - stake_amount=entry_order['cost'], - amount=entry_order['amount'], + stake_amount=entry_order["cost"], + amount=entry_order["amount"], exchange="binance", is_short=is_short, leverage=1, ) - trade.orders.append(Order.parse_from_ccxt_object( - entry_order, 'ADA/USDT', entry_side(is_short)) - ) + trade.orders.append(Order.parse_from_ccxt_object(entry_order, "ADA/USDT", entry_side(is_short))) Trade.session.add(trade) freqtrade.handle_onexchange_order(trade) assert log_has_re(r"Found previously unknown order .*", caplog) @@ -4560,42 +4862,51 @@ def test_handle_onexchange_order(mocker, default_conf_usdt, limit_order, is_shor @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("is_short", [False, True]) -@pytest.mark.parametrize("factor,adjusts", [ - (0.99, True), - (0.97, False), -]) +@pytest.mark.parametrize( + "factor,adjusts", + [ + (0.99, True), + (0.97, False), + ], +) def test_handle_onexchange_order_changed_amount( - mocker, default_conf_usdt, limit_order, is_short, caplog, - factor, adjusts, + mocker, + default_conf_usdt, + limit_order, + is_short, + caplog, + factor, + adjusts, ): - default_conf_usdt['dry_run'] = False + default_conf_usdt["dry_run"] = False freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_uts = mocker.spy(freqtrade, 'update_trade_state') + mock_uts = mocker.spy(freqtrade, "update_trade_state") entry_order = limit_order[entry_side(is_short)] - mock_fo = mocker.patch(f'{EXMS}.fetch_orders', return_value=[ - entry_order, - ]) + mock_fo = mocker.patch( + f"{EXMS}.fetch_orders", + return_value=[ + entry_order, + ], + ) trade = Trade( - pair='ETH/USDT', + pair="ETH/USDT", fee_open=0.001, - base_currency='ETH', + base_currency="ETH", fee_close=0.001, - open_rate=entry_order['price'], + open_rate=entry_order["price"], open_date=dt_now(), - stake_amount=entry_order['cost'], - amount=entry_order['amount'], + stake_amount=entry_order["cost"], + amount=entry_order["amount"], exchange="binance", is_short=is_short, leverage=1, ) freqtrade.wallets = MagicMock() - freqtrade.wallets.get_total = MagicMock(return_value=entry_order['amount'] * factor) + freqtrade.wallets.get_total = MagicMock(return_value=entry_order["amount"] * factor) - trade.orders.append(Order.parse_from_ccxt_object( - entry_order, 'ADA/USDT', entry_side(is_short)) - ) + trade.orders.append(Order.parse_from_ccxt_object(entry_order, "ADA/USDT", entry_side(is_short))) Trade.session.add(trade) # assert trade.amount > entry_order['amount'] @@ -4606,14 +4917,14 @@ def test_handle_onexchange_order_changed_amount( trade = Trade.session.scalars(select(Trade)).first() - assert log_has_re(r'.*has a total of .* but the Wallet shows.*', caplog) + assert log_has_re(r".*has a total of .* but the Wallet shows.*", caplog) if adjusts: # Trade amount is updated - assert trade.amount == entry_order['amount'] * factor - assert log_has_re(r'.*Adjusting trade amount to.*', caplog) + assert trade.amount == entry_order["amount"] * factor + assert log_has_re(r".*Adjusting trade amount to.*", caplog) else: - assert log_has_re(r'.*Refusing to adjust as the difference.*', caplog) - assert trade.amount == entry_order['amount'] + assert log_has_re(r".*Refusing to adjust as the difference.*", caplog) + assert trade.amount == entry_order["amount"] assert len(trade.orders) == 1 assert trade.is_open is True @@ -4622,44 +4933,51 @@ def 
test_handle_onexchange_order_changed_amount( @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("is_short", [False, True]) def test_handle_onexchange_order_exit(mocker, default_conf_usdt, limit_order, is_short, caplog): - default_conf_usdt['dry_run'] = False + default_conf_usdt["dry_run"] = False freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_uts = mocker.spy(freqtrade, 'update_trade_state') + mock_uts = mocker.spy(freqtrade, "update_trade_state") entry_order = limit_order[entry_side(is_short)] add_entry_order = deepcopy(entry_order) - add_entry_order.update({ - 'id': '_partial_entry_id', - 'amount': add_entry_order['amount'] / 1.5, - 'cost': add_entry_order['cost'] / 1.5, - 'filled': add_entry_order['filled'] / 1.5, - }) + add_entry_order.update( + { + "id": "_partial_entry_id", + "amount": add_entry_order["amount"] / 1.5, + "cost": add_entry_order["cost"] / 1.5, + "filled": add_entry_order["filled"] / 1.5, + } + ) exit_order_part = deepcopy(limit_order[exit_side(is_short)]) - exit_order_part.update({ - 'id': 'some_random_partial_id', - 'amount': exit_order_part['amount'] / 2, - 'cost': exit_order_part['cost'] / 2, - 'filled': exit_order_part['filled'] / 2, - }) + exit_order_part.update( + { + "id": "some_random_partial_id", + "amount": exit_order_part["amount"] / 2, + "cost": exit_order_part["cost"] / 2, + "filled": exit_order_part["filled"] / 2, + } + ) exit_order = limit_order[exit_side(is_short)] # Orders intentionally in the wrong sequence - mock_fo = mocker.patch(f'{EXMS}.fetch_orders', return_value=[ - entry_order, - exit_order_part, - exit_order, - add_entry_order, - ]) + mock_fo = mocker.patch( + f"{EXMS}.fetch_orders", + return_value=[ + entry_order, + exit_order_part, + exit_order, + add_entry_order, + ], + ) trade = Trade( - pair='ETH/USDT', + pair="ETH/USDT", fee_open=0.001, fee_close=0.001, - open_rate=entry_order['price'], + open_rate=entry_order["price"], open_date=dt_now(), - stake_amount=entry_order['cost'], - amount=entry_order['amount'], + stake_amount=entry_order["cost"], + amount=entry_order["amount"], exchange="binance", is_short=is_short, leverage=1, @@ -4690,11 +5008,52 @@ def test_handle_onexchange_order_exit(mocker, default_conf_usdt, limit_order, is assert trade.amount == 5.0 +@pytest.mark.usefixtures("init_persistence") +@pytest.mark.parametrize("is_short", [False, True]) +def test_handle_onexchange_order_fully_canceled_enter( + mocker, default_conf_usdt, limit_order, is_short, caplog +): + default_conf_usdt["dry_run"] = False + freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) + + entry_order = limit_order[entry_side(is_short)] + entry_order["status"] = "canceled" + entry_order["filled"] = 0.0 + mock_fo = mocker.patch( + f"{EXMS}.fetch_orders", + return_value=[ + entry_order, + ], + ) + mocker.patch(f"{EXMS}.get_rate", return_value=entry_order["price"]) + + trade = Trade( + pair="ETH/USDT", + fee_open=0.001, + fee_close=0.001, + open_rate=entry_order["price"], + open_date=dt_now(), + stake_amount=entry_order["cost"], + amount=entry_order["amount"], + exchange="binance", + is_short=is_short, + leverage=1, + ) + + trade.orders.append(Order.parse_from_ccxt_object(entry_order, "ADA/USDT", entry_side(is_short))) + Trade.session.add(trade) + assert freqtrade.handle_onexchange_order(trade) is True + assert log_has_re(r"Trade only had fully canceled entry orders\. 
.*", caplog) + assert mock_fo.call_count == 1 + trades = Trade.get_trades().all() + assert len(trades) == 0 + + def test_get_valid_price(mocker, default_conf_usdt) -> None: patch_RPCManager(mocker) patch_exchange(mocker) freqtrade = FreqtradeBot(default_conf_usdt) - freqtrade.config['custom_price_max_distance_ratio'] = 0.02 + freqtrade.config["custom_price_max_distance_ratio"] = 0.02 custom_price_string = "10" custom_price_badstring = "10abc" @@ -4730,35 +5089,42 @@ def test_get_valid_price(mocker, default_conf_usdt) -> None: assert valid_price_at_min_alwd < proposed_price -@pytest.mark.parametrize('trading_mode,calls,t1,t2', [ - ('spot', 0, "2021-09-01 00:00:00", "2021-09-01 08:00:00"), - ('margin', 0, "2021-09-01 00:00:00", "2021-09-01 08:00:00"), - ('futures', 15, "2021-09-01 00:01:02", "2021-09-01 08:00:01"), - ('futures', 16, "2021-09-01 00:00:02", "2021-09-01 08:00:01"), - ('futures', 16, "2021-08-31 23:59:59", "2021-09-01 08:00:01"), - ('futures', 16, "2021-09-01 00:00:02", "2021-09-01 08:00:02"), - ('futures', 16, "2021-08-31 23:59:59", "2021-09-01 08:00:02"), - ('futures', 16, "2021-08-31 23:59:59", "2021-09-01 08:00:03"), - ('futures', 16, "2021-08-31 23:59:59", "2021-09-01 08:00:04"), - ('futures', 17, "2021-08-31 23:59:59", "2021-09-01 08:01:05"), - ('futures', 17, "2021-08-31 23:59:59", "2021-09-01 08:01:06"), - ('futures', 17, "2021-08-31 23:59:59", "2021-09-01 08:01:07"), - ('futures', 17, "2021-08-31 23:59:58", "2021-09-01 08:01:07"), -]) -@pytest.mark.parametrize('tzoffset', [ - '+00:00', - '+01:00', - '-02:00', -]) -def test_update_funding_fees_schedule(mocker, default_conf, trading_mode, calls, time_machine, - t1, t2, tzoffset): +@pytest.mark.parametrize( + "trading_mode,calls,t1,t2", + [ + ("spot", 0, "2021-09-01 00:00:00", "2021-09-01 08:00:00"), + ("margin", 0, "2021-09-01 00:00:00", "2021-09-01 08:00:00"), + ("futures", 15, "2021-09-01 00:01:02", "2021-09-01 08:00:01"), + ("futures", 16, "2021-09-01 00:00:02", "2021-09-01 08:00:01"), + ("futures", 16, "2021-08-31 23:59:59", "2021-09-01 08:00:01"), + ("futures", 16, "2021-09-01 00:00:02", "2021-09-01 08:00:02"), + ("futures", 16, "2021-08-31 23:59:59", "2021-09-01 08:00:02"), + ("futures", 16, "2021-08-31 23:59:59", "2021-09-01 08:00:03"), + ("futures", 16, "2021-08-31 23:59:59", "2021-09-01 08:00:04"), + ("futures", 17, "2021-08-31 23:59:59", "2021-09-01 08:01:05"), + ("futures", 17, "2021-08-31 23:59:59", "2021-09-01 08:01:06"), + ("futures", 17, "2021-08-31 23:59:59", "2021-09-01 08:01:07"), + ("futures", 17, "2021-08-31 23:59:58", "2021-09-01 08:01:07"), + ], +) +@pytest.mark.parametrize( + "tzoffset", + [ + "+00:00", + "+01:00", + "-02:00", + ], +) +def test_update_funding_fees_schedule( + mocker, default_conf, trading_mode, calls, time_machine, t1, t2, tzoffset +): time_machine.move_to(f"{t1} {tzoffset}", tick=False) patch_RPCManager(mocker) patch_exchange(mocker) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.update_funding_fees', return_value=True) - default_conf['trading_mode'] = trading_mode - default_conf['margin_mode'] = 'isolated' + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.update_funding_fees", return_value=True) + default_conf["trading_mode"] = trading_mode + default_conf["margin_mode"] = "isolated" freqtrade = get_patched_freqtradebot(mocker, default_conf) time_machine.move_to(f"{t2} {tzoffset}", tick=False) @@ -4768,8 +5134,8 @@ def test_update_funding_fees_schedule(mocker, default_conf, trading_mode, calls, assert freqtrade.update_funding_fees.call_count == calls 
-@pytest.mark.parametrize('schedule_off', [False, True]) -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("schedule_off", [False, True]) +@pytest.mark.parametrize("is_short", [True, False]) def test_update_funding_fees( mocker, default_conf, @@ -4778,7 +5144,7 @@ def test_update_funding_fees( ticker_usdt_sell_up, is_short, limit_order_open, - schedule_off + schedule_off, ): """ nominal_value = mark_price * size @@ -4807,55 +5173,67 @@ def test_update_funding_fees( enter_mm = MagicMock(return_value=open_order) patch_RPCManager(mocker) patch_exchange(mocker) - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" date_midnight = dt_utc(2021, 9, 1) date_eight = dt_utc(2021, 9, 1, 8) date_sixteen = dt_utc(2021, 9, 1, 16) - columns = ['date', 'open', 'high', 'low', 'close', 'volume'] + columns = ["date", "open", "high", "low", "close", "volume"] # 16:00 entry is actually never used # But should be kept in the test to ensure we're filtering correctly. funding_rates = { - "LTC/USDT": - DataFrame([ + "LTC/USDT": DataFrame( + [ [date_midnight, 0.00032583, 0, 0, 0, 0], [date_eight, 0.00024472, 0, 0, 0, 0], [date_sixteen, 0.00024472, 0, 0, 0, 0], - ], columns=columns), - "ETH/USDT": - DataFrame([ + ], + columns=columns, + ), + "ETH/USDT": DataFrame( + [ [date_midnight, 0.0001, 0, 0, 0, 0], [date_eight, 0.0001, 0, 0, 0, 0], [date_sixteen, 0.0001, 0, 0, 0, 0], - ], columns=columns), - "XRP/USDT": - DataFrame([ + ], + columns=columns, + ), + "XRP/USDT": DataFrame( + [ [date_midnight, 0.00049426, 0, 0, 0, 0], [date_eight, 0.00032715, 0, 0, 0, 0], [date_sixteen, 0.00032715, 0, 0, 0, 0], - ], columns=columns) + ], + columns=columns, + ), } mark_prices = { - "LTC/USDT": - DataFrame([ + "LTC/USDT": DataFrame( + [ [date_midnight, 3.3, 0, 0, 0, 0], [date_eight, 3.2, 0, 0, 0, 0], [date_sixteen, 3.2, 0, 0, 0, 0], - ], columns=columns), - "ETH/USDT": - DataFrame([ + ], + columns=columns, + ), + "ETH/USDT": DataFrame( + [ [date_midnight, 2.4, 0, 0, 0, 0], [date_eight, 2.5, 0, 0, 0, 0], [date_sixteen, 2.5, 0, 0, 0, 0], - ], columns=columns), - "XRP/USDT": - DataFrame([ + ], + columns=columns, + ), + "XRP/USDT": DataFrame( + [ [date_midnight, 1.2, 0, 0, 0, 0], [date_eight, 1.2, 0, 0, 0, 0], [date_sixteen, 1.2, 0, 0, 0, 0], - ], columns=columns) + ], + columns=columns, + ), } def refresh_latest_ohlcv_mock(pairlist, **kwargs): @@ -4868,16 +5246,12 @@ def test_update_funding_fees( return ret - mocker.patch(f'{EXMS}.refresh_latest_ohlcv', side_effect=refresh_latest_ohlcv_mock) + mocker.patch(f"{EXMS}.refresh_latest_ohlcv", side_effect=refresh_latest_ohlcv_mock) mocker.patch.multiple( EXMS, get_rate=enter_rate_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), create_order=enter_mm, get_min_pair_stake_amount=MagicMock(return_value=1), get_fee=fee, @@ -4887,47 +5261,52 @@ def test_update_funding_fees( freqtrade = get_patched_freqtradebot(mocker, default_conf) # initial funding fees, - freqtrade.execute_entry('ETH/USDT', 123, is_short=is_short) - freqtrade.execute_entry('LTC/USDT', 2.0, is_short=is_short) - freqtrade.execute_entry('XRP/USDT', 123, is_short=is_short) + freqtrade.execute_entry("ETH/USDT", 123, is_short=is_short) + freqtrade.execute_entry("LTC/USDT", 2.0, is_short=is_short) + freqtrade.execute_entry("XRP/USDT", 123, is_short=is_short) multiple = 1 
if is_short else -1 trades = Trade.get_open_trades() assert len(trades) == 3 for trade in trades: assert pytest.approx(trade.funding_fees) == 0 - mocker.patch(f'{EXMS}.create_order', return_value=open_exit_order) + mocker.patch(f"{EXMS}.create_order", return_value=open_exit_order) time_machine.move_to("2021-09-01 08:00:00 +00:00") if schedule_off: for trade in trades: freqtrade.execute_trade_exit( trade=trade, # The values of the next 2 params are irrelevant for this test - limit=ticker_usdt_sell_up()['bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.ROI) + limit=ticker_usdt_sell_up()["bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.ROI), + ) + assert trade.funding_fees == pytest.approx( + sum( + trade.amount + * mark_prices[trade.pair].iloc[1:2]["open"] + * funding_rates[trade.pair].iloc[1:2]["open"] + * multiple + ) ) - assert trade.funding_fees == pytest.approx(sum( - trade.amount * - mark_prices[trade.pair].iloc[1:2]['open'] * - funding_rates[trade.pair].iloc[1:2]['open'] * multiple - )) else: freqtrade._schedule.run_pending() # Funding fees for 00:00 and 08:00 for trade in trades: - assert trade.funding_fees == pytest.approx(sum( - trade.amount * - mark_prices[trade.pair].iloc[1:2]['open'] * - funding_rates[trade.pair].iloc[1:2]['open'] * - multiple - )) + assert trade.funding_fees == pytest.approx( + sum( + trade.amount + * mark_prices[trade.pair].iloc[1:2]["open"] + * funding_rates[trade.pair].iloc[1:2]["open"] + * multiple + ) + ) def test_update_funding_fees_error(mocker, default_conf, caplog): - mocker.patch(f'{EXMS}.get_funding_fees', side_effect=ExchangeError()) - default_conf['trading_mode'] = 'futures' - default_conf['margin_mode'] = 'isolated' + mocker.patch(f"{EXMS}.get_funding_fees", side_effect=ExchangeError()) + default_conf["trading_mode"] = "futures" + default_conf["margin_mode"] = "isolated" freqtrade = get_patched_freqtradebot(mocker, default_conf) freqtrade.update_funding_fees() @@ -4938,12 +5317,14 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: patch_RPCManager(mocker) patch_exchange(mocker) patch_wallet(mocker, free=10000) - default_conf_usdt.update({ - "position_adjustment_enable": True, - "dry_run": False, - "stake_amount": 10.0, - "dry_run_wallet": 1000.0, - }) + default_conf_usdt.update( + { + "position_adjustment_enable": True, + "dry_run": False, + "stake_amount": 10.0, + "dry_run_wallet": 1000.0, + } + ) freqtrade = FreqtradeBot(default_conf_usdt) freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True) bid = 11 @@ -4952,36 +5333,33 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: mocker.patch.multiple( EXMS, get_rate=buy_rate_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 10, - 'ask': 12, - 'last': 11 - }), + fetch_ticker=MagicMock(return_value={"bid": 10, "ask": 12, "last": 11}), get_min_pair_stake_amount=MagicMock(return_value=1), get_fee=fee, ) - pair = 'ETH/USDT' + pair = "ETH/USDT" # Initial buy closed_successful_buy_order = { - 'pair': pair, - 'ft_pair': pair, - 'ft_order_side': 'buy', - 'side': 'buy', - 'type': 'limit', - 'status': 'closed', - 'price': bid, - 'average': bid, - 'cost': bid * stake_amount, - 'amount': stake_amount, - 'filled': stake_amount, - 'ft_is_open': False, - 'id': '650', - 'order_id': '650' + "pair": pair, + "ft_pair": pair, + "ft_order_side": "buy", + "side": "buy", + "type": "limit", + "status": "closed", + "price": bid, + "average": bid, + "cost": bid * stake_amount, + "amount": stake_amount, + "filled": stake_amount, + "ft_is_open": False, + "id": 
"650", + "order_id": "650", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_successful_buy_order)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_successful_buy_order)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_successful_buy_order)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_successful_buy_order) + ) assert freqtrade.execute_entry(pair, stake_amount) # Should create an closed trade with an no open order id # Order is filled and trade is open @@ -5004,7 +5382,7 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert not trade.has_open_orders assert trade.open_rate == 11 assert trade.stake_amount == 110 - assert not trade.fee_updated('buy') + assert not trade.fee_updated("buy") freqtrade.manage_open_orders() @@ -5014,24 +5392,24 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert not trade.has_open_orders assert trade.open_rate == 11 assert trade.stake_amount == 110 - assert not trade.fee_updated('buy') + assert not trade.fee_updated("buy") # First position adjustment buy. open_dca_order_1 = { - 'ft_pair': pair, - 'ft_order_side': 'buy', - 'side': 'buy', - 'type': 'limit', - 'status': None, - 'price': 9, - 'amount': 12, - 'cost': 108, - 'ft_is_open': True, - 'id': '651', - 'order_id': '651' + "ft_pair": pair, + "ft_order_side": "buy", + "side": "buy", + "type": "limit", + "status": None, + "price": 9, + "amount": 12, + "cost": 108, + "ft_is_open": True, + "id": "651", + "order_id": "651", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=open_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', MagicMock(return_value=open_dca_order_1)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=open_dca_order_1)) + mocker.patch(f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=open_dca_order_1)) assert freqtrade.execute_entry(pair, stake_amount, trade=trade) orders = Order.session.scalars(select(Order)).all() @@ -5039,32 +5417,31 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert len(orders) == 2 trade = Trade.session.scalars(select(Trade)).first() assert trade - assert '651' in trade.open_orders_ids + assert "651" in trade.open_orders_ids assert trade.open_rate == 11 assert trade.amount == 10 assert trade.stake_amount == 110 - assert not trade.fee_updated('buy') + assert not trade.fee_updated("buy") trades: List[Trade] = Trade.get_open_trades_without_assigned_fees() assert len(trades) == 1 assert trade.is_open - assert not trade.fee_updated('buy') - order = trade.select_order('buy', False) + assert not trade.fee_updated("buy") + order = trade.select_order("buy", False) assert order - assert order.order_id == '650' + assert order.order_id == "650" def make_sure_its_651(*args, **kwargs): - - if args[0] == '650': + if args[0] == "650": return closed_successful_buy_order - if args[0] == '651': + if args[0] == "651": return open_dca_order_1 return None # Assume it does nothing since order is still open fetch_order_mm = MagicMock(side_effect=make_sure_its_651) - mocker.patch(f'{EXMS}.create_order', fetch_order_mm) - mocker.patch(f'{EXMS}.fetch_order', fetch_order_mm) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', fetch_order_mm) + mocker.patch(f"{EXMS}.create_order", fetch_order_mm) + mocker.patch(f"{EXMS}.fetch_order", fetch_order_mm) + mocker.patch(f"{EXMS}.fetch_order_or_stoploss_order", fetch_order_mm) 
freqtrade.update_trades_without_assigned_fees() orders = Order.session.scalars(select(Order)).all() @@ -5076,38 +5453,39 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: # Assert trade is as expected trade = Trade.session.scalars(select(Trade)).first() assert trade - assert '651' in trade.open_orders_ids + assert "651" in trade.open_orders_ids assert trade.open_rate == 11 assert trade.amount == 10 assert trade.stake_amount == 110 - assert not trade.fee_updated('buy') + assert not trade.fee_updated("buy") # Make sure the closed order is found as the first order. - order = trade.select_order('buy', False) - assert order.order_id == '650' + order = trade.select_order("buy", False) + assert order.order_id == "650" # Now close the order so it should update. closed_dca_order_1 = { - 'ft_pair': pair, - 'ft_order_side': 'buy', - 'side': 'buy', - 'type': 'limit', - 'status': 'closed', - 'price': 9, - 'average': 9, - 'amount': 12, - 'filled': 12, - 'cost': 108, - 'ft_is_open': False, - 'id': '651', - 'order_id': '651', - 'datetime': dt_now().isoformat(), + "ft_pair": pair, + "ft_order_side": "buy", + "side": "buy", + "type": "limit", + "status": "closed", + "price": 9, + "average": 9, + "amount": 12, + "filled": 12, + "cost": 108, + "ft_is_open": False, + "id": "651", + "order_id": "651", + "datetime": dt_now().isoformat(), } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(return_value=closed_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_dca_order_1)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_dca_order_1)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value=closed_dca_order_1)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_dca_order_1) + ) freqtrade.manage_open_orders() # Assert trade is as expected (averaged dca) @@ -5123,8 +5501,8 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert len(orders) == 2 # Make sure the closed order is found as the second order. 
- order = trade.select_order('buy', False) - assert order.order_id == '651' + order = trade.select_order("buy", False) + assert order.order_id == "651" # Assert that the trade is not found as open and without fees trades: List[Trade] = Trade.get_open_trades_without_assigned_fees() @@ -5132,24 +5510,25 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: # Add a second DCA closed_dca_order_2 = { - 'ft_pair': pair, - 'status': 'closed', - 'ft_order_side': 'buy', - 'side': 'buy', - 'type': 'limit', - 'price': 7, - 'average': 7, - 'amount': 15, - 'filled': 15, - 'cost': 105, - 'ft_is_open': False, - 'id': '652', - 'order_id': '652' + "ft_pair": pair, + "status": "closed", + "ft_order_side": "buy", + "side": "buy", + "type": "limit", + "price": 7, + "average": 7, + "amount": 15, + "filled": 15, + "cost": 105, + "ft_is_open": False, + "id": "652", + "order_id": "652", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_dca_order_2)) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(return_value=closed_dca_order_2)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_dca_order_2)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_dca_order_2)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value=closed_dca_order_2)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_dca_order_2) + ) assert freqtrade.execute_entry(pair, stake_amount, trade=trade) # Assert trade is as expected (averaged dca) @@ -5165,30 +5544,34 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert len(orders) == 3 # Make sure the closed order is found as the second order. - order = trade.select_order('buy', False) - assert order.order_id == '652' + order = trade.select_order("buy", False) + assert order.order_id == "652" closed_sell_dca_order_1 = { - 'ft_pair': pair, - 'status': 'closed', - 'ft_order_side': 'sell', - 'side': 'sell', - 'type': 'limit', - 'price': 8, - 'average': 8, - 'amount': 15, - 'filled': 15, - 'cost': 120, - 'ft_is_open': False, - 'id': '653', - 'order_id': '653' + "ft_pair": pair, + "status": "closed", + "ft_order_side": "sell", + "side": "sell", + "type": "limit", + "price": 8, + "average": 8, + "amount": 15, + "filled": 15, + "cost": 120, + "ft_is_open": False, + "id": "653", + "order_id": "653", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_sell_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(return_value=closed_sell_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_sell_dca_order_1)) - assert freqtrade.execute_trade_exit(trade=trade, limit=8, - exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), - sub_trade_amt=15) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_sell_dca_order_1)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value=closed_sell_dca_order_1)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_sell_dca_order_1) + ) + assert freqtrade.execute_trade_exit( + trade=trade, + limit=8, + exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), + sub_trade_amt=15, + ) # Assert trade is as expected (averaged dca) trade = Trade.session.scalars(select(Trade)).first() @@ -5204,8 +5587,8 @@ def test_position_adjust(mocker, default_conf_usdt, fee) -> None: assert len(orders) == 4 # Make sure the closed order is found as the second order. 
- order = trade.select_order('sell', False) - assert order.order_id == '653' + order = trade.select_order("sell", False) + assert order.order_id == "653" def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: @@ -5218,12 +5601,14 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: patch_RPCManager(mocker) patch_exchange(mocker) patch_wallet(mocker, free=10000) - default_conf_usdt.update({ - "position_adjustment_enable": True, - "dry_run": False, - "stake_amount": 200.0, - "dry_run_wallet": 1000.0, - }) + default_conf_usdt.update( + { + "position_adjustment_enable": True, + "dry_run": False, + "stake_amount": 200.0, + "dry_run_wallet": 1000.0, + } + ) freqtrade = FreqtradeBot(default_conf_usdt) freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True) bid = 11 @@ -5232,35 +5617,32 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: mocker.patch.multiple( EXMS, get_rate=buy_rate_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 10, - 'ask': 12, - 'last': 11 - }), + fetch_ticker=MagicMock(return_value={"bid": 10, "ask": 12, "last": 11}), get_min_pair_stake_amount=MagicMock(return_value=1), get_fee=fee, ) - pair = 'ETH/USDT' + pair = "ETH/USDT" # Initial buy closed_successful_buy_order = { - 'pair': pair, - 'ft_pair': pair, - 'ft_order_side': 'buy', - 'side': 'buy', - 'type': 'limit', - 'status': 'closed', - 'price': bid, - 'average': bid, - 'cost': bid * amount, - 'amount': amount, - 'filled': amount, - 'ft_is_open': False, - 'id': '600', - 'order_id': '600' + "pair": pair, + "ft_pair": pair, + "ft_order_side": "buy", + "side": "buy", + "type": "limit", + "status": "closed", + "price": bid, + "average": bid, + "cost": bid * amount, + "amount": amount, + "filled": amount, + "ft_is_open": False, + "id": "600", + "order_id": "600", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_successful_buy_order)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_successful_buy_order)) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_successful_buy_order)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_successful_buy_order) + ) assert freqtrade.execute_entry(pair, amount) # Should create an closed trade with an no open order id # Order is filled and trade is open @@ -5298,27 +5680,31 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: amount = 50 ask = 8 closed_sell_dca_order_1 = { - 'ft_pair': pair, - 'status': 'closed', - 'ft_order_side': 'sell', - 'side': 'sell', - 'type': 'limit', - 'price': ask, - 'average': ask, - 'amount': amount, - 'filled': amount, - 'cost': amount * ask, - 'ft_is_open': False, - 'id': '601', - 'order_id': '601' + "ft_pair": pair, + "status": "closed", + "ft_order_side": "sell", + "side": "sell", + "type": "limit", + "price": ask, + "average": ask, + "amount": amount, + "filled": amount, + "cost": amount * ask, + "ft_is_open": False, + "id": "601", + "order_id": "601", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_sell_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(return_value=closed_sell_dca_order_1)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_sell_dca_order_1)) - assert freqtrade.execute_trade_exit(trade=trade, limit=ask, - exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), - sub_trade_amt=amount) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_sell_dca_order_1)) + 
mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value=closed_sell_dca_order_1)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_sell_dca_order_1) + ) + assert freqtrade.execute_trade_exit( + trade=trade, + limit=ask, + exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), + sub_trade_amt=amount, + ) trades: List[Trade] = trade.get_open_trades_without_assigned_fees() assert len(trades) == 1 # Assert trade is as expected (averaged dca) @@ -5336,33 +5722,37 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: assert orders assert len(orders) == 2 # Make sure the closed order is found as the second order. - order = trade.select_order('sell', False) - assert order.order_id == '601' + order = trade.select_order("sell", False) + assert order.order_id == "601" amount = 50 ask = 16 closed_sell_dca_order_2 = { - 'ft_pair': pair, - 'status': 'closed', - 'ft_order_side': 'sell', - 'side': 'sell', - 'type': 'limit', - 'price': ask, - 'average': ask, - 'amount': amount, - 'filled': amount, - 'cost': amount * ask, - 'ft_is_open': False, - 'id': '602', - 'order_id': '602' + "ft_pair": pair, + "status": "closed", + "ft_order_side": "sell", + "side": "sell", + "type": "limit", + "price": ask, + "average": ask, + "amount": amount, + "filled": amount, + "cost": amount * ask, + "ft_is_open": False, + "id": "602", + "order_id": "602", } - mocker.patch(f'{EXMS}.create_order', MagicMock(return_value=closed_sell_dca_order_2)) - mocker.patch(f'{EXMS}.fetch_order', MagicMock(return_value=closed_sell_dca_order_2)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_sell_dca_order_2)) - assert freqtrade.execute_trade_exit(trade=trade, limit=ask, - exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), - sub_trade_amt=amount) + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_sell_dca_order_2)) + mocker.patch(f"{EXMS}.fetch_order", MagicMock(return_value=closed_sell_dca_order_2)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_sell_dca_order_2) + ) + assert freqtrade.execute_trade_exit( + trade=trade, + limit=ask, + exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), + sub_trade_amt=amount, + ) # Assert trade is as expected (averaged dca) trade = Trade.session.scalars(select(Trade)).first() @@ -5379,37 +5769,45 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None: assert len(orders) == 3 # Make sure the closed order is found as the second order. 
- order = trade.select_order('sell', False) - assert order.order_id == '602' + order = trade.select_order("sell", False) + assert order.order_id == "602" assert trade.is_open is False -@pytest.mark.parametrize('data', [ - ( +@pytest.mark.parametrize( + "data", + [ # tuple 1 - side amount, price # tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit - (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), - (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), - (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)), - (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)), - (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, 336.625, 0.1343142)), # final profit (sum) - ), - ( - (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), - (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), - (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)), - (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)), - (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)), - (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 3175.75, 0.9747170)), # final profit - ) -]) + ( + (("buy", 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), + (("buy", 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), + (("sell", 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)), + (("sell", 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)), + ( + ("sell", 50, 5), + (50.0, 12.5, 625.0, 336.625, 336.625, 0.1343142), + ), # final profit (sum) + ), + ( + (("buy", 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), + (("buy", 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), + (("sell", 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)), + (("buy", 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)), + (("sell", 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)), + (("sell", 150, 23), (150.0, 11.0, 1650.0, 3175.75, 3175.75, 0.9747170)), # final profit + ), + ], +) def test_position_adjust3(mocker, default_conf_usdt, fee, data) -> None: - default_conf_usdt.update({ - "position_adjustment_enable": True, - "dry_run": False, - "stake_amount": 200.0, - "dry_run_wallet": 1000.0, - }) + default_conf_usdt.update( + { + "position_adjustment_enable": True, + "dry_run": False, + "stake_amount": 200.0, + "dry_run_wallet": 1000.0, + } + ) patch_RPCManager(mocker) patch_exchange(mocker) patch_wallet(mocker, free=10000) @@ -5423,41 +5821,40 @@ def test_position_adjust3(mocker, default_conf_usdt, fee, data) -> None: mocker.patch.multiple( EXMS, get_rate=price_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 10, - 'ask': 12, - 'last': 11 - }), + fetch_ticker=MagicMock(return_value={"bid": 10, "ask": 12, "last": 11}), get_min_pair_stake_amount=MagicMock(return_value=1), get_fee=fee, ) - pair = 'ETH/USDT' + pair = "ETH/USDT" closed_successful_order = { - 'pair': pair, - 'ft_pair': pair, - 'ft_order_side': order[0], - 'side': order[0], - 'type': 'limit', - 'status': 'closed', - 'price': price, - 'average': price, - 'cost': price * amount, - 'amount': amount, - 'filled': amount, - 'ft_is_open': False, - 'id': f'60{idx}', - 'order_id': f'60{idx}' + "pair": pair, + "ft_pair": pair, + "ft_order_side": order[0], + "side": order[0], + "type": "limit", + "status": "closed", + "price": price, + "average": price, + "cost": price * amount, + "amount": amount, + "filled": amount, + "ft_is_open": False, + "id": f"60{idx}", + "order_id": f"60{idx}", } - mocker.patch(f'{EXMS}.create_order', 
MagicMock(return_value=closed_successful_order)) - mocker.patch(f'{EXMS}.fetch_order_or_stoploss_order', - MagicMock(return_value=closed_successful_order)) - if order[0] == 'buy': + mocker.patch(f"{EXMS}.create_order", MagicMock(return_value=closed_successful_order)) + mocker.patch( + f"{EXMS}.fetch_order_or_stoploss_order", MagicMock(return_value=closed_successful_order) + ) + if order[0] == "buy": assert freqtrade.execute_entry(pair, amount, trade=trade) else: assert freqtrade.execute_trade_exit( - trade=trade, limit=price, + trade=trade, + limit=price, exit_check=ExitCheckTuple(exit_type=ExitType.PARTIAL_EXIT), - sub_trade_amt=amount) + sub_trade_amt=amount, + ) orders1 = Order.session.scalars(select(Order)).all() assert orders1 @@ -5476,7 +5873,7 @@ def test_position_adjust3(mocker, default_conf_usdt, fee, data) -> None: assert pytest.approx(trade.close_profit) == result[5] order_obj = trade.select_order(order[0], False) - assert order_obj.order_id == f'60{idx}' + assert order_obj.order_id == f"60{idx}" trade = Trade.session.scalars(select(Trade)).first() assert trade @@ -5485,13 +5882,17 @@ def test_position_adjust3(mocker, default_conf_usdt, fee, data) -> None: def test_process_open_trade_positions_exception(mocker, default_conf_usdt, fee, caplog) -> None: - default_conf_usdt.update({ - "position_adjustment_enable": True, - }) + default_conf_usdt.update( + { + "position_adjustment_enable": True, + } + ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.check_and_call_adjust_trade_position', - side_effect=DependencyException()) + mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.check_and_call_adjust_trade_position", + side_effect=DependencyException(), + ) create_mock_trades(fee) @@ -5500,35 +5901,33 @@ def test_process_open_trade_positions_exception(mocker, default_conf_usdt, fee, def test_check_and_call_adjust_trade_position(mocker, default_conf_usdt, fee, caplog) -> None: - default_conf_usdt.update({ - "position_adjustment_enable": True, - "max_entry_position_adjustment": 0, - }) + default_conf_usdt.update( + { + "position_adjustment_enable": True, + "max_entry_position_adjustment": 0, + } + ) freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) buy_rate_mock = MagicMock(return_value=10) mocker.patch.multiple( EXMS, get_rate=buy_rate_mock, - fetch_ticker=MagicMock(return_value={ - 'bid': 10, - 'ask': 12, - 'last': 11 - }), + fetch_ticker=MagicMock(return_value={"bid": 10, "ask": 12, "last": 11}), get_min_pair_stake_amount=MagicMock(return_value=1), get_fee=fee, ) create_mock_trades(fee) caplog.set_level(logging.DEBUG) - freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(10, 'aaaa')) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(10, "aaaa")) freqtrade.process_open_trade_positions() assert log_has_re(r"Max adjustment entries for .* has been reached\.", caplog) assert freqtrade.strategy.adjust_trade_position.call_count == 1 caplog.clear() - freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(-0.0005, 'partial_exit_c')) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(-0.0005, "partial_exit_c")) freqtrade.process_open_trade_positions() assert log_has_re(r"LIMIT_SELL has been fulfilled.*", caplog) assert freqtrade.strategy.adjust_trade_position.call_count == 1 trade = Trade.get_trades(trade_filter=[Trade.id == 5]).first() - assert trade.orders[-1].ft_order_tag == 'partial_exit_c' + assert trade.orders[-1].ft_order_tag == 
"partial_exit_c" assert trade.is_open diff --git a/tests/freqtradebot/test_integration.py b/tests/freqtradebot/test_integration.py index 3384ae49f..75cc81fa1 100644 --- a/tests/freqtradebot/test_integration.py +++ b/tests/freqtradebot/test_integration.py @@ -19,13 +19,10 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, * 2nd trade is kept * 3rd trade is sold via sell-signal """ - default_conf['max_open_trades'] = 3 - default_conf['exchange']['name'] = 'binance' + default_conf["max_open_trades"] = 3 + default_conf["exchange"]["name"] = "binance" - stoploss = { - 'id': 123, - 'info': {} - } + stoploss = {"id": 123, "info": {}} stoploss_order_open = { "id": "123", "timestamp": 1542707426845, @@ -42,21 +39,17 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, "remaining": 0.0, "status": "open", "fee": None, - "trades": None + "trades": None, } stoploss_order_closed = stoploss_order_open.copy() - stoploss_order_closed['status'] = 'closed' - stoploss_order_closed['filled'] = stoploss_order_closed['amount'] + stoploss_order_closed["status"] = "closed" + stoploss_order_closed["filled"] = stoploss_order_closed["amount"] # Sell first trade based on stoploss, keep 2nd and 3rd trade open stop_orders = [stoploss_order_closed, stoploss_order_open.copy(), stoploss_order_open.copy()] - stoploss_order_mock = MagicMock( - side_effect=stop_orders) + stoploss_order_mock = MagicMock(side_effect=stop_orders) # Sell 3rd trade (not called for the first trade) - should_sell_mock = MagicMock(side_effect=[ - [], - [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]] - ) + should_sell_mock = MagicMock(side_effect=[[], [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)]]) cancel_order_mock = MagicMock() mocker.patch.multiple( EXMS, @@ -70,7 +63,7 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, ) mocker.patch.multiple( - 'freqtrade.freqtradebot.FreqtradeBot', + "freqtrade.freqtradebot.FreqtradeBot", create_stoploss_order=MagicMock(return_value=True), _notify_exit=MagicMock(), ) @@ -80,9 +73,9 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True) freqtrade = get_patched_freqtradebot(mocker, default_conf) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # Switch ordertype to market to close trade immediately - freqtrade.strategy.order_types['exit'] = 'market' + freqtrade.strategy.order_types["exit"] = "market" freqtrade.strategy.confirm_trade_entry = MagicMock(return_value=True) freqtrade.strategy.confirm_trade_exit = MagicMock(return_value=True) patch_get_signal(freqtrade) @@ -98,8 +91,8 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, # Make sure stoploss-order is open and trade is bought for idx, trade in enumerate(trades): stop_order = stop_orders[idx] - stop_order['id'] = f"stop{idx}" - oobj = Order.parse_from_ccxt_object(stop_order, trade.pair, 'stoploss') + stop_order["id"] = f"stop{idx}" + oobj = Order.parse_from_ccxt_object(stop_order, trade.pair, "stoploss") oobj.ft_is_open = True trade.orders.append(oobj) @@ -132,10 +125,13 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee, assert not trade.is_open -@pytest.mark.parametrize("balance_ratio,result1", [ - (1, 200), - (0.99, 198), -]) +@pytest.mark.parametrize( + "balance_ratio,result1", + [ + (1, 200), + (0.99, 198), + ], 
+) def test_forcebuy_last_unlimited(default_conf, ticker, fee, mocker, balance_ratio, result1) -> None: """ Tests workflow unlimited stake-amount @@ -143,14 +139,14 @@ def test_forcebuy_last_unlimited(default_conf, ticker, fee, mocker, balance_rati Sell one trade, calculated stake amount should now be lower than before since one trade was sold at a loss. """ - default_conf['max_open_trades'] = 5 - default_conf['force_entry_enable'] = True - default_conf['stake_amount'] = 'unlimited' - default_conf['tradable_balance_ratio'] = balance_ratio - default_conf['dry_run_wallet'] = 1000 - default_conf['exchange']['name'] = 'binance' - default_conf['telegram']['enabled'] = True - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + default_conf["max_open_trades"] = 5 + default_conf["force_entry_enable"] = True + default_conf["stake_amount"] = "unlimited" + default_conf["tradable_balance_ratio"] = balance_ratio + default_conf["dry_run_wallet"] = 1000 + default_conf["exchange"]["name"] = "binance" + default_conf["telegram"]["enabled"] = True + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -160,24 +156,20 @@ def test_forcebuy_last_unlimited(default_conf, ticker, fee, mocker, balance_rati ) mocker.patch.multiple( - 'freqtrade.freqtradebot.FreqtradeBot', + "freqtrade.freqtradebot.FreqtradeBot", create_stoploss_order=MagicMock(return_value=True), _notify_exit=MagicMock(), ) - should_sell_mock = MagicMock(side_effect=[ - [], - [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)], - [], - [], - []] + should_sell_mock = MagicMock( + side_effect=[[], [ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL)], [], [], []] ) mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock) freqtrade = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtrade) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # Switch ordertype to market to close trade immediately - freqtrade.strategy.order_types['exit'] = 'market' + freqtrade.strategy.order_types["exit"] = "market" patch_get_signal(freqtrade) # Create 4 trades @@ -186,9 +178,9 @@ def test_forcebuy_last_unlimited(default_conf, ticker, fee, mocker, balance_rati trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 4 - assert freqtrade.wallets.get_trade_stake_amount('XRP/BTC', 5) == result1 + assert freqtrade.wallets.get_trade_stake_amount("XRP/BTC", 5) == result1 - rpc._rpc_force_entry('TKN/BTC', None) + rpc._rpc_force_entry("TKN/BTC", None) trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 5 @@ -206,18 +198,18 @@ def test_forcebuy_last_unlimited(default_conf, ticker, fee, mocker, balance_rati # One trade sold assert len(trades) == 4 # stake-amount should now be reduced, since one trade was sold at a loss. - assert freqtrade.wallets.get_trade_stake_amount('XRP/BTC', 5) < result1 + assert freqtrade.wallets.get_trade_stake_amount("XRP/BTC", 5) < result1 # Validate that balance of sold trade is not in dry-run balances anymore. 
bals2 = freqtrade.wallets.get_all_balances() assert bals != bals2 assert len(bals) == 6 assert len(bals2) == 5 - assert 'LTC' in bals - assert 'LTC' not in bals2 + assert "LTC" in bals + assert "LTC" not in bals2 def test_dca_buying(default_conf_usdt, ticker_usdt, fee, mocker) -> None: - default_conf_usdt['position_adjustment_enable'] = True + default_conf_usdt["position_adjustment_enable"] = True freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) mocker.patch.multiple( @@ -242,8 +234,8 @@ def test_dca_buying(default_conf_usdt, ticker_usdt, fee, mocker) -> None: # Reduce bid amount ticker_usdt_modif = ticker_usdt.return_value - ticker_usdt_modif['bid'] = ticker_usdt_modif['bid'] * 0.995 - mocker.patch(f'{EXMS}.fetch_ticker', return_value=ticker_usdt_modif) + ticker_usdt_modif["bid"] = ticker_usdt_modif["bid"] * 0.995 + mocker.patch(f"{EXMS}.fetch_ticker", return_value=ticker_usdt_modif) # additional buy order freqtrade.process() @@ -263,7 +255,7 @@ def test_dca_buying(default_conf_usdt, ticker_usdt, fee, mocker) -> None: assert len(trade.orders) == 2 assert pytest.approx(trade.stake_amount) == 120 assert trade.orders[0].amount == 30 - assert pytest.approx(trade.orders[1].amount) == 60 / ticker_usdt_modif['bid'] + assert pytest.approx(trade.orders[1].amount) == 60 / ticker_usdt_modif["bid"] assert pytest.approx(trade.amount) == trade.orders[0].amount + trade.orders[1].amount assert trade.nr_of_successful_buys == 2 @@ -275,10 +267,10 @@ def test_dca_buying(default_conf_usdt, ticker_usdt, fee, mocker) -> None: trade = Trade.get_trades().first() assert trade.is_open is False assert trade.orders[0].amount == 30 - assert trade.orders[0].side == 'buy' - assert pytest.approx(trade.orders[1].amount) == 60 / ticker_usdt_modif['bid'] + assert trade.orders[0].side == "buy" + assert pytest.approx(trade.orders[1].amount) == 60 / ticker_usdt_modif["bid"] # Sold everything - assert trade.orders[-1].side == 'sell' + assert trade.orders[-1].side == "sell" assert trade.orders[2].amount == trade.amount assert trade.nr_of_successful_buys == 2 @@ -286,7 +278,7 @@ def test_dca_buying(default_conf_usdt, ticker_usdt, fee, mocker) -> None: def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None: - default_conf_usdt['position_adjustment_enable'] = True + default_conf_usdt["position_adjustment_enable"] = True freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) mocker.patch.multiple( @@ -314,8 +306,8 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None: # Reduce bid amount ticker_usdt_modif = ticker_usdt.return_value - ticker_usdt_modif['ask'] = ticker_usdt_modif['ask'] * 1.004 - mocker.patch(f'{EXMS}.fetch_ticker', return_value=ticker_usdt_modif) + ticker_usdt_modif["ask"] = ticker_usdt_modif["ask"] * 1.004 + mocker.patch(f"{EXMS}.fetch_ticker", return_value=ticker_usdt_modif) # additional buy order freqtrade.process() @@ -334,7 +326,7 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None: trade = Trade.get_trades().first() assert len(trade.orders) == 2 assert pytest.approx(trade.stake_amount) == 120 - assert trade.orders[1].amount == round(60 / ticker_usdt_modif['ask'], 4) + assert trade.orders[1].amount == round(60 / ticker_usdt_modif["ask"], 4) assert trade.amount == trade.orders[0].amount + trade.orders[1].amount assert trade.nr_of_successful_entries == 2 @@ -345,23 +337,21 @@ def test_dca_short(default_conf_usdt, ticker_usdt, fee, mocker) -> None: trade = Trade.get_trades().first() assert trade.is_open is False # assert 
trade.orders[0].amount == 30 - assert trade.orders[0].side == 'sell' - assert trade.orders[1].amount == round(60 / ticker_usdt_modif['ask'], 4) + assert trade.orders[0].side == "sell" + assert trade.orders[1].amount == round(60 / ticker_usdt_modif["ask"], 4) # Sold everything - assert trade.orders[-1].side == 'buy' + assert trade.orders[-1].side == "buy" assert trade.orders[2].amount == trade.amount assert trade.nr_of_successful_entries == 2 assert trade.nr_of_successful_exits == 1 -@pytest.mark.parametrize('leverage', [ - 1, 2 -]) +@pytest.mark.parametrize("leverage", [1, 2]) def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) -> None: - default_conf_usdt['position_adjustment_enable'] = True - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' + default_conf_usdt["position_adjustment_enable"] = True + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) mocker.patch.multiple( @@ -371,13 +361,13 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) amount_to_precision=lambda s, x, y: y, price_to_precision=lambda s, x, y: y, ) - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=False) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False) mocker.patch(f"{EXMS}.get_max_leverage", return_value=10) mocker.patch(f"{EXMS}.get_funding_fees", return_value=0) mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0, 0)) patch_get_signal(freqtrade) - freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt['ask'] * 0.96 + freqtrade.strategy.custom_entry_price = lambda **kwargs: ticker_usdt["ask"] * 0.96 freqtrade.strategy.leverage = MagicMock(return_value=leverage) freqtrade.strategy.minimal_roi = {0: 0.2} @@ -417,7 +407,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) assert trade.initial_stop_loss_pct == -0.1 # Fill order - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=True) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=True) freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 2 @@ -433,7 +423,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) # 2nd order - not filling freqtrade.strategy.adjust_trade_position = MagicMock(return_value=120) - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=False) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False) freqtrade.process() trade = Trade.get_trades().first() @@ -458,7 +448,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) # Fill DCA order freqtrade.strategy.adjust_trade_position = MagicMock(return_value=None) - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=True) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=True) freqtrade.strategy.adjust_entry_price = MagicMock(side_effect=ValueError) freqtrade.process() @@ -468,7 +458,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) assert pytest.approx(trade.open_rate) == 1.963153456 assert trade.orders[-1].price == 1.95 assert pytest.approx(trade.orders[-1].cost) == 120 * leverage - assert trade.orders[-1].status == 'closed' + assert trade.orders[-1].status == "closed" assert pytest.approx(trade.amount) == 91.689215 * leverage # Check the 2 filled orders equal the above amount @@ -476,14 +466,14 @@ def 
test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) assert pytest.approx(trade.orders[-1].amount) == 61.538461232 * leverage # Full exit - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=False) - freqtrade.strategy.custom_exit = MagicMock(return_value='Exit now') + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False) + freqtrade.strategy.custom_exit = MagicMock(return_value="Exit now") freqtrade.strategy.adjust_entry_price = MagicMock(return_value=2.02) freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 5 assert trade.orders[-1].side == trade.exit_side - assert trade.orders[-1].status == 'open' + assert trade.orders[-1].status == "open" assert trade.orders[-1].price == 2.02 assert pytest.approx(trade.amount) == 91.689215 * leverage assert pytest.approx(trade.orders[-1].amount) == 91.689215 * leverage @@ -492,23 +482,23 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker) freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 5 - assert trade.orders[-1].status == 'open' + assert trade.orders[-1].status == "open" assert trade.orders[-1].price == 2.02 # Adjust entry price cannot be called - this is an exit order assert freqtrade.strategy.adjust_entry_price.call_count == 0 -@pytest.mark.parametrize('leverage', [1, 2]) +@pytest.mark.parametrize("leverage", [1, 2]) @pytest.mark.parametrize("is_short", [False, True]) def test_dca_order_adjust_entry_replace_fails( default_conf_usdt, ticker_usdt, fee, mocker, caplog, is_short, leverage ) -> None: spot = leverage == 1 if not spot: - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' - default_conf_usdt['position_adjustment_enable'] = True - default_conf_usdt['max_open_trades'] = 2 + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" + default_conf_usdt["position_adjustment_enable"] = True + default_conf_usdt["max_open_trades"] = 2 freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) mocker.patch.multiple( EXMS, @@ -518,7 +508,7 @@ def test_dca_order_adjust_entry_replace_fails( ) # no order fills. - mocker.patch(f'{EXMS}._dry_is_price_crossed', side_effect=[False, True]) + mocker.patch(f"{EXMS}._dry_is_price_crossed", side_effect=[False, True]) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) freqtrade.enter_positions() @@ -527,10 +517,10 @@ def test_dca_order_adjust_entry_replace_fails( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() assert len(trades) == 1 - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=False) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False) # Timeout to not interfere freqtrade.strategy.ft_check_timed_out = MagicMock(return_value=False) @@ -538,7 +528,7 @@ def test_dca_order_adjust_entry_replace_fails( # Create DCA order for 2nd trade (so we have 2 open orders on 2 trades) # this 2nd order won't fill. 
- freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(20, 'PeNF')) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(20, "PeNF")) freqtrade.process() @@ -548,7 +538,7 @@ def test_dca_order_adjust_entry_replace_fails( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() assert len(trades) == 2 # We now have 2 orders open @@ -559,7 +549,7 @@ def test_dca_order_adjust_entry_replace_fails( .where(Order.ft_is_open.is_(True)) .where(Order.ft_order_side != "stoploss") .where(Order.ft_trade_id == Trade.id) - ).all() + ).all() assert len(trades) == 2 assert len(Order.get_open_orders()) == 2 # Entry adjustment is called @@ -568,8 +558,9 @@ def test_dca_order_adjust_entry_replace_fails( # Attempt order replacement - fails. freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1234) - entry_mock = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.execute_entry', - return_value=False) + entry_mock = mocker.patch( + "freqtrade.freqtradebot.FreqtradeBot.execute_entry", return_value=False + ) msg = r"Could not replace order for.*" assert not log_has_re(msg, caplog) freqtrade.manage_open_orders() @@ -580,13 +571,13 @@ def test_dca_order_adjust_entry_replace_fails( assert len(Order.get_open_orders()) == 0 -@pytest.mark.parametrize('leverage', [1, 2]) +@pytest.mark.parametrize("leverage", [1, 2]) def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, leverage) -> None: - default_conf_usdt['position_adjustment_enable'] = True + default_conf_usdt["position_adjustment_enable"] = True spot = leverage == 1 if not spot: - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) assert freqtrade.trading_mode == TradingMode.FUTURES if not spot else TradingMode.SPOT mocker.patch.multiple( @@ -599,7 +590,7 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, levera get_funding_fees=MagicMock(return_value=0), ) mocker.patch(f"{EXMS}.get_max_leverage", return_value=10) - starting_amount = freqtrade.wallets.get_total('USDT') + starting_amount = freqtrade.wallets.get_total("USDT") assert starting_amount == 1000 patch_get_signal(freqtrade) @@ -613,11 +604,11 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, levera assert trade.leverage == leverage assert pytest.approx(trade.amount) == 30.0 * leverage assert trade.open_rate == 2.0 - assert pytest.approx(freqtrade.wallets.get_free('USDT')) == starting_amount - 60 + assert pytest.approx(freqtrade.wallets.get_free("USDT")) == starting_amount - 60 if spot: - assert pytest.approx(freqtrade.wallets.get_total('USDT')) == starting_amount - 60 + assert pytest.approx(freqtrade.wallets.get_total("USDT")) == starting_amount - 60 else: - assert freqtrade.wallets.get_total('USDT') == starting_amount + assert freqtrade.wallets.get_total("USDT") == starting_amount # Too small size freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-59) @@ -627,28 +618,29 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, levera assert pytest.approx(trade.stake_amount) == 60 assert pytest.approx(trade.amount) == 30.0 * leverage assert log_has_re( - r"Remaining amount of \d\.\d+.* would be smaller than the minimum of 10.", caplog) + r"Remaining amount of \d\.\d+.* would be smaller 
than the minimum of 10.", caplog + ) - freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(-20, 'PES')) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=(-20, "PES")) freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 2 - assert trade.orders[-1].ft_order_side == 'sell' - assert trade.orders[-1].ft_order_tag == 'PES' + assert trade.orders[-1].ft_order_side == "sell" + assert trade.orders[-1].ft_order_tag == "PES" assert pytest.approx(trade.stake_amount) == 40 assert pytest.approx(trade.amount) == 20 * leverage assert trade.open_rate == 2.0 assert trade.is_open assert trade.realized_profit > 0.098 * leverage expected_profit = starting_amount - 40 + trade.realized_profit - assert pytest.approx(freqtrade.wallets.get_free('USDT')) == expected_profit + assert pytest.approx(freqtrade.wallets.get_free("USDT")) == expected_profit if spot: - assert pytest.approx(freqtrade.wallets.get_total('USDT')) == expected_profit + assert pytest.approx(freqtrade.wallets.get_total("USDT")) == expected_profit else: # total won't change in futures mode, only free / used will. - assert freqtrade.wallets.get_total('USDT') == starting_amount + trade.realized_profit + assert freqtrade.wallets.get_total("USDT") == starting_amount + trade.realized_profit caplog.clear() # Sell more than what we got (we got ~20 coins left) @@ -666,14 +658,13 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, levera assert len(trade.orders) == 2 # Amount exactly comes out as exactly 0 - freqtrade.strategy.adjust_trade_position = MagicMock( - return_value=-trade.stake_amount) + freqtrade.strategy.adjust_trade_position = MagicMock(return_value=-trade.stake_amount) freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 3 - assert trade.orders[-1].ft_order_side == 'sell' + assert trade.orders[-1].ft_order_side == "sell" assert pytest.approx(trade.stake_amount) == 40 assert trade.is_open is False @@ -683,14 +674,14 @@ def test_dca_exiting(default_conf_usdt, ticker_usdt, fee, mocker, caplog, levera freqtrade.process() trade = Trade.get_trades().first() assert len(trade.orders) == 3 - assert trade.orders[-1].ft_order_side == 'sell' + assert trade.orders[-1].ft_order_side == "sell" assert pytest.approx(trade.stake_amount) == 40 assert trade.is_open is False - assert log_has_re('Amount to exit is 0.0 due to exchange limits - not exiting.', caplog) + assert log_has_re("Amount to exit is 0.0 due to exchange limits - not exiting.", caplog) expected_profit = starting_amount - 60 + trade.realized_profit - assert pytest.approx(freqtrade.wallets.get_free('USDT')) == expected_profit + assert pytest.approx(freqtrade.wallets.get_free("USDT")) == expected_profit if spot: - assert pytest.approx(freqtrade.wallets.get_total('USDT')) == expected_profit + assert pytest.approx(freqtrade.wallets.get_total("USDT")) == expected_profit else: # total won't change in futures mode, only free / used will. 
- assert freqtrade.wallets.get_total('USDT') == starting_amount + trade.realized_profit + assert freqtrade.wallets.get_total("USDT") == starting_amount + trade.realized_profit diff --git a/tests/freqtradebot/test_stoploss_on_exchange.py b/tests/freqtradebot/test_stoploss_on_exchange.py index 04a04ea9f..451548816 100644 --- a/tests/freqtradebot/test_stoploss_on_exchange.py +++ b/tests/freqtradebot/test_stoploss_on_exchange.py @@ -11,8 +11,16 @@ from freqtrade.freqtradebot import FreqtradeBot from freqtrade.persistence import Order, Trade from freqtrade.persistence.models import PairLock from freqtrade.util.datetime_helpers import dt_now -from tests.conftest import (EXMS, get_patched_freqtradebot, log_has, log_has_re, patch_edge, - patch_exchange, patch_get_signal, patch_whitelist) +from tests.conftest import ( + EXMS, + get_patched_freqtradebot, + log_has, + log_has_re, + patch_edge, + patch_exchange, + patch_get_signal, + patch_whitelist, +) from tests.conftest_trades import entry_side, exit_side from tests.freqtradebot.test_freqtradebot import patch_RPCManager @@ -23,24 +31,20 @@ def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_sho patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), create_order=MagicMock(return_value=limit_order[entry_side(is_short)]), get_fee=fee, ) order = limit_order[entry_side(is_short)] - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_trade', MagicMock(return_value=True)) - mocker.patch(f'{EXMS}.fetch_order', return_value=order) - mocker.patch(f'{EXMS}.get_trades_for_order', return_value=[]) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_trade", MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.fetch_order", return_value=order) + mocker.patch(f"{EXMS}.get_trades_for_order", return_value=[]) - stoploss = MagicMock(return_value={'id': 13434334}) - mocker.patch(f'{EXMS}.create_stoploss', stoploss) + stoploss = MagicMock(return_value={"id": 13434334}) + mocker.patch(f"{EXMS}.create_stoploss", stoploss) freqtrade = FreqtradeBot(default_conf_usdt) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -57,9 +61,10 @@ def test_add_stoploss_on_exchange(mocker, default_conf_usdt, limit_order, is_sho @pytest.mark.parametrize("is_short", [False, True]) -def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_short, - limit_order) -> None: - stop_order_dict = {'id': "13434334"} +def test_handle_stoploss_on_exchange( + mocker, default_conf_usdt, fee, caplog, is_short, limit_order +) -> None: + stop_order_dict = {"id": "13434334"} stoploss = MagicMock(return_value=stop_order_dict) enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] @@ -67,17 +72,15 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, - create_stoploss=stoploss + 
create_stoploss=stoploss, ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -100,11 +103,11 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ # should do nothing and return false trade.is_open = True - hanging_stoploss_order = MagicMock(return_value={'id': '13434334', 'status': 'open'}) - mocker.patch(f'{EXMS}.fetch_stoploss_order', hanging_stoploss_order) + hanging_stoploss_order = MagicMock(return_value={"id": "13434334", "status": "open"}) + mocker.patch(f"{EXMS}.fetch_stoploss_order", hanging_stoploss_order) assert freqtrade.handle_stoploss_on_exchange(trade) is False - hanging_stoploss_order.assert_called_once_with('13434334', trade.pair) + hanging_stoploss_order.assert_called_once_with("13434334", trade.pair) assert len(trade.open_sl_orders) == 1 assert trade.open_sl_orders[-1].order_id == "13434334" @@ -113,12 +116,12 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ caplog.clear() trade.is_open = True - canceled_stoploss_order = MagicMock(return_value={'id': '13434334', 'status': 'canceled'}) - mocker.patch(f'{EXMS}.fetch_stoploss_order', canceled_stoploss_order) + canceled_stoploss_order = MagicMock(return_value={"id": "13434334", "status": "canceled"}) + mocker.patch(f"{EXMS}.fetch_stoploss_order", canceled_stoploss_order) stoploss.reset_mock() amount_before = trade.amount - stop_order_dict.update({'id': "103_1"}) + stop_order_dict.update({"id": "103_1"}) assert freqtrade.handle_stoploss_on_exchange(trade) is False assert stoploss.call_count == 1 @@ -129,43 +132,45 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ # Fourth case: when stoploss is set and it is hit # should return true as a trade actually happened caplog.clear() - stop_order_dict.update({'id': "103_1"}) + stop_order_dict.update({"id": "103_1"}) trade = Trade.session.scalars(select(Trade)).first() trade.is_short = is_short trade.is_open = True - stoploss_order_hit = MagicMock(return_value={ - 'id': "103_1", - 'status': 'closed', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'filled': enter_order['amount'], - 'remaining': 0, - 'amount': enter_order['amount'], - }) - mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit) + stoploss_order_hit = MagicMock( + return_value={ + "id": "103_1", + "status": "closed", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "filled": enter_order["amount"], + "remaining": 0, + "amount": enter_order["amount"], + } + ) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_order_hit) freqtrade.strategy.order_filled = MagicMock(return_value=None) assert freqtrade.handle_stoploss_on_exchange(trade) is True - assert log_has_re(r'STOP_LOSS_LIMIT is hit for Trade\(id=1, .*\)\.', caplog) + assert log_has_re(r"STOP_LOSS_LIMIT is hit for Trade\(id=1, .*\)\.", caplog) assert len(trade.open_sl_orders) == 0 assert trade.is_open is False assert freqtrade.strategy.order_filled.call_count == 1 caplog.clear() - mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError()) + mocker.patch(f"{EXMS}.create_stoploss", side_effect=ExchangeError()) trade.is_open = True freqtrade.handle_stoploss_on_exchange(trade) - assert log_has('Unable to place a stoploss order on exchange.', caplog) + assert log_has("Unable to place a stoploss order on exchange.", caplog) assert len(trade.open_sl_orders) == 0 # Fifth case: fetch_order returns InvalidOrder # It should try to add stoploss order - 
stop_order_dict.update({'id': "105"}) + stop_order_dict.update({"id": "105"}) stoploss.reset_mock() - mocker.patch(f'{EXMS}.fetch_stoploss_order', side_effect=InvalidOrderException()) - mocker.patch(f'{EXMS}.create_stoploss', stoploss) + mocker.patch(f"{EXMS}.fetch_stoploss_order", side_effect=InvalidOrderException()) + mocker.patch(f"{EXMS}.create_stoploss", stoploss) freqtrade.handle_stoploss_on_exchange(trade) assert len(trade.open_sl_orders) == 1 assert stoploss.call_count == 1 @@ -175,17 +180,18 @@ def test_handle_stoploss_on_exchange(mocker, default_conf_usdt, fee, caplog, is_ trade.is_open = False trade.open_sl_orders[-1].ft_is_open = False stoploss.reset_mock() - mocker.patch(f'{EXMS}.fetch_order') - mocker.patch(f'{EXMS}.create_stoploss', stoploss) + mocker.patch(f"{EXMS}.fetch_order") + mocker.patch(f"{EXMS}.create_stoploss", stoploss) assert freqtrade.handle_stoploss_on_exchange(trade) is False assert trade.has_open_sl_orders is False assert stoploss.call_count == 0 @pytest.mark.parametrize("is_short", [False, True]) -def test_handle_stoploss_on_exchange_emergency(mocker, default_conf_usdt, fee, is_short, - limit_order) -> None: - stop_order_dict = {'id': "13434334"} +def test_handle_stoploss_on_exchange_emergency( + mocker, default_conf_usdt, fee, is_short, limit_order +) -> None: + stop_order_dict = {"id": "13434334"} stoploss = MagicMock(return_value=stop_order_dict) enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] @@ -193,17 +199,15 @@ def test_handle_stoploss_on_exchange_emergency(mocker, default_conf_usdt, fee, i patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, - create_stoploss=stoploss + create_stoploss=stoploss, ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -216,39 +220,42 @@ def test_handle_stoploss_on_exchange_emergency(mocker, default_conf_usdt, fee, i # emergency exit triggered # Trailing stop should not act anymore - stoploss_order_cancelled = MagicMock(side_effect=[{ - 'id': "107", - 'status': 'canceled', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'amount': enter_order['amount'], - 'filled': 0, - 'remaining': enter_order['amount'], - 'info': {'stopPrice': 22}, - }]) + stoploss_order_cancelled = MagicMock( + side_effect=[ + { + "id": "107", + "status": "canceled", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "amount": enter_order["amount"], + "filled": 0, + "remaining": enter_order["amount"], + "info": {"stopPrice": 22}, + } + ] + ) trade.stoploss_last_update = dt_now() - timedelta(hours=1) trade.stop_loss = 24 trade.exit_reason = None trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, - order_id='107', - status='open', + order_id="107", + status="open", ) ) - freqtrade.config['trailing_stop'] = True + freqtrade.config["trailing_stop"] = True stoploss = MagicMock(side_effect=InvalidOrderException()) assert trade.has_open_sl_orders is True Trade.commit() - mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result', - side_effect=InvalidOrderException()) - 
mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_cancelled) - mocker.patch(f'{EXMS}.create_stoploss', stoploss) + mocker.patch(f"{EXMS}.cancel_stoploss_order_with_result", side_effect=InvalidOrderException()) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_order_cancelled) + mocker.patch(f"{EXMS}.create_stoploss", stoploss) assert freqtrade.handle_stoploss_on_exchange(trade) is False assert trade.has_open_sl_orders is False assert trade.is_open is False @@ -257,8 +264,9 @@ def test_handle_stoploss_on_exchange_emergency(mocker, default_conf_usdt, fee, i @pytest.mark.parametrize("is_short", [False, True]) def test_handle_stoploss_on_exchange_partial( - mocker, default_conf_usdt, fee, is_short, limit_order) -> None: - stop_order_dict = {'id': "101", "status": "open"} + mocker, default_conf_usdt, fee, is_short, limit_order +) -> None: + stop_order_dict = {"id": "101", "status": "open"} stoploss = MagicMock(return_value=stop_order_dict) enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] @@ -266,17 +274,15 @@ def test_handle_stoploss_on_exchange_partial( patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, - create_stoploss=stoploss + create_stoploss=stoploss, ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -291,20 +297,22 @@ def test_handle_stoploss_on_exchange_partial( assert trade.has_open_sl_orders is True assert trade.open_sl_orders[-1].order_id == "101" assert trade.amount == 30 - stop_order_dict.update({'id': "102"}) + stop_order_dict.update({"id": "102"}) # Stoploss on exchange is cancelled on exchange, but filled partially. # Must update trade amount to guarantee successful exit. - stoploss_order_hit = MagicMock(return_value={ - 'id': "101", - 'status': 'canceled', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'filled': trade.amount / 2, - 'remaining': trade.amount / 2, - 'amount': enter_order['amount'], - }) - mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit) + stoploss_order_hit = MagicMock( + return_value={ + "id": "101", + "status": "canceled", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "filled": trade.amount / 2, + "remaining": trade.amount / 2, + "amount": enter_order["amount"], + } + ) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_order_hit) assert freqtrade.handle_stoploss_on_exchange(trade) is False # Stoploss filled partially ... 
assert trade.amount == 15 @@ -314,10 +322,11 @@ def test_handle_stoploss_on_exchange_partial( @pytest.mark.parametrize("is_short", [False, True]) def test_handle_stoploss_on_exchange_partial_cancel_here( - mocker, default_conf_usdt, fee, is_short, limit_order, caplog, time_machine) -> None: - stop_order_dict = {'id': "101", "status": "open"} + mocker, default_conf_usdt, fee, is_short, limit_order, caplog, time_machine +) -> None: + stop_order_dict = {"id": "101", "status": "open"} time_machine.move_to(dt_now()) - default_conf_usdt['trailing_stop'] = True + default_conf_usdt["trailing_stop"] = True stoploss = MagicMock(return_value=stop_order_dict) enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] @@ -325,17 +334,15 @@ def test_handle_stoploss_on_exchange_partial_cancel_here( patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, - create_stoploss=stoploss + create_stoploss=stoploss, ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -350,36 +357,40 @@ def test_handle_stoploss_on_exchange_partial_cancel_here( assert trade.has_open_sl_orders is True assert trade.open_sl_orders[-1].order_id == "101" assert trade.amount == 30 - stop_order_dict.update({'id': "102"}) + stop_order_dict.update({"id": "102"}) # Stoploss on exchange is open. # Freqtrade cancels the stop - but cancel returns a partial filled order. - stoploss_order_hit = MagicMock(return_value={ - 'id': "101", - 'status': 'open', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'filled': 0, - 'remaining': trade.amount, - 'amount': enter_order['amount'], - }) - stoploss_order_cancel = MagicMock(return_value={ - 'id': "101", - 'status': 'canceled', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'filled': trade.amount / 2, - 'remaining': trade.amount / 2, - 'amount': enter_order['amount'], - }) - mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hit) - mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result', stoploss_order_cancel) + stoploss_order_hit = MagicMock( + return_value={ + "id": "101", + "status": "open", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "filled": 0, + "remaining": trade.amount, + "amount": enter_order["amount"], + } + ) + stoploss_order_cancel = MagicMock( + return_value={ + "id": "101", + "status": "canceled", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "filled": trade.amount / 2, + "remaining": trade.amount / 2, + "amount": enter_order["amount"], + } + ) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_order_hit) + mocker.patch(f"{EXMS}.cancel_stoploss_order_with_result", stoploss_order_cancel) time_machine.shift(timedelta(minutes=15)) assert freqtrade.handle_stoploss_on_exchange(trade) is False # Canceled Stoploss filled partially ... 
- assert log_has_re('Cancelling current stoploss on exchange.*', caplog) + assert log_has_re("Cancelling current stoploss on exchange.*", caplog) assert trade.has_open_sl_orders is True assert trade.open_sl_orders[-1].order_id == "102" @@ -387,8 +398,9 @@ def test_handle_stoploss_on_exchange_partial_cancel_here( @pytest.mark.parametrize("is_short", [False, True]) -def test_handle_sle_cancel_cant_recreate(mocker, default_conf_usdt, fee, caplog, is_short, - limit_order) -> None: +def test_handle_sle_cancel_cant_recreate( + mocker, default_conf_usdt, fee, caplog, is_short, limit_order +) -> None: # Sixth case: stoploss order was cancelled but couldn't create new one enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] @@ -396,20 +408,18 @@ def test_handle_sle_cancel_cant_recreate(mocker, default_conf_usdt, fee, caplog, patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, ) mocker.patch.multiple( EXMS, - fetch_stoploss_order=MagicMock(return_value={'status': 'canceled', 'id': '100'}), + fetch_stoploss_order=MagicMock(return_value={"status": "canceled", "id": "100"}), create_stoploss=MagicMock(side_effect=ExchangeError()), ) freqtrade = FreqtradeBot(default_conf_usdt) @@ -421,19 +431,19 @@ def test_handle_sle_cancel_cant_recreate(mocker, default_conf_usdt, fee, caplog, trade.is_open = True trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, - order_id='100', - status='open', + order_id="100", + status="open", ) ) assert trade assert freqtrade.handle_stoploss_on_exchange(trade) is False - assert log_has_re(r'All Stoploss orders are cancelled, but unable to recreate one\.', caplog) + assert log_has_re(r"All Stoploss orders are cancelled, but unable to recreate one\.", caplog) assert trade.has_open_sl_orders is False assert trade.is_open is True @@ -446,28 +456,26 @@ def test_create_stoploss_order_invalid_order( order = limit_order[exit_side(is_short)] rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) - create_order_mock = MagicMock(side_effect=[ - open_order, - order, - ]) + create_order_mock = MagicMock( + side_effect=[ + open_order, + order, + ] + ) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), create_order=create_order_mock, get_fee=fee, ) mocker.patch.multiple( EXMS, - fetch_order=MagicMock(return_value={'status': 'canceled'}), + fetch_order=MagicMock(return_value={"status": "canceled"}), create_stoploss=MagicMock(side_effect=InvalidOrderException()), ) freqtrade = FreqtradeBot(default_conf_usdt) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() @@ -482,46 +490,44 @@ def test_create_stoploss_order_invalid_order( # Should call a market sell assert create_order_mock.call_count == 2 - assert 
create_order_mock.call_args[1]['ordertype'] == 'market' - assert create_order_mock.call_args[1]['pair'] == trade.pair - assert create_order_mock.call_args[1]['amount'] == trade.amount + assert create_order_mock.call_args[1]["ordertype"] == "market" + assert create_order_mock.call_args[1]["pair"] == trade.pair + assert create_order_mock.call_args[1]["amount"] == trade.amount # Rpc is sending first buy, then sell assert rpc_mock.call_count == 2 - assert rpc_mock.call_args_list[0][0][0]['exit_reason'] == ExitType.EMERGENCY_EXIT.value - assert rpc_mock.call_args_list[0][0][0]['order_type'] == 'market' - assert rpc_mock.call_args_list[0][0][0]['type'] == 'exit' - assert rpc_mock.call_args_list[1][0][0]['type'] == 'exit_fill' + assert rpc_mock.call_args_list[0][0][0]["exit_reason"] == ExitType.EMERGENCY_EXIT.value + assert rpc_mock.call_args_list[0][0][0]["order_type"] == "market" + assert rpc_mock.call_args_list[0][0][0]["type"] == "exit" + assert rpc_mock.call_args_list[1][0][0]["type"] == "exit_fill" @pytest.mark.parametrize("is_short", [False, True]) def test_create_stoploss_order_insufficient_funds( mocker, default_conf_usdt, caplog, fee, limit_order, is_short ): - exit_order = limit_order[exit_side(is_short)]['id'] + exit_order = limit_order[exit_side(is_short)]["id"] freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mock_insuf = mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds') + mock_insuf = mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_insufficient_funds") mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - limit_order[entry_side(is_short)], - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + limit_order[entry_side(is_short)], + exit_order, + ] + ), get_fee=fee, - fetch_order=MagicMock(return_value={'status': 'canceled'}), + fetch_order=MagicMock(return_value={"status": "canceled"}), ) mocker.patch.multiple( EXMS, create_stoploss=MagicMock(side_effect=InsufficientFundsError()), ) patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() @@ -539,33 +545,48 @@ def test_create_stoploss_order_insufficient_funds( assert mock_insuf.call_count == 1 -@pytest.mark.parametrize("is_short,bid,ask,stop_price,hang_price", [ - (False, [4.38, 4.16], [4.4, 4.17], ['2.0805', 4.4 * 0.95], 3), - (True, [1.09, 1.21], [1.1, 1.22], ['2.321', 1.09 * 1.05], 1.5), -]) +@pytest.mark.parametrize( + "is_short,bid,ask,stop_price,hang_price", + [ + (False, [4.38, 4.16], [4.4, 4.17], ["2.0805", 4.4 * 0.95], 3), + (True, [1.09, 1.21], [1.1, 1.22], ["2.321", 1.09 * 1.05], 1.5), + ], +) @pytest.mark.usefixtures("init_persistence") def test_handle_stoploss_on_exchange_trailing( - mocker, default_conf_usdt, fee, is_short, bid, ask, limit_order, stop_price, hang_price, + mocker, + default_conf_usdt, + fee, + is_short, + bid, + ask, + limit_order, + stop_price, + hang_price, time_machine, ) -> None: # When trailing stoploss is set enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] - stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'}) + stoploss = MagicMock(return_value={"id": "13434334", 
"status": "open"}) start_dt = dt_now() time_machine.move_to(start_dt, tick=False) patch_RPCManager(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.19, - 'ask': 2.2, - 'last': 2.19, - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock( + return_value={ + "bid": 2.19, + "ask": 2.2, + "last": 2.19, + } + ), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, ) mocker.patch.multiple( @@ -575,21 +596,21 @@ def test_handle_stoploss_on_exchange_trailing( ) # enabling TSL - default_conf_usdt['trailing_stop'] = True + default_conf_usdt["trailing_stop"] = True # disabling ROI - default_conf_usdt['minimal_roi']['0'] = 999999999 + default_conf_usdt["minimal_roi"]["0"] = 999999999 freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) # enabling stoploss on exchange - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # setting stoploss freqtrade.strategy.stoploss = 0.05 if is_short else -0.05 # setting stoploss_on_exchange_interval to 60 seconds - freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60 + freqtrade.strategy.order_types["stoploss_on_exchange_interval"] = 60 patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -601,33 +622,31 @@ def test_handle_stoploss_on_exchange_trailing( trade.stoploss_last_update = dt_now() - timedelta(minutes=20) trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, - order_id='100', + order_id="100", order_date=dt_now() - timedelta(minutes=20), ) ) stoploss_order_hanging = { - 'id': '100', - 'status': 'open', - 'type': 'stop_loss_limit', - 'price': hang_price, - 'average': 2, - 'fee': {}, - 'amount': 0, - 'info': { - 'stopPrice': stop_price[0] - } + "id": "100", + "status": "open", + "type": "stop_loss_limit", + "price": hang_price, + "average": 2, + "fee": {}, + "amount": 0, + "info": {"stopPrice": stop_price[0]}, } stoploss_order_cancel = deepcopy(stoploss_order_hanging) - stoploss_order_cancel['status'] = 'canceled' + stoploss_order_cancel["status"] = "canceled" - mocker.patch(f'{EXMS}.fetch_stoploss_order', return_value=stoploss_order_hanging) - mocker.patch(f'{EXMS}.cancel_stoploss_order', return_value=stoploss_order_cancel) + mocker.patch(f"{EXMS}.fetch_stoploss_order", return_value=stoploss_order_hanging) + mocker.patch(f"{EXMS}.cancel_stoploss_order", return_value=stoploss_order_cancel) # stoploss initially at 5% assert freqtrade.handle_trade(trade) is False @@ -635,24 +654,27 @@ def test_handle_stoploss_on_exchange_trailing( assert len(trade.open_sl_orders) == 1 - assert trade.open_sl_orders[-1].order_id == '13434334' + assert trade.open_sl_orders[-1].order_id == "13434334" # price jumped 2x mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': bid[0], - 'ask': ask[0], - 'last': bid[0], - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": bid[0], + "ask": ask[0], + "last": bid[0], + } + ), ) - cancel_order_mock = MagicMock(return_value={ - 'id': '13434334', 'status': 'canceled', 'fee': {}, 'amount': trade.amount}) - stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'}) - mocker.patch(f'{EXMS}.fetch_stoploss_order') - mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock) - mocker.patch(f'{EXMS}.create_stoploss', 
stoploss_order_mock) + cancel_order_mock = MagicMock( + return_value={"id": "13434334", "status": "canceled", "fee": {}, "amount": trade.amount} + ) + stoploss_order_mock = MagicMock(return_value={"id": "so1", "status": "open"}) + mocker.patch(f"{EXMS}.fetch_stoploss_order") + mocker.patch(f"{EXMS}.cancel_stoploss_order", cancel_order_mock) + mocker.patch(f"{EXMS}.create_stoploss", stoploss_order_mock) # stoploss should not be updated as the interval is 60 seconds assert freqtrade.handle_trade(trade) is False @@ -669,29 +691,33 @@ def test_handle_stoploss_on_exchange_trailing( assert freqtrade.handle_stoploss_on_exchange(trade) is False - cancel_order_mock.assert_called_once_with('13434334', 'ETH/USDT') + cancel_order_mock.assert_called_once_with("13434334", "ETH/USDT") stoploss_order_mock.assert_called_once_with( amount=30, - pair='ETH/USDT', + pair="ETH/USDT", order_types=freqtrade.strategy.order_types, stop_price=stop_price[1], side=exit_side(is_short), - leverage=1.0 + leverage=1.0, ) # price fell below stoploss, so dry-run sells trade. mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': bid[1], - 'ask': ask[1], - 'last': bid[1], - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": bid[1], + "ask": ask[1], + "last": bid[1], + } + ), + ) + mocker.patch( + f"{EXMS}.cancel_stoploss_order_with_result", + return_value={"id": "so1", "status": "canceled"}, ) - mocker.patch(f'{EXMS}.cancel_stoploss_order_with_result', - return_value={'id': 'so1', 'status': 'canceled'}) assert len(trade.open_sl_orders) == 1 - assert trade.open_sl_orders[-1].order_id == 'so1' + assert trade.open_sl_orders[-1].order_id == "so1" assert freqtrade.handle_trade(trade) is True assert trade.is_open is False @@ -706,37 +732,35 @@ def test_handle_stoploss_on_exchange_trailing_error( enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] # When trailing stoploss is set - stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'}) + stoploss = MagicMock(return_value={"id": "13434334", "status": "open"}) patch_exchange(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - {'id': enter_order['id']}, - {'id': exit_order['id']}, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + {"id": enter_order["id"]}, + {"id": exit_order["id"]}, + ] + ), get_fee=fee, create_stoploss=stoploss, stoploss_adjust=MagicMock(return_value=True), ) # enabling TSL - default_conf_usdt['trailing_stop'] = True + default_conf_usdt["trailing_stop"] = True freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) # enabling stoploss on exchange - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # setting stoploss freqtrade.strategy.stoploss = 0.05 if is_short else -0.05 # setting stoploss_on_exchange_interval to 60 seconds - freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60 + freqtrade.strategy.order_types["stoploss_on_exchange_interval"] = 60 patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() @@ -745,30 +769,26 @@ def test_handle_stoploss_on_exchange_trailing_error( trade.stop_loss = 0.2 stoploss_order_hanging = { - 'id': "abcd", - 'status': 'open', - 'type': 
'stop_loss_limit', - 'price': 3, - 'average': 2, - 'info': { - 'stopPrice': '0.1' - } + "id": "abcd", + "status": "open", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "info": {"stopPrice": "0.1"}, } trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=3, - order_id='abcd', + order_id="abcd", order_date=dt_now(), ) ) - mocker.patch(f'{EXMS}.cancel_stoploss_order', - side_effect=InvalidOrderException()) - mocker.patch(f'{EXMS}.fetch_stoploss_order', - return_value=stoploss_order_hanging) + mocker.patch(f"{EXMS}.cancel_stoploss_order", side_effect=InvalidOrderException()) + mocker.patch(f"{EXMS}.fetch_stoploss_order", return_value=stoploss_order_hanging) time_machine.shift(timedelta(minutes=50)) freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging) assert log_has_re(r"Could not cancel stoploss order abcd for pair ETH/USDT.*", caplog) @@ -780,8 +800,8 @@ def test_handle_stoploss_on_exchange_trailing_error( # Fail creating stoploss order caplog.clear() - cancel_mock = mocker.patch(f'{EXMS}.cancel_stoploss_order') - mocker.patch(f'{EXMS}.create_stoploss', side_effect=ExchangeError()) + cancel_mock = mocker.patch(f"{EXMS}.cancel_stoploss_order") + mocker.patch(f"{EXMS}.create_stoploss", side_effect=ExchangeError()) time_machine.shift(timedelta(minutes=50)) freqtrade.handle_trailing_stoploss_on_exchange(trade, stoploss_order_hanging) assert cancel_mock.call_count == 2 @@ -789,14 +809,15 @@ def test_handle_stoploss_on_exchange_trailing_error( def test_stoploss_on_exchange_price_rounding( - mocker, default_conf_usdt, fee, open_trade_usdt) -> None: + mocker, default_conf_usdt, fee, open_trade_usdt +) -> None: patch_RPCManager(mocker) mocker.patch.multiple( EXMS, get_fee=fee, ) price_mock = MagicMock(side_effect=lambda p, s, **kwargs: int(s)) - stoploss_mock = MagicMock(return_value={'id': '13434334'}) + stoploss_mock = MagicMock(return_value={"id": "13434334"}) adjust_mock = MagicMock(return_value=False) mocker.patch.multiple( EXMS, @@ -821,19 +842,17 @@ def test_handle_stoploss_on_exchange_custom_stop( enter_order = limit_order[entry_side(is_short)] exit_order = limit_order[exit_side(is_short)] # When trailing stoploss is set - stoploss = MagicMock(return_value={'id': 13434334, 'status': 'open'}) + stoploss = MagicMock(return_value={"id": 13434334, "status": "open"}) patch_RPCManager(mocker) mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 1.9, - 'ask': 2.2, - 'last': 1.9 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 1.9, "ask": 2.2, "last": 1.9}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, is_cancel_order_result_suitable=MagicMock(return_value=True), ) @@ -844,21 +863,21 @@ def test_handle_stoploss_on_exchange_custom_stop( ) # enabling TSL - default_conf_usdt['use_custom_stoploss'] = True + default_conf_usdt["use_custom_stoploss"] = True # disabling ROI - default_conf_usdt['minimal_roi']['0'] = 999999999 + default_conf_usdt["minimal_roi"]["0"] = 999999999 freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) # enabling stoploss on exchange - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # setting stoploss freqtrade.strategy.custom_stoploss = lambda *args, **kwargs: -0.04 # setting stoploss_on_exchange_interval to 60 
seconds - freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 60 + freqtrade.strategy.order_types["stoploss_on_exchange_interval"] = 60 patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) @@ -868,54 +887,54 @@ def test_handle_stoploss_on_exchange_custom_stop( trade.is_open = True trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, order_date=dt_now() - timedelta(minutes=601), - order_id='100', + order_id="100", ) ) Trade.commit() slo = { - 'id': '100', - 'status': 'open', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'info': { - 'stopPrice': '2.0805' - } + "id": "100", + "status": "open", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "info": {"stopPrice": "2.0805"}, } slo_canceled = deepcopy(slo) - slo_canceled.update({'status': 'canceled'}) + slo_canceled.update({"status": "canceled"}) def fetch_stoploss_order_mock(order_id, *args, **kwargs): x = deepcopy(slo) - x['id'] = order_id + x["id"] = order_id return x - mocker.patch(f'{EXMS}.fetch_stoploss_order', MagicMock(fetch_stoploss_order_mock)) - mocker.patch(f'{EXMS}.cancel_stoploss_order', return_value=slo_canceled) + mocker.patch(f"{EXMS}.fetch_stoploss_order", MagicMock(fetch_stoploss_order_mock)) + mocker.patch(f"{EXMS}.cancel_stoploss_order", return_value=slo_canceled) assert freqtrade.handle_trade(trade) is False assert freqtrade.handle_stoploss_on_exchange(trade) is False # price jumped 2x mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': 4.38 if not is_short else 1.9 / 2, - 'ask': 4.4 if not is_short else 2.2 / 2, - 'last': 4.38 if not is_short else 1.9 / 2, - }) + f"{EXMS}.fetch_ticker", + MagicMock( + return_value={ + "bid": 4.38 if not is_short else 1.9 / 2, + "ask": 4.4 if not is_short else 2.2 / 2, + "last": 4.38 if not is_short else 1.9 / 2, + } + ), ) cancel_order_mock = MagicMock() - stoploss_order_mock = MagicMock(return_value={'id': 'so1', 'status': 'open'}) - mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock) - mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock) + stoploss_order_mock = MagicMock(return_value={"id": "so1", "status": "open"}) + mocker.patch(f"{EXMS}.cancel_stoploss_order", cancel_order_mock) + mocker.patch(f"{EXMS}.create_stoploss", stoploss_order_mock) # stoploss should not be updated as the interval is 60 seconds assert freqtrade.handle_trade(trade) is False @@ -928,81 +947,73 @@ def test_handle_stoploss_on_exchange_custom_stop( assert trade.stop_loss_pct == -0.04 if not is_short else 0.04 # setting stoploss_on_exchange_interval to 0 seconds - freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0 + freqtrade.strategy.order_types["stoploss_on_exchange_interval"] = 0 cancel_order_mock.assert_not_called() stoploss_order_mock.assert_not_called() assert freqtrade.handle_stoploss_on_exchange(trade) is False - cancel_order_mock.assert_called_once_with('13434334', 'ETH/USDT') + cancel_order_mock.assert_called_once_with("13434334", "ETH/USDT") # Long uses modified ask - offset, short modified bid + offset stoploss_order_mock.assert_called_once_with( amount=pytest.approx(trade.amount), - pair='ETH/USDT', + pair="ETH/USDT", order_types=freqtrade.strategy.order_types, stop_price=4.4 * 0.96 if not is_short else 0.95 * 1.04, side=exit_side(is_short), - leverage=1.0 + leverage=1.0, ) # price fell below stoploss, so dry-run sells trade. 
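The `stop_price` asserted just above follows directly from the `custom_stoploss` callback returning `-0.04` and from the test's comment that a long uses the modified ask minus the offset while a short uses the modified bid plus the offset. A minimal sketch of that arithmetic, purely illustrative and not freqtrade's implementation (the helper `expected_stop_price` is hypothetical):

```python
# Illustrative sketch, not freqtrade code: the relationship the stop_price
# assertion above encodes. custom_stoploss returns -0.04, so the new stop sits
# 4% away from the current rate - below the ask (4.4) for a long, above the
# bid (0.95) for a short.

def expected_stop_price(current_rate: float, stoploss: float, is_short: bool) -> float:
    """Stop implied by a relative stoploss such as -0.04 (hypothetical helper)."""
    if is_short:
        return current_rate * (1 - stoploss)   # short: stop above the rate
    return current_rate * (1 + stoploss)       # long: stop below the rate

assert expected_stop_price(4.4, -0.04, is_short=False) == 4.4 * 0.96
assert expected_stop_price(0.95, -0.04, is_short=True) == 0.95 * 1.04
```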
mocker.patch( - f'{EXMS}.fetch_ticker', - MagicMock(return_value={ - 'bid': 4.17, - 'ask': 4.19, - 'last': 4.17 - }) + f"{EXMS}.fetch_ticker", MagicMock(return_value={"bid": 4.17, "ask": 4.19, "last": 4.17}) ) assert freqtrade.handle_trade(trade) is True def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_order) -> None: - - enter_order = limit_order['buy'] - exit_order = limit_order['sell'] - enter_order['average'] = 2.19 + enter_order = limit_order["buy"] + exit_order = limit_order["sell"] + enter_order["average"] = 2.19 # When trailing stoploss is set - stoploss = MagicMock(return_value={'id': '13434334', 'status': 'open'}) + stoploss = MagicMock(return_value={"id": "13434334", "status": "open"}) patch_RPCManager(mocker) patch_exchange(mocker) patch_edge(mocker) - edge_conf['max_open_trades'] = float('inf') - edge_conf['dry_run_wallet'] = 999.9 - edge_conf['exchange']['name'] = 'binance' + edge_conf["max_open_trades"] = float("inf") + edge_conf["dry_run_wallet"] = 999.9 + edge_conf["exchange"]["name"] = "binance" mocker.patch.multiple( EXMS, - fetch_ticker=MagicMock(return_value={ - 'bid': 2.19, - 'ask': 2.2, - 'last': 2.19 - }), - create_order=MagicMock(side_effect=[ - enter_order, - exit_order, - ]), + fetch_ticker=MagicMock(return_value={"bid": 2.19, "ask": 2.2, "last": 2.19}), + create_order=MagicMock( + side_effect=[ + enter_order, + exit_order, + ] + ), get_fee=fee, create_stoploss=stoploss, ) # enabling TSL - edge_conf['trailing_stop'] = True - edge_conf['trailing_stop_positive'] = 0.01 - edge_conf['trailing_stop_positive_offset'] = 0.011 + edge_conf["trailing_stop"] = True + edge_conf["trailing_stop_positive"] = 0.01 + edge_conf["trailing_stop_positive_offset"] = 0.011 # disabling ROI - edge_conf['minimal_roi']['0'] = 999999999 + edge_conf["minimal_roi"]["0"] = 999999999 freqtrade = FreqtradeBot(edge_conf) # enabling stoploss on exchange - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # setting stoploss freqtrade.strategy.stoploss = -0.02 # setting stoploss_on_exchange_interval to 0 seconds - freqtrade.strategy.order_types['stoploss_on_exchange_interval'] = 0 + freqtrade.strategy.order_types["stoploss_on_exchange_interval"] = 0 patch_get_signal(freqtrade) @@ -1015,25 +1026,27 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde trade.stoploss_last_update = dt_now() trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, - order_id='100', + order_id="100", ) ) - stoploss_order_hanging = MagicMock(return_value={ - 'id': '100', - 'status': 'open', - 'type': 'stop_loss_limit', - 'price': 3, - 'average': 2, - 'stopPrice': '2.178' - }) + stoploss_order_hanging = MagicMock( + return_value={ + "id": "100", + "status": "open", + "type": "stop_loss_limit", + "price": 3, + "average": 2, + "stopPrice": "2.178", + } + ) - mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_order_hanging) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_order_hanging) # stoploss initially at 20% as edge dictated it. 
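For context on the trailing behaviour the following assertions rely on: edge dictates the initial stop (roughly 20% below the 2.19 entry, per the comment above), and with `trailing_stop_positive=0.01` plus `trailing_stop_positive_offset=0.011` the stop only starts trailing once the trade is more than 1.1% in profit, and it never moves down. A minimal sketch of that ratcheting logic, assuming the 20% figure from the comment; this is illustrative only, not freqtrade's internals:

```python
# Illustrative sketch of a 1% trailing stop with a 1.1% activation offset.
def trail(stop: float, open_rate: float, rate: float,
          trailing: float = 0.01, offset: float = 0.011) -> float:
    profit = rate / open_rate - 1
    if profit > offset:                       # only trail once the offset is reached
        return max(stop, rate * (1 - trailing))
    return stop                               # otherwise keep the existing stop

stop = 2.19 * 0.8                    # initial stop ~20% below entry (edge-dictated, assumed)
stop = trail(stop, 2.19, 2.19 * 0.95)    # 5% under water: stop must not move
assert stop == 2.19 * 0.8
stop = trail(stop, 2.19, 4.4)            # price roughly doubles: stop trails 1% below
assert stop == 4.4 * 0.99                # the value the test asserts for trade.stop_loss below
```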
assert freqtrade.handle_trade(trade) is False @@ -1042,15 +1055,14 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde cancel_order_mock = MagicMock() stoploss_order_mock = MagicMock() - mocker.patch(f'{EXMS}.cancel_stoploss_order', cancel_order_mock) - mocker.patch(f'{EXMS}.create_stoploss', stoploss_order_mock) + mocker.patch(f"{EXMS}.cancel_stoploss_order", cancel_order_mock) + mocker.patch(f"{EXMS}.create_stoploss", stoploss_order_mock) # price goes down 5% - mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={ - 'bid': 2.19 * 0.95, - 'ask': 2.2 * 0.95, - 'last': 2.19 * 0.95 - })) + mocker.patch( + f"{EXMS}.fetch_ticker", + MagicMock(return_value={"bid": 2.19 * 0.95, "ask": 2.2 * 0.95, "last": 2.19 * 0.95}), + ) assert freqtrade.handle_trade(trade) is False assert freqtrade.handle_stoploss_on_exchange(trade) is False @@ -1061,32 +1073,36 @@ def test_tsl_on_exchange_compatible_with_edge(mocker, edge_conf, fee, limit_orde cancel_order_mock.assert_not_called() # price jumped 2x - mocker.patch(f'{EXMS}.fetch_ticker', MagicMock(return_value={ - 'bid': 4.38, - 'ask': 4.4, - 'last': 4.38 - })) + mocker.patch( + f"{EXMS}.fetch_ticker", MagicMock(return_value={"bid": 4.38, "ask": 4.4, "last": 4.38}) + ) assert freqtrade.handle_trade(trade) is False assert freqtrade.handle_stoploss_on_exchange(trade) is False # stoploss should be set to 1% as trailing is on assert trade.stop_loss == 4.4 * 0.99 - cancel_order_mock.assert_called_once_with('100', 'NEO/BTC') + cancel_order_mock.assert_called_once_with("100", "NEO/BTC") stoploss_order_mock.assert_called_once_with( amount=30, - pair='NEO/BTC', + pair="NEO/BTC", order_types=freqtrade.strategy.order_types, stop_price=4.4 * 0.99, - side='sell', - leverage=1.0 + side="sell", + leverage=1.0, ) @pytest.mark.parametrize("is_short", [False, True]) def test_execute_trade_exit_down_stoploss_on_exchange_dry_run( - default_conf_usdt, ticker_usdt, fee, is_short, ticker_usdt_sell_down, - ticker_usdt_sell_up, mocker) -> None: + default_conf_usdt, + ticker_usdt, + fee, + is_short, + ticker_usdt_sell_down, + ticker_usdt_sell_up, + mocker, +) -> None: rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -1108,65 +1124,67 @@ def test_execute_trade_exit_down_stoploss_on_exchange_dry_run( # Decrease the price and sell it mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down + EXMS, fetch_ticker=ticker_usdt_sell_up if is_short else ticker_usdt_sell_down ) - default_conf_usdt['dry_run'] = True - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + default_conf_usdt["dry_run"] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True # Setting trade stoploss to 0.01 trade.stop_loss = 2.0 * 1.01 if is_short else 2.0 * 0.99 freqtrade.execute_trade_exit( - trade=trade, limit=trade.stop_loss, - exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)) + trade=trade, limit=trade.stop_loss, exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS) + ) assert rpc_mock.call_count == 2 last_msg = rpc_mock.call_args_list[-1][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/USDT', - 'direction': 'Short' if trade.is_short else 'Long', - 'leverage': 1.0, - 'gain': 'loss', - 'limit': 2.02 if is_short else 1.98, - 'order_rate': 2.02 if is_short else 1.98, - 'amount': pytest.approx(29.70297029 if is_short else 30.0), - 'order_type': 'limit', - 'buy_tag': None, - 'enter_tag': None, - 'open_rate': 
2.02 if is_short else 2.0, - 'current_rate': 2.2 if is_short else 2.0, - 'profit_amount': -0.3 if is_short else -0.8985, - 'profit_ratio': -0.00501253 if is_short else -0.01493766, - 'stake_currency': 'USDT', - 'quote_currency': 'USDT', - 'fiat_currency': 'USD', - 'base_currency': 'ETH', - 'exit_reason': ExitType.STOP_LOSS.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'stake_amount': pytest.approx(60), - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/USDT", + "direction": "Short" if trade.is_short else "Long", + "leverage": 1.0, + "gain": "loss", + "limit": 2.02 if is_short else 1.98, + "order_rate": 2.02 if is_short else 1.98, + "amount": pytest.approx(29.70297029 if is_short else 30.0), + "order_type": "limit", + "buy_tag": None, + "enter_tag": None, + "open_rate": 2.02 if is_short else 2.0, + "current_rate": 2.2 if is_short else 2.0, + "profit_amount": -0.3 if is_short else -0.8985, + "profit_ratio": -0.00501253 if is_short else -0.01493766, + "stake_currency": "USDT", + "quote_currency": "USDT", + "fiat_currency": "USD", + "base_currency": "ETH", + "exit_reason": ExitType.STOP_LOSS.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "sub_trade": False, + "cumulative_profit": 0.0, + "stake_amount": pytest.approx(60), + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg def test_execute_trade_exit_sloe_cancel_exception( - mocker, default_conf_usdt, ticker_usdt, fee, caplog) -> None: + mocker, default_conf_usdt, ticker_usdt, fee, caplog +) -> None: freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) - mocker.patch(f'{EXMS}.cancel_stoploss_order', side_effect=InvalidOrderException()) - mocker.patch('freqtrade.wallets.Wallets.get_free', MagicMock(return_value=300)) - create_order_mock = MagicMock(side_effect=[ - {'id': '12345554'}, - {'id': '12345555'}, - ]) + mocker.patch(f"{EXMS}.cancel_stoploss_order", side_effect=InvalidOrderException()) + mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=300)) + create_order_mock = MagicMock( + side_effect=[ + {"id": "12345554"}, + {"id": "12345555"}, + ] + ) patch_exchange(mocker) mocker.patch.multiple( EXMS, @@ -1175,47 +1193,42 @@ def test_execute_trade_exit_sloe_cancel_exception( create_order=create_order_mock, ) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True patch_get_signal(freqtrade) freqtrade.enter_positions() trade = Trade.session.scalars(select(Trade)).first() PairLock.session = MagicMock() - freqtrade.config['dry_run'] = False + freqtrade.config["dry_run"] = False trade.orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trade.pair, ft_is_open=True, ft_amount=trade.amount, ft_price=trade.stop_loss, - order_id='abcd', - status='open', + order_id="abcd", + status="open", ) ) - freqtrade.execute_trade_exit(trade=trade, limit=1234, - exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)) + freqtrade.execute_trade_exit( + trade=trade, limit=1234, exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS) + ) assert create_order_mock.call_count == 2 - assert log_has('Could not cancel stoploss order abcd for pair ETH/USDT', caplog) + assert log_has("Could not cancel stoploss order abcd for pair ETH/USDT", caplog) @pytest.mark.parametrize("is_short", [False, True]) def 
test_execute_trade_exit_with_stoploss_on_exchange( - default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, mocker) -> None: - - default_conf_usdt['exchange']['name'] = 'binance' + default_conf_usdt, ticker_usdt, fee, ticker_usdt_sell_up, is_short, mocker +) -> None: + default_conf_usdt["exchange"]["name"] = "binance" rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) - stoploss = MagicMock(return_value={ - 'id': 123, - 'status': 'open', - 'info': { - 'foo': 'bar' - } - }) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.handle_order_fee') + stoploss = MagicMock(return_value={"id": 123, "status": "open", "info": {"foo": "bar"}}) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.handle_order_fee") cancel_order = MagicMock(return_value=True) mocker.patch.multiple( @@ -1230,7 +1243,7 @@ def test_execute_trade_exit_with_stoploss_on_exchange( ) freqtrade = FreqtradeBot(default_conf_usdt) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True patch_get_signal(freqtrade, enter_short=is_short, enter_long=not is_short) # Create some test data @@ -1245,15 +1258,12 @@ def test_execute_trade_exit_with_stoploss_on_exchange( freqtrade.exit_positions(trades) # Increase the price and sell it - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_usdt_sell_up - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker_usdt_sell_up) freqtrade.execute_trade_exit( trade=trade, - limit=ticker_usdt_sell_up()['ask' if is_short else 'bid'], - exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS) + limit=ticker_usdt_sell_up()["ask" if is_short else "bid"], + exit_check=ExitCheckTuple(exit_type=ExitType.STOP_LOSS), ) trade = Trade.session.scalars(select(Trade)).first() @@ -1265,8 +1275,9 @@ def test_execute_trade_exit_with_stoploss_on_exchange( @pytest.mark.parametrize("is_short", [False, True]) def test_may_execute_trade_exit_after_stoploss_on_exchange_hit( - default_conf_usdt, ticker_usdt, fee, mocker, is_short) -> None: - default_conf_usdt['exchange']['name'] = 'binance' + default_conf_usdt, ticker_usdt, fee, mocker, is_short +) -> None: + default_conf_usdt["exchange"]["name"] = "binance" rpc_mock = patch_RPCManager(mocker) patch_exchange(mocker) mocker.patch.multiple( @@ -1278,17 +1289,12 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit( _dry_is_price_crossed=MagicMock(side_effect=[False, True]), ) - stoploss = MagicMock(return_value={ - 'id': 123, - 'info': { - 'foo': 'bar' - } - }) + stoploss = MagicMock(return_value={"id": 123, "info": {"foo": "bar"}}) - mocker.patch(f'{EXMS}.create_stoploss', stoploss) + mocker.patch(f"{EXMS}.create_stoploss", stoploss) freqtrade = FreqtradeBot(default_conf_usdt) - freqtrade.strategy.order_types['stoploss_on_exchange'] = True + freqtrade.strategy.order_types["stoploss_on_exchange"] = True patch_get_signal(freqtrade, enter_long=not is_short, enter_short=is_short) # Create some test data @@ -1305,32 +1311,34 @@ def test_may_execute_trade_exit_after_stoploss_on_exchange_hit( # Assuming stoploss on exchange is hit # trade should be sold at the price of stoploss, with exit_reason STOPLOSS_ON_EXCHANGE - stoploss_executed = MagicMock(return_value={ - "id": "123", - "timestamp": 1542707426845, - "datetime": "2018-11-20T09:50:26.845Z", - "lastTradeTimestamp": None, - "symbol": "BTC/USDT", - "type": "stop_loss_limit", - "side": "buy" if is_short else "sell", - "price": 1.08801, - "amount": trade.amount, - "cost": 1.08801 * trade.amount, - "average": 1.08801, - "filled": 
trade.amount, - "remaining": 0.0, - "status": "closed", - "fee": None, - "trades": None - }) - mocker.patch(f'{EXMS}.fetch_stoploss_order', stoploss_executed) + stoploss_executed = MagicMock( + return_value={ + "id": "123", + "timestamp": 1542707426845, + "datetime": "2018-11-20T09:50:26.845Z", + "lastTradeTimestamp": None, + "symbol": "BTC/USDT", + "type": "stop_loss_limit", + "side": "buy" if is_short else "sell", + "price": 1.08801, + "amount": trade.amount, + "cost": 1.08801 * trade.amount, + "average": 1.08801, + "filled": trade.amount, + "remaining": 0.0, + "status": "closed", + "fee": None, + "trades": None, + } + ) + mocker.patch(f"{EXMS}.fetch_stoploss_order", stoploss_executed) freqtrade.exit_positions(trades) assert trade.has_open_sl_orders is False assert trade.is_open is False assert trade.exit_reason == ExitType.STOPLOSS_ON_EXCHANGE.value assert rpc_mock.call_count == 4 - assert rpc_mock.call_args_list[1][0][0]['type'] == RPCMessageType.ENTRY - assert rpc_mock.call_args_list[1][0][0]['amount'] > 20 - assert rpc_mock.call_args_list[2][0][0]['type'] == RPCMessageType.ENTRY_FILL - assert rpc_mock.call_args_list[3][0][0]['type'] == RPCMessageType.EXIT_FILL + assert rpc_mock.call_args_list[1][0][0]["type"] == RPCMessageType.ENTRY + assert rpc_mock.call_args_list[1][0][0]["amount"] > 20 + assert rpc_mock.call_args_list[2][0][0]["type"] == RPCMessageType.ENTRY_FILL + assert rpc_mock.call_args_list[3][0][0]["type"] == RPCMessageType.EXIT_FILL diff --git a/tests/freqtradebot/test_worker.py b/tests/freqtradebot/test_worker.py index 79e2f35d4..1dfdca5b2 100644 --- a/tests/freqtradebot/test_worker.py +++ b/tests/freqtradebot/test_worker.py @@ -12,25 +12,25 @@ from tests.conftest import EXMS, get_patched_worker, log_has, log_has_re def test_worker_state(mocker, default_conf, markets) -> None: - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) worker = get_patched_worker(mocker, default_conf) assert worker.freqtrade.state is State.RUNNING - default_conf.pop('initial_state') + default_conf.pop("initial_state") worker = Worker(args=None, config=default_conf) assert worker.freqtrade.state is State.STOPPED def test_worker_running(mocker, default_conf, caplog) -> None: mock_throttle = MagicMock() - mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle) - mocker.patch('freqtrade.persistence.Trade.stoploss_reinitialization', MagicMock()) + mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle) + mocker.patch("freqtrade.persistence.Trade.stoploss_reinitialization", MagicMock()) worker = get_patched_worker(mocker, default_conf) state = worker._worker(old_state=None) assert state is State.RUNNING - assert log_has('Changing state to: RUNNING', caplog) + assert log_has("Changing state to: RUNNING", caplog) assert mock_throttle.call_count == 1 # Check strategy is loaded, and received a dataprovider object assert worker.freqtrade.strategy @@ -40,13 +40,13 @@ def test_worker_running(mocker, default_conf, caplog) -> None: def test_worker_stopped(mocker, default_conf, caplog) -> None: mock_throttle = MagicMock() - mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle) + mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle) worker = get_patched_worker(mocker, default_conf) worker.freqtrade.state = State.STOPPED state = worker._worker(old_state=State.RUNNING) assert state is State.STOPPED - assert log_has('Changing state from RUNNING to: STOPPED', caplog) + assert 
log_has("Changing state from RUNNING to: STOPPED", caplog) assert mock_throttle.call_count == 1 @@ -70,11 +70,11 @@ def test_throttle(mocker, default_conf, caplog) -> None: def test_throttle_sleep_time(mocker, default_conf, caplog) -> None: - caplog.set_level(logging.DEBUG) worker = get_patched_worker(mocker, default_conf) sleep_mock = mocker.patch("freqtrade.worker.Worker._sleep") with time_machine.travel("2022-09-01 05:00:00 +00:00") as t: + def throttled_func(x=1): t.shift(timedelta(seconds=x)) return 42 @@ -107,8 +107,12 @@ def test_throttle_sleep_time(mocker, default_conf, caplog) -> None: sleep_mock.reset_mock() # Throttle for more than 5m (1 timeframe) - assert worker._throttle(throttled_func, throttle_secs=400, timeframe='5m', - timeframe_offset=0.4, x=5) == 42 + assert ( + worker._throttle( + throttled_func, throttle_secs=400, timeframe="5m", timeframe_offset=0.4, x=5 + ) + == 42 + ) assert sleep_mock.call_count == 1 # 300 (5m) - 60 (1m - see set time above) - 5 (duration of throttled_func) = 235 assert 235.2 < sleep_mock.call_args[0][0] < 235.6 @@ -117,8 +121,12 @@ def test_throttle_sleep_time(mocker, default_conf, caplog) -> None: sleep_mock.reset_mock() # Offset of 5s, so we hit the sweet-spot between "candle" and "candle offset" # Which should not get a throttle iteration to avoid late candle fetching - assert worker._throttle(throttled_func, throttle_secs=10, timeframe='5m', - timeframe_offset=5, x=1.2) == 42 + assert ( + worker._throttle( + throttled_func, throttle_secs=10, timeframe="5m", timeframe_offset=5, x=1.2 + ) + == 42 + ) assert sleep_mock.call_count == 1 # Time is slightly bigger than throttle secs due to the high timeframe offset. assert 11.1 < sleep_mock.call_args[0][0] < 13.2 @@ -141,7 +149,7 @@ def test_worker_heartbeat_running(default_conf, mocker, caplog): message = r"Bot heartbeat\. PID=.*state='RUNNING'" mock_throttle = MagicMock() - mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle) + mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle) worker = get_patched_worker(mocker, default_conf) worker.freqtrade.state = State.RUNNING @@ -164,7 +172,7 @@ def test_worker_heartbeat_stopped(default_conf, mocker, caplog): message = r"Bot heartbeat\. 
PID=.*state='STOPPED'" mock_throttle = MagicMock() - mocker.patch('freqtrade.worker.Worker._throttle', mock_throttle) + mocker.patch("freqtrade.worker.Worker._throttle", mock_throttle) worker = get_patched_worker(mocker, default_conf) worker.freqtrade.state = State.STOPPED diff --git a/tests/leverage/test_candletype.py b/tests/leverage/test_candletype.py index ed7991d26..a424012d7 100644 --- a/tests/leverage/test_candletype.py +++ b/tests/leverage/test_candletype.py @@ -3,25 +3,31 @@ import pytest from freqtrade.enums import CandleType -@pytest.mark.parametrize('input,expected', [ - ('', CandleType.SPOT), - ('spot', CandleType.SPOT), - (CandleType.SPOT, CandleType.SPOT), - (CandleType.FUTURES, CandleType.FUTURES), - (CandleType.INDEX, CandleType.INDEX), - (CandleType.MARK, CandleType.MARK), - ('futures', CandleType.FUTURES), - ('mark', CandleType.MARK), - ('premiumIndex', CandleType.PREMIUMINDEX), -]) +@pytest.mark.parametrize( + "input,expected", + [ + ("", CandleType.SPOT), + ("spot", CandleType.SPOT), + (CandleType.SPOT, CandleType.SPOT), + (CandleType.FUTURES, CandleType.FUTURES), + (CandleType.INDEX, CandleType.INDEX), + (CandleType.MARK, CandleType.MARK), + ("futures", CandleType.FUTURES), + ("mark", CandleType.MARK), + ("premiumIndex", CandleType.PREMIUMINDEX), + ], +) def test_CandleType_from_string(input, expected): assert CandleType.from_string(input) == expected -@pytest.mark.parametrize('input,expected', [ - ('futures', CandleType.FUTURES), - ('spot', CandleType.SPOT), - ('margin', CandleType.SPOT), -]) +@pytest.mark.parametrize( + "input,expected", + [ + ("futures", CandleType.FUTURES), + ("spot", CandleType.SPOT), + ("margin", CandleType.SPOT), + ], +) def test_CandleType_get_default(input, expected): assert CandleType.get_default(input) == expected diff --git a/tests/leverage/test_interest.py b/tests/leverage/test_interest.py index dd4983c71..6df94bca4 100644 --- a/tests/leverage/test_interest.py +++ b/tests/leverage/test_interest.py @@ -10,33 +10,40 @@ five_hours = FtPrecise(5.0) twentyfive_hours = FtPrecise(25.0) -@pytest.mark.parametrize('exchange,interest_rate,hours,expected', [ - ('binance', 0.0005, ten_mins, 0.00125), - ('binance', 0.00025, ten_mins, 0.000625), - ('binance', 0.00025, five_hours, 0.003125), - ('binance', 0.00025, twentyfive_hours, 0.015625), - # Kraken - ('kraken', 0.0005, ten_mins, 0.06), - ('kraken', 0.00025, ten_mins, 0.03), - ('kraken', 0.00025, five_hours, 0.045), - ('kraken', 0.00025, twentyfive_hours, 0.12), -]) +@pytest.mark.parametrize( + "exchange,interest_rate,hours,expected", + [ + ("binance", 0.0005, ten_mins, 0.00125), + ("binance", 0.00025, ten_mins, 0.000625), + ("binance", 0.00025, five_hours, 0.003125), + ("binance", 0.00025, twentyfive_hours, 0.015625), + # Kraken + ("kraken", 0.0005, ten_mins, 0.06), + ("kraken", 0.00025, ten_mins, 0.03), + ("kraken", 0.00025, five_hours, 0.045), + ("kraken", 0.00025, twentyfive_hours, 0.12), + ], +) def test_interest(exchange, interest_rate, hours, expected): borrowed = FtPrecise(60.0) - assert pytest.approx(float(interest( - exchange_name=exchange, - borrowed=borrowed, - rate=FtPrecise(interest_rate), - hours=hours - ))) == expected + assert ( + pytest.approx( + float( + interest( + exchange_name=exchange, + borrowed=borrowed, + rate=FtPrecise(interest_rate), + hours=hours, + ) + ) + ) + == expected + ) def test_interest_exception(): with pytest.raises(OperationalException, match=r"Leverage not available on .* with freqtrade"): interest( - exchange_name='bitmex', - borrowed=FtPrecise(60.0), - 
rate=FtPrecise(0.0005), - hours=ten_mins + exchange_name="bitmex", borrowed=FtPrecise(60.0), rate=FtPrecise(0.0005), hours=ten_mins ) diff --git a/tests/optimize/__init__.py b/tests/optimize/__init__.py index b95764ba5..c824e4484 100644 --- a/tests/optimize/__init__.py +++ b/tests/optimize/__init__.py @@ -9,13 +9,14 @@ from freqtrade.util.datetime_helpers import dt_utc tests_start_time = dt_utc(2018, 10, 3) -tests_timeframe = '1h' +tests_timeframe = "1h" class BTrade(NamedTuple): """ Minimalistic Trade result used for functional backtesting """ + exit_reason: ExitType open_tick: int close_tick: int @@ -27,6 +28,7 @@ class BTContainer(NamedTuple): """ Minimal BacktestContainer defining Backtest inputs and results. """ + data: List[List[float]] stop_loss: float roi: Dict[str, float] @@ -51,22 +53,32 @@ def _get_frame_time_from_offset(offset): def _build_backtest_dataframe(data): - columns = ['date', 'open', 'high', 'low', 'close', 'volume', 'enter_long', 'exit_long', - 'enter_short', 'exit_short'] + columns = [ + "date", + "open", + "high", + "low", + "close", + "volume", + "enter_long", + "exit_long", + "enter_short", + "exit_short", + ] if len(data[0]) == 8: # No short columns data = [d + [0, 0] for d in data] - columns = columns + ['enter_tag'] if len(data[0]) == 11 else columns + columns = columns + ["enter_tag"] if len(data[0]) == 11 else columns frame = DataFrame.from_records(data, columns=columns) - frame['date'] = frame['date'].apply(_get_frame_time_from_offset) + frame["date"] = frame["date"].apply(_get_frame_time_from_offset) # Ensure floats are in place - for column in ['open', 'high', 'low', 'close', 'volume']: - frame[column] = frame[column].astype('float64') + for column in ["open", "high", "low", "close", "volume"]: + frame[column] = frame[column].astype("float64") # Ensure all candles make kindof sense - assert all(frame['low'] <= frame['close']) - assert all(frame['low'] <= frame['open']) - assert all(frame['high'] >= frame['close']) - assert all(frame['high'] >= frame['open']) + assert all(frame["low"] <= frame["close"]) + assert all(frame["low"] <= frame["open"]) + assert all(frame["high"] >= frame["close"]) + assert all(frame["high"] >= frame["open"]) return frame diff --git a/tests/optimize/conftest.py b/tests/optimize/conftest.py index cb8a6b5f7..b2833ef64 100644 --- a/tests/optimize/conftest.py +++ b/tests/optimize/conftest.py @@ -11,21 +11,23 @@ from freqtrade.optimize.hyperopt import Hyperopt from tests.conftest import patch_exchange -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def hyperopt_conf(default_conf): hyperconf = deepcopy(default_conf) - hyperconf.update({ - 'datadir': Path(default_conf['datadir']), - 'runmode': RunMode.HYPEROPT, - 'strategy': 'HyperoptableStrategy', - 'hyperopt_loss': 'ShortTradeDurHyperOptLoss', - 'hyperopt_path': str(Path(__file__).parent / 'hyperopts'), - 'epochs': 1, - 'timerange': None, - 'spaces': ['default'], - 'hyperopt_jobs': 1, - 'hyperopt_min_trades': 1, - }) + hyperconf.update( + { + "datadir": Path(default_conf["datadir"]), + "runmode": RunMode.HYPEROPT, + "strategy": "HyperoptableStrategy", + "hyperopt_loss": "ShortTradeDurHyperOptLoss", + "hyperopt_path": str(Path(__file__).parent / "hyperopts"), + "epochs": 1, + "timerange": None, + "spaces": ["default"], + "hyperopt_jobs": 1, + "hyperopt_min_trades": 1, + } + ) return hyperconf @@ -36,32 +38,29 @@ def backtesting_cleanup(): Backtesting.cleanup() -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def hyperopt(hyperopt_conf, 
mocker): - patch_exchange(mocker) return Hyperopt(hyperopt_conf) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def hyperopt_results(): return pd.DataFrame( { - 'pair': ['ETH/USDT', 'ETH/USDT', 'ETH/USDT', 'ETH/USDT'], - 'profit_ratio': [-0.1, 0.2, -0.12, 0.3], - 'profit_abs': [-0.2, 0.4, -0.21, 0.6], - 'trade_duration': [10, 30, 10, 10], - 'amount': [0.1, 0.1, 0.1, 0.1], - 'exit_reason': [ExitType.STOP_LOSS, ExitType.ROI, ExitType.STOP_LOSS, ExitType.ROI], - 'open_date': - [ + "pair": ["ETH/USDT", "ETH/USDT", "ETH/USDT", "ETH/USDT"], + "profit_ratio": [-0.1, 0.2, -0.12, 0.3], + "profit_abs": [-0.2, 0.4, -0.21, 0.6], + "trade_duration": [10, 30, 10, 10], + "amount": [0.1, 0.1, 0.1, 0.1], + "exit_reason": [ExitType.STOP_LOSS, ExitType.ROI, ExitType.STOP_LOSS, ExitType.ROI], + "open_date": [ datetime(2019, 1, 1, 9, 15, 0), datetime(2019, 1, 2, 8, 55, 0), datetime(2019, 1, 3, 9, 15, 0), datetime(2019, 1, 4, 9, 15, 0), ], - 'close_date': - [ + "close_date": [ datetime(2019, 1, 1, 9, 25, 0), datetime(2019, 1, 2, 9, 25, 0), datetime(2019, 1, 3, 9, 25, 0), diff --git a/tests/optimize/test_backtest_detail.py b/tests/optimize/test_backtest_detail.py index 54468910c..edaedb81e 100644 --- a/tests/optimize/test_backtest_detail.py +++ b/tests/optimize/test_backtest_detail.py @@ -9,51 +9,69 @@ from freqtrade.enums import ExitType, TradingMode from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence.trade_model import LocalTrade from tests.conftest import EXMS, patch_exchange -from tests.optimize import (BTContainer, BTrade, _build_backtest_dataframe, - _get_frame_time_from_offset, tests_timeframe) +from tests.optimize import ( + BTContainer, + BTrade, + _build_backtest_dataframe, + _get_frame_time_from_offset, + tests_timeframe, +) # Test 0: Sell with signal sell in candle 3 # Test with Stop-loss at 1% -tc0 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], # exit with stoploss hit - [3, 5010, 5010, 4980, 5010, 6172, 0, 1], - [4, 5010, 5011, 4977, 4995, 6172, 0, 0], - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 1}, profit_perc=0.002, use_exit_signal=True, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)] +tc0 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], # exit with stoploss hit + [3, 5010, 5010, 4980, 5010, 6172, 0, 1], + [4, 5010, 5011, 4977, 4995, 6172, 0, 0], + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 1}, + profit_perc=0.002, + use_exit_signal=True, + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)], ) # Test 1: Stop-Loss Triggered 1% loss # Test with Stop-loss at 1% -tc1 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4600, 4600, 6172, 0, 0], # exit with stoploss hit - [3, 4975, 5000, 4975, 4977, 6172, 0, 0], - [4, 4977, 4995, 4977, 4995, 6172, 0, 0], - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 1}, profit_perc=-0.01, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)] +tc1 = BTContainer( + data=[ + # 
D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4600, 4600, 6172, 0, 0], # exit with stoploss hit + [3, 4975, 5000, 4975, 4977, 6172, 0, 0], + [4, 4977, 4995, 4977, 4995, 6172, 0, 0], + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 1}, + profit_perc=-0.01, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)], ) # Test 2: Minus 4% Low, minus 1% close # Test with Stop-Loss at 3% -tc2 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4962, 4975, 6172, 0, 0], - [3, 4975, 5000, 4800, 4962, 6172, 0, 0], # exit with stoploss hit - [4, 4962, 4987, 4937, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.03, roi={"0": 1}, profit_perc=-0.03, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=3)] +tc2 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4962, 4975, 6172, 0, 0], + [3, 4975, 5000, 4800, 4962, 6172, 0, 0], # exit with stoploss hit + [4, 4962, 4987, 4937, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.03, + roi={"0": 1}, + profit_perc=-0.03, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=3)], ) @@ -63,320 +81,422 @@ tc2 = BTContainer(data=[ # Candle drops 20% # Trade-A: Stop-Loss Triggered 2% Loss # Trade-B: Stop-Loss Triggered 2% Loss -tc3 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4800, 4975, 6172, 0, 0], # exit with stoploss hit - [3, 4975, 5000, 4950, 4962, 6172, 1, 0], - [4, 4975, 5000, 4950, 4962, 6172, 0, 0], # enter trade 2 (signal on last candle) - [5, 4962, 4987, 4000, 4000, 6172, 0, 0], # exit with stoploss hit - [6, 4950, 4975, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.02, roi={"0": 1}, profit_perc=-0.04, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2), - BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=4, close_tick=5)] +tc3 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4800, 4975, 6172, 0, 0], # exit with stoploss hit + [3, 4975, 5000, 4950, 4962, 6172, 1, 0], + [4, 4975, 5000, 4950, 4962, 6172, 0, 0], # enter trade 2 (signal on last candle) + [5, 4962, 4987, 4000, 4000, 6172, 0, 0], # exit with stoploss hit + [6, 4950, 4975, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.02, + roi={"0": 1}, + profit_perc=-0.04, + trades=[ + BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2), + BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=4, close_tick=5), + ], ) # Test 4: Minus 3% / recovery +15% # Candle Data for test 3 – Candle drops 3% Closed 15% up # Test with Stop-loss at 2% ROI 6% # Stop-Loss Triggered 2% Loss -tc4 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5750, 4850, 5750, 6172, 0, 0], # Exit with stoploss hit - [3, 4975, 5000, 4950, 
4962, 6172, 0, 0], - [4, 4962, 4987, 4937, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.02, roi={"0": 0.06}, profit_perc=-0.02, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)] +tc4 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5750, 4850, 5750, 6172, 0, 0], # Exit with stoploss hit + [3, 4975, 5000, 4950, 4962, 6172, 0, 0], + [4, 4962, 4987, 4937, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.02, + roi={"0": 0.06}, + profit_perc=-0.02, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)], ) # Test 5: Drops 0.5% Closes +20%, ROI triggers 3% Gain # stop-loss: 1%, ROI: 3% -tc5 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4980, 4987, 6172, 1, 0], - [1, 5000, 5025, 4980, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5025, 4975, 4987, 6172, 0, 0], - [3, 4975, 6000, 4975, 6000, 6172, 0, 0], # ROI - [4, 4962, 4987, 4962, 4972, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.03}, profit_perc=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc5 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4980, 4987, 6172, 1, 0], + [1, 5000, 5025, 4980, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5025, 4975, 4987, 6172, 0, 0], + [3, 4975, 6000, 4975, 6000, 6172, 0, 0], # ROI + [4, 4962, 4987, 4962, 4972, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.03}, + profit_perc=0.03, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 6: Drops 3% / Recovers 6% Positive / Closes 1% positive, Stop-Loss triggers 2% Loss # stop-loss: 2% ROI: 5% -tc6 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5300, 4850, 5050, 6172, 0, 0], # Exit with stoploss - [3, 4975, 5000, 4950, 4962, 6172, 0, 0], - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.02, roi={"0": 0.05}, profit_perc=-0.02, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)] +tc6 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5300, 4850, 5050, 6172, 0, 0], # Exit with stoploss + [3, 4975, 5000, 4950, 4962, 6172, 0, 0], + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.02, + roi={"0": 0.05}, + profit_perc=-0.02, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2)], ) # Test 7: 6% Positive / 1% Negative / Close 1% Positive, ROI Triggers 3% Gain # stop-loss: 2% ROI: 3% -tc7 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5050, 6172, 0, 0], - [3, 4975, 5000, 4950, 4962, 6172, 0, 0], - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.02, roi={"0": 0.03}, profit_perc=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)] +tc7 = BTContainer( + data=[ + # D O H L 
C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5050, 6172, 0, 0], + [3, 4975, 5000, 4950, 4962, 6172, 0, 0], + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.02, + roi={"0": 0.03}, + profit_perc=0.03, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)], ) # Test 8: trailing_stop should raise so candle 3 causes a stoploss. # stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted in candle 2 -tc8 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5050, 4950, 5000, 6172, 0, 0], - [2, 5000, 5250, 4750, 4850, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.055, trailing_stop=True, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)] +tc8 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5050, 4950, 5000, 6172, 0, 0], + [2, 5000, 5250, 4750, 4850, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.055, + trailing_stop=True, + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)], ) # Test 9: trailing_stop should raise - high and low in same candle. # stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted in candle 3 -tc9 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5050, 4950, 5000, 6172, 0, 0], - [2, 5000, 5050, 4950, 5000, 6172, 0, 0], - [3, 5000, 5200, 4550, 4850, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.064, trailing_stop=True, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)] +tc9 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5050, 4950, 5000, 6172, 0, 0], + [2, 5000, 5050, 4950, 5000, 6172, 0, 0], + [3, 5000, 5200, 4550, 4850, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.064, + trailing_stop=True, + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)], ) # Test 10: trailing_stop should raise so candle 3 causes a stoploss # without applying trailing_stop_positive since stoploss_offset is at 10%. 
# stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted candle 2 -tc10 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 5100, 5100, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.1, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.10, +tc10 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 5100, 5100, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.1, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.10, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=4)] + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=4)], ) # Test 11: trailing_stop should raise so candle 3 causes a stoploss # applying a positive trailing stop of 3% since stop_positive_offset is reached. # stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted candle 2 -tc11 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 5100, 5100, 6172, 0, 0], - [3, 5000, 5150, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.019, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc11 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 5100, 5100, 6172, 0, 0], + [3, 5000, 5150, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.019, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)], ) # Test 12: trailing_stop should raise in candle 2 and cause a stoploss in the same candle # applying a positive trailing stop of 3% since stop_positive_offset is reached. 
# stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted candle 2 -tc12 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 4650, 5100, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.019, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc12 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 4650, 5100, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.019, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=2)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=2)], ) # Test 13: Buy and sell ROI on same candle # stop-loss: 10% (should not apply), ROI: 1% -tc13 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 4850, 5100, 6172, 0, 0], - [3, 4850, 5050, 4750, 4750, 6172, 0, 0], - [4, 4750, 4950, 4750, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.01}, profit_perc=0.01, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1)] +tc13 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 4850, 5100, 6172, 0, 0], + [3, 4850, 5050, 4750, 4750, 6172, 0, 0], + [4, 4750, 4950, 4750, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.01}, + profit_perc=0.01, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1)], ) # Test 14 - Buy and Stoploss on same candle # stop-loss: 5%, ROI: 10% (should not apply) -tc14 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4600, 5100, 6172, 0, 0], - [2, 5100, 5251, 4850, 5100, 6172, 0, 0], - [3, 4850, 5050, 4750, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.05, roi={"0": 0.10}, profit_perc=-0.05, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)] +tc14 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4600, 5100, 6172, 0, 0], + [2, 5100, 5251, 4850, 5100, 6172, 0, 0], + [3, 4850, 5050, 4750, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.05, + roi={"0": 0.10}, + profit_perc=-0.05, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)], ) # Test 15 - Buy and ROI on same candle, followed by buy and Stoploss on next candle # stop-loss: 5%, ROI: 10% (should not apply) -tc15 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4900, 5100, 6172, 1, 0], - [2, 5100, 5251, 4650, 5100, 6172, 0, 0], - [3, 4850, 5050, 4750, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.05, roi={"0": 0.01}, profit_perc=-0.04, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1), - BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=2, 
close_tick=2)] +tc15 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4900, 5100, 6172, 1, 0], + [2, 5100, 5251, 4650, 5100, 6172, 0, 0], + [3, 4850, 5050, 4750, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.05, + roi={"0": 0.01}, + profit_perc=-0.04, + trades=[ + BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1), + BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=2, close_tick=2), + ], ) # Test 16: Buy, hold for 65 min, then forceexit using roi=-1 # Causes negative profit even though sell-reason is ROI. # stop-loss: 10%, ROI: 10% (should not apply), -100% after 65 minutes (limits trade duration) -tc16 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5050, 6172, 0, 0], - [3, 4975, 5000, 4940, 4962, 6172, 0, 0], # Forceexit on ROI (roi=-1) - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10, "65": -1}, profit_perc=-0.012, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc16 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5050, 6172, 0, 0], + [3, 4975, 5000, 4940, 4962, 6172, 0, 0], # Forceexit on ROI (roi=-1) + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10, "65": -1}, + profit_perc=-0.012, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 17: Buy, hold for 120 mins, then forceexit using roi=-1 # Causes negative profit even though sell-reason is ROI. # stop-loss: 10%, ROI: 10% (should not apply), -100% after 100 minutes (limits trade duration) # Uses open as sell-rate (special case) - since the roi-time is a multiple of the timeframe. -tc17 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5050, 6172, 0, 0], - [3, 4980, 5000, 4940, 4962, 6172, 0, 0], # Forceexit on ROI (roi=-1) - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10, "120": -1}, profit_perc=-0.004, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc17 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5050, 6172, 0, 0], + [3, 4980, 5000, 4940, 4962, 6172, 0, 0], # Forceexit on ROI (roi=-1) + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10, "120": -1}, + profit_perc=-0.004, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 18: Buy, hold for 120 mins, then drop ROI to 1%, causing a sell in candle 3. 
# stop-loss: 10%, ROI: 10% (should not apply), -100% after 100 minutes (limits trade duration) # uses open_rate as sell-price -tc18 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5200, 6172, 0, 0], - [3, 5200, 5220, 4940, 4962, 6172, 0, 0], # Sell on ROI (sells on open) - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4950, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10, "120": 0.01}, profit_perc=0.04, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc18 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5200, 6172, 0, 0], + [3, 5200, 5220, 4940, 4962, 6172, 0, 0], # Sell on ROI (sells on open) + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4950, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10, "120": 0.01}, + profit_perc=0.04, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 19: Buy, hold for 119 mins, then drop ROI to 1%, causing a sell in candle 3. # stop-loss: 10%, ROI: 10% (should not apply), -100% after 100 minutes (limits trade duration) # uses calculated ROI (1%) as sell rate, otherwise identical to tc18 -tc19 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5200, 6172, 0, 0], - [3, 5000, 5300, 4940, 4962, 6172, 0, 0], # Sell on ROI - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4550, 4975, 4550, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10, "120": 0.01}, profit_perc=0.01, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc19 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5200, 6172, 0, 0], + [3, 5000, 5300, 4940, 4962, 6172, 0, 0], # Sell on ROI + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4550, 4975, 4550, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10, "120": 0.01}, + profit_perc=0.01, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 20: Buy, hold for 119 mins, then drop ROI to 1%, causing a sell in candle 3. # stop-loss: 10%, ROI: 10% (should not apply), -100% after 100 minutes (limits trade duration) # uses calculated ROI (1%) as sell rate, otherwise identical to tc18 -tc20 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], - [2, 4987, 5300, 4950, 5200, 6172, 0, 0], - [3, 5200, 5300, 4940, 4962, 6172, 0, 0], # Sell on ROI - [4, 4962, 4987, 4950, 4950, 6172, 0, 0], - [5, 4925, 4975, 4925, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10, "119": 0.01}, profit_perc=0.01, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc20 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], + [2, 4987, 5300, 4950, 5200, 6172, 0, 0], + [3, 5200, 5300, 4940, 4962, 6172, 0, 0], # Sell on ROI + [4, 4962, 4987, 4950, 4950, 6172, 0, 0], + [5, 4925, 4975, 4925, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10, "119": 0.01}, + profit_perc=0.01, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 21: trailing_stop ROI collision. 
# Roi should trigger before Trailing stop - otherwise Trailing stop profits can be > ROI # which cannot happen in reality # stop-loss: 10%, ROI: 4%, Trailing stop adjusted at the sell candle -tc21 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 4650, 5100, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.04}, profit_perc=0.04, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc21 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 4650, 5100, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.04}, + profit_perc=0.04, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)], ) # Test 22: trailing_stop Raises in candle 2 - but ROI applies at the same time. # applying a positive trailing stop of 3% - ROI should apply before trailing stop. # stop-loss: 10%, ROI: 4%, stoploss adjusted candle 2 -tc22 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 5100, 5100, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.04}, profit_perc=0.04, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc22 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 5100, 5100, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.04}, + profit_perc=0.04, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)], ) # Test 23: trailing_stop Raises in candle 2 - but ROI applies at the same time. # applying a positive trailing stop of 3% - ROI should apply before trailing stop. 
# stop-loss: 10%, ROI: 4%, stoploss adjusted candle 2 -tc23 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5050, 4900, 4900, 6172, 0, 0, 0, 0], - [2, 4900, 4900, 4749, 4900, 6172, 0, 0, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0]], - stop_loss=-0.10, roi={"0": 0.04}, profit_perc=0.04, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc23 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5050, 4900, 4900, 6172, 0, 0, 0, 0], + [2, 4900, 4900, 4749, 4900, 6172, 0, 0, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.04}, + profit_perc=0.04, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2, is_short=True)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2, is_short=True)], ) # Test 24: trailing_stop Raises in candle 2 (does not trigger) @@ -385,460 +505,620 @@ tc23 = BTContainer(data=[ # in the candle after the raised stoploss candle with ROI reason. # Stoploss would trigger in this candle too, but it's no longer relevant. # stop-loss: 10%, ROI: 4%, stoploss adjusted candle 2, ROI adjusted in candle 3 (causing the sell) -tc24 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 5100, 5100, 6172, 0, 0], - [3, 4850, 5251, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.1, "119": 0.03}, profit_perc=0.03, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc24 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 5100, 5100, 6172, 0, 0], + [3, 4850, 5251, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.1, "119": 0.03}, + profit_perc=0.03, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 25: Sell with signal sell in candle 3 (stoploss also triggers on this candle) # Stoploss at 1%. 
# Stoploss wins over Sell-signal (because sell-signal is acted on in the next candle) -tc25 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], - [3, 5010, 5010, 4855, 5010, 6172, 0, 1], # Triggers stoploss + sellsignal - [4, 5010, 5010, 4977, 4995, 6172, 0, 0], - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 1}, profit_perc=-0.01, use_exit_signal=True, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=3)] +tc25 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], + [3, 5010, 5010, 4855, 5010, 6172, 0, 1], # Triggers stoploss + sellsignal + [4, 5010, 5010, 4977, 4995, 6172, 0, 0], + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 1}, + profit_perc=-0.01, + use_exit_signal=True, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=3)], ) # Test 26: Sell with signal sell in candle 3 (stoploss also triggers on this candle) # Stoploss at 1%. # Sell-signal wins over stoploss -tc26 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], - [3, 5010, 5010, 4986, 5010, 6172, 0, 1], - [4, 5010, 5010, 4855, 4995, 6172, 0, 0], # Triggers stoploss + sellsignal acted on - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 1}, profit_perc=0.002, use_exit_signal=True, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)] +tc26 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], + [3, 5010, 5010, 4986, 5010, 6172, 0, 1], + [4, 5010, 5010, 4855, 4995, 6172, 0, 0], # Triggers stoploss + sellsignal acted on + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 1}, + profit_perc=0.002, + use_exit_signal=True, + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)], ) # Test 27: (copy of test26 with leverage) # Sell with signal sell in candle 3 (stoploss also triggers on this candle) # Stoploss at 1%. 
# Sell-signal wins over stoploss -tc27 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], - [3, 5010, 5010, 4986, 5010, 6172, 0, 1], - [4, 5010, 5010, 4855, 4995, 6172, 0, 0], # Triggers stoploss + sellsignal acted on - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.05, roi={"0": 1}, profit_perc=0.002 * 5.0, use_exit_signal=True, +tc27 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], + [3, 5010, 5010, 4986, 5010, 6172, 0, 1], + [4, 5010, 5010, 4855, 4995, 6172, 0, 0], # Triggers stoploss + sellsignal acted on + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.05, + roi={"0": 1}, + profit_perc=0.002 * 5.0, + use_exit_signal=True, leverage=5.0, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)] + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)], ) # Test 28: (copy of test26 with leverage and as short) # Sell with signal sell in candle 3 (stoploss also triggers on this candle) # Stoploss at 1%. # Sell-signal wins over stoploss -tc28 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 0, 0, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0, 0, 0], - [3, 5010, 5010, 4986, 5010, 6172, 0, 0, 0, 1], - [4, 4990, 5010, 4855, 4995, 6172, 0, 0, 0, 0], # Triggers stoploss + sellsignal acted on - [5, 4995, 4995, 4950, 4950, 6172, 0, 0, 0, 0]], - stop_loss=-0.05, roi={"0": 1}, profit_perc=0.002 * 5.0, use_exit_signal=True, +tc28 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 0, 0, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0, 0, 0], + [3, 5010, 5010, 4986, 5010, 6172, 0, 0, 0, 1], + [4, 4990, 5010, 4855, 4995, 6172, 0, 0, 0, 0], # Triggers stoploss + sellsignal acted on + [5, 4995, 4995, 4950, 4950, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.05, + roi={"0": 1}, + profit_perc=0.002 * 5.0, + use_exit_signal=True, leverage=5.0, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=True)] + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=True)], ) # Test 29: Sell with signal sell in candle 3 (ROI at signal candle) # Stoploss at 10% (irrelevant), ROI at 5% (will trigger) # Sell-signal wins over stoploss -tc29 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], - [3, 5010, 5251, 4986, 5010, 6172, 0, 1], # Triggers ROI, sell-signal - [4, 5010, 5010, 4855, 4995, 6172, 0, 0], - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.05, use_exit_signal=True, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)] +tc29 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], + [3, 5010, 5251, 4986, 5010, 6172, 
0, 1], # Triggers ROI, sell-signal + [4, 5010, 5010, 4855, 4995, 6172, 0, 0], + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.05}, + profit_perc=0.05, + use_exit_signal=True, + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=3)], ) # Test 30: Sell with signal sell in candle 3 (ROI at signal candle) # Stoploss at 10% (irrelevant), ROI at 5% (will trigger) - Wins over Sell-signal -tc30 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5025, 4975, 4987, 6172, 1, 0], - [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4987, 5012, 4986, 4986, 6172, 0, 0], - [3, 5010, 5012, 4986, 5010, 6172, 0, 1], # sell-signal - [4, 5010, 5251, 4855, 4995, 6172, 0, 0], # Triggers ROI, sell-signal acted on - [5, 4995, 4995, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.05}, profit_perc=0.002, use_exit_signal=True, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)] +tc30 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5025, 4975, 4987, 6172, 1, 0], + [1, 5000, 5025, 4975, 4987, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4987, 5012, 4986, 4986, 6172, 0, 0], + [3, 5010, 5012, 4986, 5010, 6172, 0, 1], # sell-signal + [4, 5010, 5251, 4855, 4995, 6172, 0, 0], # Triggers ROI, sell-signal acted on + [5, 4995, 4995, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.05}, + profit_perc=0.002, + use_exit_signal=True, + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4)], ) # Test 31: trailing_stop should raise so candle 3 causes a stoploss # Same case than tc11 - but candle 3 "gaps down" - the stoploss will be above the candle, # therefore "open" will be used # stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted candle 2 -tc31 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5100, 4950, 5100, 6172, 0, 0], - [2, 5100, 5251, 5100, 5100, 6172, 0, 0], - [3, 4850, 5050, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.03, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc31 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5100, 4950, 5100, 6172, 0, 0], + [2, 5100, 5251, 5100, 5100, 6172, 0, 0], + [3, 4850, 5050, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.03, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3)], ) # Test 32: (Short of test 31) trailing_stop should raise so candle 3 causes a stoploss # Same case than tc11 - but candle 3 "gaps down" - the stoploss will be above the candle, # therefore "open" will be used # stop-loss: 10%, ROI: 10% (should not apply), stoploss adjusted candle 2 -tc32 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5050, 4890, 4890, 6172, 0, 0, 0, 0], - [2, 4890, 4890, 4749, 4890, 6172, 0, 0, 0, 0], - [3, 5150, 5350, 4950, 4950, 6172, 0, 0, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.03, 
trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.05, +tc32 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5050, 4890, 4890, 6172, 0, 0, 0, 0], + [2, 4890, 4890, 4749, 4890, 6172, 0, 0, 0, 0], + [3, 5150, 5350, 4950, 4950, 6172, 0, 0, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.03, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.05, trailing_stop_positive=0.03, trades=[ BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=3, is_short=True) -] + ], ) # Test 33: trailing_stop should be triggered by low of next candle, without adjusting stoploss using # high of stoploss candle. # stop-loss: 10%, ROI: 10% (should not apply) -tc33 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5050, 5000, 5000, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Triggers trailing-stoploss - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.02, trailing_stop=True, +tc33 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5050, 5000, 5000, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Triggers trailing-stoploss + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.02, + trailing_stop=True, trailing_stop_positive=0.03, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=2)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=2)], ) # Test 34: trailing_stop should be triggered immediately on trade open candle. # stop-loss: 10%, ROI: 10% (should not apply) -tc34 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4900, 4900, 6172, 0, 0], # enter trade (signal on last candle) and stop - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.01, trailing_stop=True, +tc34 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4900, 4900, 6172, 0, 0], # enter trade (signal on last candle) and stop + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.01, + trailing_stop=True, trailing_stop_positive=0.01, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)], ) # Test 35: trailing_stop should be triggered immediately on trade open candle. 
# stop-loss: 10%, ROI: 10% (should not apply) -tc35 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4900, 4900, 6172, 0, 0], # enter trade (signal on last candle) and stop - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.01, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.02, +tc35 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4900, 4900, 6172, 0, 0], # enter trade (signal on last candle) and stop + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.01, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.02, trailing_stop_positive=0.01, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)] + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)], ) # Test 36: trailing_stop should be triggered immediately on trade open candle. # stop-loss: 1%, ROI: 10% (should not apply) -tc36 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # enter trade and stop - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=-0.01, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.02, - trailing_stop_positive=0.01, use_custom_stoploss=True, - trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)] +tc36 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # enter trade and stop + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=-0.01, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.02, + trailing_stop_positive=0.01, + use_custom_stoploss=True, + trades=[BTrade(exit_reason=ExitType.TRAILING_STOP_LOSS, open_tick=1, close_tick=1)], ) # Test 37: trailing_stop should be triggered immediately on trade open candle. 
# stop-loss: 1%, ROI: 10% (should not apply) -tc37 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0, 0, 0, 'buy_signal_01'], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0, 0, 0, None], # enter trade and stop - [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0, None], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0, None], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0, None]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=-0.01, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.02, - trailing_stop_positive=0.01, use_custom_stoploss=True, - trades=[BTrade( - exit_reason=ExitType.TRAILING_STOP_LOSS, - open_tick=1, - close_tick=1, - enter_tag='buy_signal_01' - )] +tc37 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0, 0, 0, "buy_signal_01"], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0, 0, 0, None], # enter trade and stop + [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0, None], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0, None], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0, None], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=-0.01, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.02, + trailing_stop_positive=0.01, + use_custom_stoploss=True, + trades=[ + BTrade( + exit_reason=ExitType.TRAILING_STOP_LOSS, + open_tick=1, + close_tick=1, + enter_tag="buy_signal_01", + ) + ], ) # Test 38: trailing_stop should be triggered immediately on trade open candle. # copy of Test37 using shorts. # stop-loss: 1%, ROI: 10% (should not apply) -tc38 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0, 'short_signal_01'], - [1, 5000, 5049, 4500, 5000, 6172, 0, 0, 0, 0, None], # enter trade and stop - [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0, None], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0, None], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0, None]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=-0.01, trailing_stop=True, - trailing_only_offset_is_reached=True, trailing_stop_positive_offset=0.02, - trailing_stop_positive=0.01, use_custom_stoploss=True, - trades=[BTrade( - exit_reason=ExitType.TRAILING_STOP_LOSS, - open_tick=1, - close_tick=1, - enter_tag='short_signal_01', - is_short=True, - )] +tc38 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0, "short_signal_01"], + [1, 5000, 5049, 4500, 5000, 6172, 0, 0, 0, 0, None], # enter trade and stop + [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0, None], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0, None], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0, None], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=-0.01, + trailing_stop=True, + trailing_only_offset_is_reached=True, + trailing_stop_positive_offset=0.02, + trailing_stop_positive=0.01, + use_custom_stoploss=True, + trades=[ + BTrade( + exit_reason=ExitType.TRAILING_STOP_LOSS, + open_tick=1, + close_tick=1, + enter_tag="short_signal_01", + is_short=True, + ) + ], ) # Test 39: Custom-entry-price below all candles should timeout - so no trade happens. 
-tc39 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # timeout - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=0.0, - custom_entry_price=4200, trades=[] +tc39 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # timeout + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=0.0, + custom_entry_price=4200, + trades=[], ) # Test 40: Custom-entry-price above all candles should have rate adjusted to "entry candle high" -tc40 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Timeout - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=-0.01, - custom_entry_price=7200, trades=[ - BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1) -]) +tc40 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Timeout + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=-0.01, + custom_entry_price=7200, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1)], +) # Test 41: Custom-entry-price above all candles should have rate adjusted to "entry candle high" -tc41 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0, 0, 0], # Timeout - [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=-0.01, +tc41 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0, 0, 0], # Timeout + [2, 4900, 5250, 4500, 5100, 6172, 0, 0, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=-0.01, custom_entry_price=4000, - trades=[ - BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1, is_short=True) -] + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=1, is_short=True)], ) # Test 42: Custom-entry-price around candle low # Would cause immediate ROI exit, but since the trade was entered # below open, we treat this as cheating, and delay the sell by 1 candle. 
# details: https://github.com/freqtrade/freqtrade/issues/6261 -tc42 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 4999, 6172, 0, 0], # Enter and immediate ROI - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.01}, profit_perc=0.01, +tc42 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 4999, 6172, 0, 0], # Enter and immediate ROI + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.01}, + profit_perc=0.01, custom_entry_price=4952, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=2)], ) # Test 43: Custom-entry-price around candle low # Would cause immediate ROI exit below close # details: https://github.com/freqtrade/freqtrade/issues/6261 -tc43 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5400, 5500, 4951, 5100, 6172, 0, 0], # Enter and immediate ROI - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.01}, profit_perc=0.01, +tc43 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5400, 5500, 4951, 5100, 6172, 0, 0], # Enter and immediate ROI + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.01}, + profit_perc=0.01, custom_entry_price=4952, - trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1)] + trades=[BTrade(exit_reason=ExitType.ROI, open_tick=1, close_tick=1)], ) # Test 44: Custom exit price below all candles # Price adjusted to candle Low. 
-tc44 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], - [2, 4900, 5250, 4900, 5100, 6172, 0, 1], # exit - but timeout - [3, 5100, 5100, 4950, 4950, 6172, 0, 0], - [4, 5000, 5100, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=-0.01, +tc44 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], + [2, 4900, 5250, 4900, 5100, 6172, 0, 1], # exit - but timeout + [3, 5100, 5100, 4950, 4950, 6172, 0, 0], + [4, 5000, 5100, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=-0.01, use_exit_signal=True, custom_exit_price=4552, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=3)] + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=3)], ) # Test 45: Custom exit price above all candles # causes sell signal timeout -tc45 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], - [2, 4950, 5250, 4900, 5100, 6172, 0, 1], # exit - entry timeout - [3, 5100, 5100, 4950, 4950, 6172, 0, 0], - [4, 5000, 5100, 4950, 4950, 6172, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.0, +tc45 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], + [2, 4950, 5250, 4900, 5100, 6172, 0, 1], # exit - entry timeout + [3, 5100, 5100, 4950, 4950, 6172, 0, 0], + [4, 5000, 5100, 4950, 4950, 6172, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.0, use_exit_signal=True, custom_exit_price=6052, - trades=[BTrade(exit_reason=ExitType.FORCE_EXIT, open_tick=1, close_tick=4)] + trades=[BTrade(exit_reason=ExitType.FORCE_EXIT, open_tick=1, close_tick=4)], ) # Test 46: (Short of tc45) Custom short exit price above below candles # causes sell signal timeout -tc46 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5000, 4951, 5000, 6172, 0, 0, 0, 0], - [2, 4910, 5150, 4910, 5100, 6172, 0, 0, 0, 1], # exit - entry timeout - [3, 5100, 5100, 4950, 4950, 6172, 0, 0, 0, 0], - [4, 5000, 5100, 4950, 4950, 6172, 0, 0, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.0, +tc46 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5000, 4951, 5000, 6172, 0, 0, 0, 0], + [2, 4910, 5150, 4910, 5100, 6172, 0, 0, 0, 1], # exit - entry timeout + [3, 5100, 5100, 4950, 4950, 6172, 0, 0, 0, 0], + [4, 5000, 5100, 4950, 4950, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.0, use_exit_signal=True, custom_exit_price=4700, - trades=[BTrade(exit_reason=ExitType.FORCE_EXIT, open_tick=1, close_tick=4, is_short=True)] + trades=[BTrade(exit_reason=ExitType.FORCE_EXIT, open_tick=1, close_tick=4, is_short=True)], ) # Test 47: Colliding long and short signal -tc47 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0, 0, 0], - [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], - [3, 5100, 5100, 4950, 4950, 6172, 0, 0, 0, 0], - [4, 5000, 5100, 4950, 4950, 6172, 0, 0, 0, 0]], - stop_loss=-0.10, roi={"0": 0.10}, profit_perc=0.0, +tc47 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0, 1, 0], + [1, 5000, 5500, 4951, 
5000, 6172, 0, 0, 0, 0], + [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], + [3, 5100, 5100, 4950, 4950, 6172, 0, 0, 0, 0], + [4, 5000, 5100, 4950, 4950, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.10, + roi={"0": 0.10}, + profit_perc=0.0, use_exit_signal=True, - trades=[] + trades=[], ) # Test 48: Custom-entry-price below all candles - readjust order -tc48 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # timeout - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust - [3, 5100, 5100, 4650, 4750, 6172, 0, 1], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.2, roi={"0": 0.10}, profit_perc=-0.087, - use_exit_signal=True, timeout=1000, - custom_entry_price=4200, adjust_entry_price=5200, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=False)] +tc48 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # timeout + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust + [3, 5100, 5100, 4650, 4750, 6172, 0, 1], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.2, + roi={"0": 0.10}, + profit_perc=-0.087, + use_exit_signal=True, + timeout=1000, + custom_entry_price=4200, + adjust_entry_price=5200, + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=False)], ) # Test 49: Custom-entry-price short above all candles - readjust order -tc49 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5200, 4951, 5000, 6172, 0, 0, 0, 0], # timeout - [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], # Order readjust - [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 1], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0]], - stop_loss=-0.2, roi={"0": 0.10}, profit_perc=0.05, - use_exit_signal=True, timeout=1000, - custom_entry_price=5300, adjust_entry_price=5000, - trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=True)] +tc49 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5200, 4951, 5000, 6172, 0, 0, 0, 0], # timeout + [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], # Order readjust + [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 1], + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.2, + roi={"0": 0.10}, + profit_perc=0.05, + use_exit_signal=True, + timeout=1000, + custom_entry_price=5300, + adjust_entry_price=5000, + trades=[BTrade(exit_reason=ExitType.EXIT_SIGNAL, open_tick=1, close_tick=4, is_short=True)], ) # Test 50: Custom-entry-price below all candles - readjust order cancels order -tc50 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], # Enter long - place order - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Order readjust - cancel order - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=0.0, - use_exit_signal=True, timeout=1000, - custom_entry_price=4200, adjust_entry_price=None, - trades=[] +tc50 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], # Enter long - place order + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Order readjust - cancel order + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], + [4, 4750, 4950, 
4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=0.0, + use_exit_signal=True, + timeout=1000, + custom_entry_price=4200, + adjust_entry_price=None, + trades=[], ) # Test 51: Custom-entry-price below all candles - readjust order leaves order in place and timeout. -tc51 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], # Enter long - place order - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Order readjust - replace order - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust - maintain order - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], # Timeout - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.01, roi={"0": 0.10}, profit_perc=0.0, - use_exit_signal=True, timeout=60, - custom_entry_price=4200, adjust_entry_price=4100, - trades=[] +tc51 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], # Enter long - place order + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # Order readjust - replace order + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust - maintain order + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], # Timeout + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.01, + roi={"0": 0.10}, + profit_perc=0.0, + use_exit_signal=True, + timeout=60, + custom_entry_price=4200, + adjust_entry_price=4100, + trades=[], ) # Test 52: Custom-entry-price below all candles - readjust order - stoploss -tc52 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 1, 0], - [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # enter trade (signal on last candle) - [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust - [3, 5100, 5100, 4650, 4750, 6172, 0, 0], # stoploss hit? - [4, 4750, 4950, 4350, 4750, 6172, 0, 0]], - stop_loss=-0.03, roi={}, profit_perc=-0.03, - use_exit_signal=True, timeout=1000, - custom_entry_price=4200, adjust_entry_price=5200, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2, is_short=False)] +tc52 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 1, 0], + [1, 5000, 5500, 4951, 5000, 6172, 0, 0], # enter trade (signal on last candle) + [2, 4900, 5250, 4500, 5100, 6172, 0, 0], # Order readjust + [3, 5100, 5100, 4650, 4750, 6172, 0, 0], # stoploss hit? + [4, 4750, 4950, 4350, 4750, 6172, 0, 0], + ], + stop_loss=-0.03, + roi={}, + profit_perc=-0.03, + use_exit_signal=True, + timeout=1000, + custom_entry_price=4200, + adjust_entry_price=5200, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2, is_short=False)], ) # Test 53: Custom-entry-price short above all candles - readjust order - stoploss -tc53 = BTContainer(data=[ - # D O H L C V EL XL ES Xs BT - [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], - [1, 5000, 5200, 4951, 5000, 6172, 0, 0, 0, 0], # enter trade (signal on last candle) - [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], # Order readjust - [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 1], # stoploss hit? 
- [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0]], - stop_loss=-0.03, roi={"0": 0.10}, profit_perc=-0.03, - use_exit_signal=True, timeout=1000, - custom_entry_price=5300, adjust_entry_price=5000, - trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2, is_short=True)] +tc53 = BTContainer( + data=[ + # D O H L C V EL XL ES Xs BT + [0, 5000, 5050, 4950, 5000, 6172, 0, 0, 1, 0], + [1, 5000, 5200, 4951, 5000, 6172, 0, 0, 0, 0], # enter trade (signal on last candle) + [2, 4900, 5250, 4900, 5100, 6172, 0, 0, 0, 0], # Order readjust + [3, 5100, 5100, 4650, 4750, 6172, 0, 0, 0, 1], # stoploss hit? + [4, 4750, 4950, 4350, 4750, 6172, 0, 0, 0, 0], + ], + stop_loss=-0.03, + roi={"0": 0.10}, + profit_perc=-0.03, + use_exit_signal=True, + timeout=1000, + custom_entry_price=5300, + adjust_entry_price=5000, + trades=[BTrade(exit_reason=ExitType.STOP_LOSS, open_tick=1, close_tick=2, is_short=True)], ) TESTS = [ @@ -910,10 +1190,12 @@ def test_backtest_results(default_conf, mocker, caplog, data: BTContainer) -> No default_conf["trailing_stop"] = data.trailing_stop default_conf["trailing_only_offset_is_reached"] = data.trailing_only_offset_is_reached if data.timeout: - default_conf['unfilledtimeout'].update({ - 'entry': data.timeout, - 'exit': data.timeout, - }) + default_conf["unfilledtimeout"].update( + { + "entry": data.timeout, + "exit": data.timeout, + } + ) # Only add this to configuration If it's necessary if data.trailing_stop_positive is not None: default_conf["trailing_stop_positive"] = data.trailing_stop_positive @@ -921,12 +1203,12 @@ def test_backtest_results(default_conf, mocker, caplog, data: BTContainer) -> No default_conf["use_exit_signal"] = data.use_exit_signal default_conf["max_open_trades"] = 10 + patch_exchange(mocker) mocker.patch(f"{EXMS}.get_fee", return_value=0.0) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) mocker.patch(f"{EXMS}.get_max_leverage", return_value=100) mocker.patch(f"{EXMS}.calculate_funding_fees", return_value=0) - patch_exchange(mocker) frame = _build_backtest_dataframe(data.data) backtesting = Backtesting(default_conf) # TODO: Should we initialize this properly?? 
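For readers skimming the reformatted fixtures above, the single-letter header repeated in every data block (# D O H L C V EL XL ES Xs BT) appears to map onto the signal columns used elsewhere in this diff (enter_long, exit_long, enter_short, exit_short) plus an optional enter tag, as seen in tc37/tc38. The sketch below is illustrative only and is not part of this change set; the Row helper and decode function are invented names used solely for this note.

# Illustrative sketch (not part of this change set): decoding one BTContainer data row
# into the columns implied by the "# D  O  H  L  C  V  EL XL ES Xs BT" header above.
from collections import namedtuple

Row = namedtuple(
    "Row",
    "date open high low close volume enter_long exit_long enter_short exit_short enter_tag",
)


def decode(row):
    # Rows with 8 or 10 fields omit the short-signal columns and/or the enter tag;
    # pad them so every row exposes the same 11 fields.
    padded = list(row) + [0, 0, None][len(row) - 8 :]
    return Row(*padded)


# Example: the first candle of tc12 above carries a long-entry signal and no exit signal.
print(decode([0, 5000, 5050, 4950, 5000, 6172, 1, 0]))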
@@ -956,7 +1238,7 @@ def test_backtest_results(default_conf, mocker, caplog, data: BTContainer) -> No end_date=max_date, ) - results = result['results'] + results = result["results"] assert len(results) == len(data.trades) assert round(results["profit_ratio"].sum(), 3) == round(data.profit_perc, 3) diff --git a/tests/optimize/test_backtesting.py b/tests/optimize/test_backtesting.py index 4163e9606..5576b312f 100644 --- a/tests/optimize/test_backtesting.py +++ b/tests/optimize/test_backtesting.py @@ -26,23 +26,21 @@ from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import LocalTrade, Trade from freqtrade.resolvers import StrategyResolver from freqtrade.util.datetime_helpers import dt_utc -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, get_args, log_has, log_has_re, - patch_exchange, patched_configuration_load_config_file) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + get_args, + log_has, + log_has_re, + patch_exchange, + patched_configuration_load_config_file, +) ORDER_TYPES = [ - { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': False - }, - { - 'entry': 'limit', - 'exit': 'limit', - 'stoploss': 'limit', - 'stoploss_on_exchange': True - }] + {"entry": "limit", "exit": "limit", "stoploss": "limit", "stoploss_on_exchange": False}, + {"entry": "limit", "exit": "limit", "stoploss": "limit", "stoploss_on_exchange": True}, +] def trim_dictlist(dict_list, num): @@ -53,39 +51,46 @@ def trim_dictlist(dict_list, num): def load_data_test(what, testdatadir): - timerange = TimeRange.parse_timerange('1510694220-1510700340') - data = history.load_pair_history(pair='UNITTEST/BTC', datadir=testdatadir, - timeframe='1m', timerange=timerange, - drop_incomplete=False, - fill_up_missing=False) + timerange = TimeRange.parse_timerange("1510694220-1510700340") + data = history.load_pair_history( + pair="UNITTEST/BTC", + datadir=testdatadir, + timeframe="1m", + timerange=timerange, + drop_incomplete=False, + fill_up_missing=False, + ) base = 0.001 - if what == 'raise': - data.loc[:, 'open'] = data.index * base - data.loc[:, 'high'] = data.index * base + 0.0001 - data.loc[:, 'low'] = data.index * base - 0.0001 - data.loc[:, 'close'] = data.index * base + if what == "raise": + data.loc[:, "open"] = data.index * base + data.loc[:, "high"] = data.index * base + 0.0001 + data.loc[:, "low"] = data.index * base - 0.0001 + data.loc[:, "close"] = data.index * base - if what == 'lower': - data.loc[:, 'open'] = 1 - data.index * base - data.loc[:, 'high'] = 1 - data.index * base + 0.0001 - data.loc[:, 'low'] = 1 - data.index * base - 0.0001 - data.loc[:, 'close'] = 1 - data.index * base + if what == "lower": + data.loc[:, "open"] = 1 - data.index * base + data.loc[:, "high"] = 1 - data.index * base + 0.0001 + data.loc[:, "low"] = 1 - data.index * base - 0.0001 + data.loc[:, "close"] = 1 - data.index * base - if what == 'sine': + if what == "sine": hz = 0.1 # frequency - data.loc[:, 'open'] = np.sin(data.index * hz) / 1000 + base - data.loc[:, 'high'] = np.sin(data.index * hz) / 1000 + base + 0.0001 - data.loc[:, 'low'] = np.sin(data.index * hz) / 1000 + base - 0.0001 - data.loc[:, 'close'] = np.sin(data.index * hz) / 1000 + base + data.loc[:, "open"] = np.sin(data.index * hz) / 1000 + base + data.loc[:, "high"] = np.sin(data.index * hz) / 1000 + base + 0.0001 + data.loc[:, "low"] = np.sin(data.index * hz) / 1000 + base - 0.0001 + data.loc[:, "close"] = np.sin(data.index * hz) / 1000 + base - return {'UNITTEST/BTC': 
clean_ohlcv_dataframe(data, timeframe='1m', pair='UNITTEST/BTC', - fill_missing=True, drop_incomplete=True)} + return { + "UNITTEST/BTC": clean_ohlcv_dataframe( + data, timeframe="1m", pair="UNITTEST/BTC", fill_missing=True, drop_incomplete=True + ) + } # FIX: fixturize this? -def _make_backtest_conf(mocker, datadir, conf=None, pair='UNITTEST/BTC'): - data = history.load_data(datadir=datadir, timeframe='1m', pairs=[pair]) +def _make_backtest_conf(mocker, datadir, conf=None, pair="UNITTEST/BTC"): + data = history.load_data(datadir=datadir, timeframe="1m", pairs=[pair]) data = trim_dictlist(data, -201) patch_exchange(mocker) backtesting = Backtesting(conf) @@ -93,30 +98,30 @@ def _make_backtest_conf(mocker, datadir, conf=None, pair='UNITTEST/BTC'): processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) return { - 'processed': processed, - 'start_date': min_date, - 'end_date': max_date, + "processed": processed, + "start_date": min_date, + "end_date": max_date, } def _trend(signals, buy_value, sell_value): - n = len(signals['low']) + n = len(signals["low"]) buy = np.zeros(n) sell = np.zeros(n) - for i in range(0, len(signals['date'])): + for i in range(0, len(signals["date"])): if random.random() > 0.5: # Both buy and sell signals at same timeframe buy[i] = buy_value sell[i] = sell_value - signals['enter_long'] = buy - signals['exit_long'] = sell - signals['enter_short'] = 0 - signals['exit_short'] = 0 + signals["enter_long"] = buy + signals["exit_long"] = sell + signals["enter_short"] = 0 + signals["exit_short"] = 0 return signals def _trend_alternate(dataframe=None, metadata=None): signals = dataframe - low = signals['low'] + low = signals["low"] n = len(low) buy = np.zeros(n) sell = np.zeros(n) @@ -125,10 +130,10 @@ def _trend_alternate(dataframe=None, metadata=None): buy[i] = 1 else: sell[i] = 1 - signals['enter_long'] = buy - signals['exit_long'] = sell - signals['enter_short'] = 0 - signals['exit_short'] = 0 + signals["enter_long"] = buy + signals["exit_long"] = sell + signals["enter_short"] = 0 + signals["exit_short"] = 0 return dataframe @@ -137,107 +142,120 @@ def test_setup_optimize_configuration_without_arguments(mocker, default_conf, ca patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--export', 'none' + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--export", + "none", ] config = setup_optimize_configuration(get_args(args), RunMode.BACKTEST) - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config - assert not log_has_re('Parameter -i/--ticker-interval detected .*', caplog) + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config + assert not log_has_re("Parameter -i/--ticker-interval detected .*", caplog) - assert 'position_stacking' not in config - assert not log_has('Parameter --enable-position-stacking detected ...', caplog) + assert 
"position_stacking" not in config + assert not log_has("Parameter --enable-position-stacking detected ...", caplog) - assert 'timerange' not in config - assert 'export' in config - assert config['export'] == 'none' - assert 'runmode' in config - assert config['runmode'] == RunMode.BACKTEST + assert "timerange" not in config + assert "export" in config + assert config["export"] == "none" + assert "runmode" in config + assert config["runmode"] == RunMode.BACKTEST def test_setup_bt_configuration_with_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) - mocker.patch( - 'freqtrade.configuration.configuration.create_datadir', - lambda c, x: x - ) + mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--datadir', '/foo/bar', - '--timeframe', '1m', - '--enable-position-stacking', - '--disable-max-market-positions', - '--timerange', ':100', - '--export-filename', 'foo_bar.json', - '--fee', '0', + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--datadir", + "/foo/bar", + "--timeframe", + "1m", + "--enable-position-stacking", + "--disable-max-market-positions", + "--timerange", + ":100", + "--export-filename", + "foo_bar.json", + "--fee", + "0", ] config = setup_optimize_configuration(get_args(args), RunMode.BACKTEST) - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert config['runmode'] == RunMode.BACKTEST + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert config["runmode"] == RunMode.BACKTEST - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config - assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - caplog) + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config + assert log_has("Parameter -i/--timeframe detected ... 
Using timeframe: 1m ...", caplog) - assert 'position_stacking' in config - assert log_has('Parameter --enable-position-stacking detected ...', caplog) + assert "position_stacking" in config + assert log_has("Parameter --enable-position-stacking detected ...", caplog) - assert 'use_max_market_positions' in config - assert log_has('Parameter --disable-max-market-positions detected ...', caplog) - assert log_has('max_open_trades set to unlimited ...', caplog) + assert "use_max_market_positions" in config + assert log_has("Parameter --disable-max-market-positions detected ...", caplog) + assert log_has("max_open_trades set to unlimited ...", caplog) - assert 'timerange' in config - assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog) + assert "timerange" in config + assert log_has("Parameter --timerange detected: {} ...".format(config["timerange"]), caplog) - assert 'export' in config - assert 'exportfilename' in config - assert isinstance(config['exportfilename'], Path) - assert log_has('Storing backtest results to {} ...'.format(config['exportfilename']), caplog) + assert "export" in config + assert "exportfilename" in config + assert isinstance(config["exportfilename"], Path) + assert log_has("Storing backtest results to {} ...".format(config["exportfilename"]), caplog) - assert 'fee' in config - assert log_has('Parameter --fee detected, setting fee to: {} ...'.format(config['fee']), caplog) + assert "fee" in config + assert log_has("Parameter --fee detected, setting fee to: {} ...".format(config["fee"]), caplog) def test_setup_optimize_configuration_stake_amount(mocker, default_conf, caplog) -> None: - patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--stake-amount', '1', - '--starting-balance', '2' + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--stake-amount", + "1", + "--starting-balance", + "2", ] conf = setup_optimize_configuration(get_args(args), RunMode.BACKTEST) assert isinstance(conf, dict) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--stake-amount', '1', - '--starting-balance', '0.5' + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--stake-amount", + "1", + "--starting-balance", + "0.5", ] with pytest.raises(OperationalException, match=r"Starting balance .* smaller .*"): setup_optimize_configuration(get_args(args), RunMode.BACKTEST) @@ -245,19 +263,21 @@ def test_setup_optimize_configuration_stake_amount(mocker, default_conf, caplog) def test_start(mocker, fee, default_conf, caplog) -> None: start_mock = MagicMock() - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.start', start_mock) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.start", start_mock) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, ] pargs = get_args(args) start_backtesting(pargs) - assert log_has('Starting freqtrade in Backtesting mode', caplog) + assert log_has("Starting freqtrade in Backtesting mode", caplog) assert start_mock.call_count == 1 @@ -269,11 +289,11 @@ def test_backtesting_init(mocker, default_conf, 
order_types) -> None: """ default_conf["order_types"] = order_types patch_exchange(mocker) - get_fee = mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.5)) + get_fee = mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.5)) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) assert backtesting.config == default_conf - assert backtesting.timeframe == '5m' + assert backtesting.timeframe == "5m" assert callable(backtesting.strategy.advise_all_indicators) assert callable(backtesting.strategy.advise_entry) assert callable(backtesting.strategy.advise_exit) @@ -286,27 +306,27 @@ def test_backtesting_init(mocker, default_conf, order_types) -> None: def test_backtesting_init_no_timeframe(mocker, default_conf, caplog) -> None: patch_exchange(mocker) - del default_conf['timeframe'] - default_conf['strategy_list'] = [CURRENT_TEST_STRATEGY, - 'HyperoptableStrategy'] + del default_conf["timeframe"] + default_conf["strategy_list"] = [CURRENT_TEST_STRATEGY, "HyperoptableStrategy"] - mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.5)) - with pytest.raises(OperationalException, - match=r"Timeframe needs to be set in either configuration"): + mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.5)) + with pytest.raises( + OperationalException, match=r"Timeframe needs to be set in either configuration" + ): Backtesting(default_conf) def test_data_with_fee(default_conf, mocker) -> None: patch_exchange(mocker) - default_conf['fee'] = 0.1234 + default_conf["fee"] = 0.01234 - fee_mock = mocker.patch(f'{EXMS}.get_fee', MagicMock(return_value=0.5)) + fee_mock = mocker.patch(f"{EXMS}.get_fee", MagicMock(return_value=0.5)) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - assert backtesting.fee == 0.1234 + assert backtesting.fee == 0.01234 assert fee_mock.call_count == 0 - default_conf['fee'] = 0.0 + default_conf["fee"] = 0.0 backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) assert backtesting.fee == 0.0 @@ -315,19 +335,20 @@ def test_data_with_fee(default_conf, mocker) -> None: def test_data_to_dataframe_bt(default_conf, mocker, testdatadir) -> None: patch_exchange(mocker) - timerange = TimeRange.parse_timerange('1510694220-1510700340') - data = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange, - fill_up_missing=True) + timerange = TimeRange.parse_timerange("1510694220-1510700340") + data = history.load_data( + testdatadir, "1m", ["UNITTEST/BTC"], timerange=timerange, fill_up_missing=True + ) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) processed = backtesting.strategy.advise_all_indicators(data) - assert len(processed['UNITTEST/BTC']) == 103 + assert len(processed["UNITTEST/BTC"]) == 103 # Load strategy to compare the result between Backtesting function and strategy are the same strategy = StrategyResolver.load_strategy(default_conf) processed2 = strategy.advise_all_indicators(data) - assert processed['UNITTEST/BTC'].equals(processed2['UNITTEST/BTC']) + assert processed["UNITTEST/BTC"].equals(processed2["UNITTEST/BTC"]) def test_backtest_abort(default_conf, mocker, testdatadir) -> None: @@ -348,21 +369,23 @@ def test_backtesting_start(default_conf, mocker, caplog) -> None: def get_timerange(input1): return dt_utc(2017, 11, 14, 21, 17), dt_utc(2017, 11, 14, 22, 59) - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + 
mocker.patch("freqtrade.data.history.get_timerange", get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest') - mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats') - mocker.patch('freqtrade.optimize.backtesting.show_backtest_results') - sbs = mocker.patch('freqtrade.optimize.backtesting.store_backtest_stats') - sbc = mocker.patch('freqtrade.optimize.backtesting.store_backtest_analysis_results') - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest") + mocker.patch("freqtrade.optimize.backtesting.generate_backtest_stats") + mocker.patch("freqtrade.optimize.backtesting.show_backtest_results") + sbs = mocker.patch("freqtrade.optimize.backtesting.store_backtest_stats") + sbc = mocker.patch("freqtrade.optimize.backtesting.store_backtest_analysis_results") + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) - default_conf['timeframe'] = '1m' - default_conf['export'] = 'signals' - default_conf['exportfilename'] = 'export.txt' - default_conf['timerange'] = '-1510694220' - default_conf['runmode'] = RunMode.BACKTEST + default_conf["timeframe"] = "1m" + default_conf["export"] = "signals" + default_conf["exportfilename"] = "export.txt" + default_conf["timerange"] = "-1510694220" + default_conf["runmode"] = RunMode.BACKTEST backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) @@ -370,10 +393,7 @@ def test_backtesting_start(default_conf, mocker, caplog) -> None: backtesting.strategy.bot_start = MagicMock() backtesting.start() # check the logs, that will contain the backtest result - exists = [ - 'Backtesting with data from 2017-11-14 21:17:00 ' - 'up to 2017-11-14 22:59:00 (0 days).' - ] + exists = ["Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days)."] for line in exists: assert log_has(line, caplog) assert backtesting.strategy.dp._pairlists is not None @@ -387,104 +407,126 @@ def test_backtesting_start_no_data(default_conf, mocker, caplog, testdatadir) -> def get_timerange(input1): return dt_utc(2017, 11, 14, 21, 17), dt_utc(2017, 11, 14, 22, 59) - mocker.patch('freqtrade.data.history.history_utils.load_pair_history', - MagicMock(return_value=pd.DataFrame())) - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch( + "freqtrade.data.history.history_utils.load_pair_history", + MagicMock(return_value=pd.DataFrame()), + ) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest') - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest") + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) - default_conf['timeframe'] = "1m" - default_conf['export'] = 'none' - default_conf['timerange'] = '20180101-20180102' + default_conf["timeframe"] = "1m" + default_conf["export"] = "none" + default_conf["timerange"] = "20180101-20180102" backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - with pytest.raises(OperationalException, match='No data found. 
Terminating.'): + with pytest.raises(OperationalException, match="No data found. Terminating."): backtesting.start() def test_backtesting_no_pair_left(default_conf, mocker, caplog, testdatadir) -> None: - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - mocker.patch('freqtrade.data.history.history_utils.load_pair_history', - MagicMock(return_value=pd.DataFrame())) - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + mocker.patch( + "freqtrade.data.history.history_utils.load_pair_history", + MagicMock(return_value=pd.DataFrame()), + ) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest') - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=[])) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest") + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", PropertyMock(return_value=[]) + ) - default_conf['timeframe'] = "1m" - default_conf['export'] = 'none' - default_conf['timerange'] = '20180101-20180102' + default_conf["timeframe"] = "1m" + default_conf["export"] = "none" + default_conf["timerange"] = "20180101-20180102" - with pytest.raises(OperationalException, match='No pair in whitelist.'): + with pytest.raises(OperationalException, match="No pair in whitelist."): Backtesting(default_conf) - default_conf['pairlists'] = [{"method": "VolumePairList", "number_assets": 5}] - with pytest.raises(OperationalException, - match=r'VolumePairList not allowed for backtesting\..*StaticPairList.*'): + default_conf["pairlists"] = [{"method": "VolumePairList", "number_assets": 5}] + with pytest.raises( + OperationalException, + match=r"VolumePairList not allowed for backtesting\..*StaticPairList.*", + ): Backtesting(default_conf) - default_conf.update({ - 'pairlists': [{"method": "StaticPairList"}], - 'timeframe_detail': '1d', - }) + default_conf.update( + { + "pairlists": [{"method": "StaticPairList"}], + "timeframe_detail": "1d", + } + ) - with pytest.raises(OperationalException, - match='Detail timeframe must be smaller than strategy timeframe.'): + with pytest.raises( + OperationalException, match="Detail timeframe must be smaller than strategy timeframe." 
+ ): Backtesting(default_conf) def test_backtesting_pairlist_list(default_conf, mocker, caplog, testdatadir, tickers) -> None: - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - mocker.patch(f'{EXMS}.get_tickers', tickers) - mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y: y) - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.get_tickers", tickers) + mocker.patch(f"{EXMS}.price_to_precision", lambda s, x, y: y) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest') - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/BTC'])) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.refresh_pairlist') + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest") + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/BTC"]), + ) + mocker.patch("freqtrade.plugins.pairlistmanager.PairListManager.refresh_pairlist") - default_conf['ticker_interval'] = "1m" - default_conf['export'] = 'none' + default_conf["ticker_interval"] = "1m" + default_conf["export"] = "none" # Use stoploss from strategy - del default_conf['stoploss'] - default_conf['timerange'] = '20180101-20180102' + del default_conf["stoploss"] + default_conf["timerange"] = "20180101-20180102" - default_conf['pairlists'] = [{"method": "VolumePairList", "number_assets": 5}] - with pytest.raises(OperationalException, - match=r'VolumePairList not allowed for backtesting\..*StaticPairList.*'): + default_conf["pairlists"] = [{"method": "VolumePairList", "number_assets": 5}] + with pytest.raises( + OperationalException, + match=r"VolumePairList not allowed for backtesting\..*StaticPairList.*", + ): Backtesting(default_conf) - default_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}] - with pytest.raises(OperationalException, - match='PerformanceFilter not allowed for backtesting.'): + default_conf["pairlists"] = [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}] + with pytest.raises( + OperationalException, match="PerformanceFilter not allowed for backtesting." 
+ ): Backtesting(default_conf) - default_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PrecisionFilter"}, ] + default_conf["pairlists"] = [ + {"method": "StaticPairList"}, + {"method": "PrecisionFilter"}, + ] Backtesting(default_conf) # Multiple strategies - default_conf['strategy_list'] = [CURRENT_TEST_STRATEGY, 'StrategyTestV2'] - with pytest.raises(OperationalException, - match='PrecisionFilter not allowed for backtesting multiple strategies.'): + default_conf["strategy_list"] = [CURRENT_TEST_STRATEGY, "StrategyTestV2"] + with pytest.raises( + OperationalException, + match="PrecisionFilter not allowed for backtesting multiple strategies.", + ): Backtesting(default_conf) def test_backtest__enter_trade(default_conf, fee, mocker) -> None: - default_conf['use_exit_signal'] = False - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001) - mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf')) + default_conf["use_exit_signal"] = False + mocker.patch(f"{EXMS}.get_fee", fee) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) - default_conf['stake_amount'] = 'unlimited' - default_conf['max_open_trades'] = 2 + default_conf["stake_amount"] = "unlimited" + default_conf["max_open_trades"] = 2 backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - pair = 'UNITTEST/BTC' + pair = "UNITTEST/BTC" row = [ pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0), 1, # Buy @@ -493,64 +535,68 @@ def test_backtest__enter_trade(default_conf, fee, mocker) -> None: 0, # Sell 0.00099, # Low 0.0012, # High - '', # Buy Signal Name + "", # Buy Signal Name ] - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert isinstance(trade, LocalTrade) assert trade.stake_amount == 495 # Fake 2 trades, so there's not enough amount for the next trade left. 
LocalTrade.trades_open.append(trade) - LocalTrade.trades_open.append(trade) backtesting.wallets.update() - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert trade is None LocalTrade.trades_open.pop() - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert trade is not None + LocalTrade.trades_open.pop() backtesting.strategy.custom_stake_amount = lambda **kwargs: 123.5 backtesting.wallets.update() - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") + LocalTrade.trades_open.pop() assert trade assert trade.stake_amount == 123.5 # In case of error - use proposed stake backtesting.strategy.custom_stake_amount = lambda **kwargs: 20 / 0 - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") + LocalTrade.trades_open.pop() assert trade assert trade.stake_amount == 495 assert trade.is_short is False - trade = backtesting._enter_trade(pair, row=row, direction='short') + trade = backtesting._enter_trade(pair, row=row, direction="short") + LocalTrade.trades_open.pop() assert trade assert trade.stake_amount == 495 assert trade.is_short is True mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=300.0) - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") + LocalTrade.trades_open.pop() assert trade assert trade.stake_amount == 300.0 def test_backtest__enter_trade_futures(default_conf_usdt, fee, mocker) -> None: - default_conf_usdt['use_exit_signal'] = False - mocker.patch(f'{EXMS}.get_fee', fee) + default_conf_usdt["use_exit_signal"] = False + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) mocker.patch(f"{EXMS}.get_max_leverage", return_value=100) mocker.patch("freqtrade.optimize.backtesting.price_to_precision", lambda p, *args: p) patch_exchange(mocker) - default_conf_usdt['stake_amount'] = 300 - default_conf_usdt['max_open_trades'] = 2 - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' - default_conf_usdt['stake_currency'] = 'USDT' - default_conf_usdt['exchange']['pair_whitelist'] = ['.*'] + default_conf_usdt["stake_amount"] = 300 + default_conf_usdt["max_open_trades"] = 2 + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" + default_conf_usdt["stake_currency"] = "USDT" + default_conf_usdt["exchange"]["pair_whitelist"] = [".*"] backtesting = Backtesting(default_conf_usdt) backtesting._set_strategy(backtesting.strategylist[0]) - mocker.patch('freqtrade.optimize.backtesting.Backtesting._run_funding_fees') - pair = 'ETH/USDT:USDT' + mocker.patch("freqtrade.optimize.backtesting.Backtesting._run_funding_fees") + pair = "ETH/USDT:USDT" row = [ pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0), 0.1, # Open @@ -561,14 +607,13 @@ def test_backtest__enter_trade_futures(default_conf_usdt, fee, mocker) -> None: 0, # exit_long 1, # enter_short 0, # exit_hsort - '', # Long Signal Name - '', # Short Signal Name - '', # Exit Signal Name + "", # Long Signal Name + "", # Short 
Signal Name + "", # Exit Signal Name ] backtesting.strategy.leverage = MagicMock(return_value=5.0) - mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", - return_value=(0.01, 0.01)) + mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0.01, 0.01)) # leverage = 5 # ep1(trade.open_rate) = 0.1 @@ -588,7 +633,7 @@ def test_backtest__enter_trade_futures(default_conf_usdt, fee, mocker) -> None: # = 0.08080740740740741 + ((0.1 - 0.08080740740740741) * 0.05 * 1) # = 0.08176703703703704 - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert pytest.approx(trade.liquidation_price) == 0.081767037 # Binance, Short @@ -600,37 +645,38 @@ def test_backtest__enter_trade_futures(default_conf_usdt, fee, mocker) -> None: # = 0.11881254125412541 + (abs(0.1 - 0.11881254125412541) * 0.05 * -1) # = 0.11787191419141915 - trade = backtesting._enter_trade(pair, row=row, direction='short') + trade = backtesting._enter_trade(pair, row=row, direction="short") assert pytest.approx(trade.liquidation_price) == 0.11787191 assert pytest.approx(trade.orders[0].cost) == ( - trade.stake_amount * trade.leverage + trade.fee_open) + trade.stake_amount * trade.leverage + trade.fee_open + ) assert pytest.approx(trade.orders[-1].stake_amount) == trade.stake_amount # Stake-amount too high! mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=600.0) - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert trade is None # Stake-amount throwing error - mocker.patch("freqtrade.wallets.Wallets.get_trade_stake_amount", - side_effect=DependencyException) + mocker.patch( + "freqtrade.wallets.Wallets.get_trade_stake_amount", side_effect=DependencyException + ) - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert trade is None -def test_backtest__check_trade_exit(default_conf, fee, mocker) -> None: - default_conf['use_exit_signal'] = False - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) +def test_backtest__check_trade_exit(default_conf, mocker) -> None: + default_conf["use_exit_signal"] = False patch_exchange(mocker) - default_conf['timeframe_detail'] = '1m' - default_conf['max_open_trades'] = 2 + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + default_conf["timeframe_detail"] = "1m" + default_conf["max_open_trades"] = 2 backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - pair = 'UNITTEST/BTC' + pair = "UNITTEST/BTC" row = [ pd.Timestamp(year=2020, month=1, day=1, hour=4, minute=55, tzinfo=timezone.utc), 200, # Open @@ -641,12 +687,12 @@ def test_backtest__check_trade_exit(default_conf, fee, mocker) -> None: 0, # exit_long 0, # enter_short 0, # exit_hsort - '', # Long Signal Name - '', # Short Signal Name - '', # Exit Signal Name + "", # Long Signal Name + "", # Short Signal Name + "", # Exit Signal Name ] - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert isinstance(trade, LocalTrade) row_sell = [ @@ -659,10 +705,9 @@ def test_backtest__check_trade_exit(default_conf, fee, 
mocker) -> None: 0, # exit_long 0, # enter_short 0, # exit_short - '', # long Signal Name - '', # Short Signal Name - '', # Exit Signal Name - + "", # long Signal Name + "", # Short Signal Name + "", # Exit Signal Name ] # No data available. @@ -672,31 +717,45 @@ def test_backtest__check_trade_exit(default_conf, fee, mocker) -> None: assert res.close_date_utc == datetime(2020, 1, 1, 5, 0, tzinfo=timezone.utc) # Enter new trade - trade = backtesting._enter_trade(pair, row=row, direction='long') + trade = backtesting._enter_trade(pair, row=row, direction="long") assert isinstance(trade, LocalTrade) # Assign empty ... no result. backtesting.detail_data[pair] = pd.DataFrame( - [], columns=['date', 'open', 'high', 'low', 'close', 'enter_long', 'exit_long', - 'enter_short', 'exit_short', 'long_tag', 'short_tag', 'exit_tag']) + [], + columns=[ + "date", + "open", + "high", + "low", + "close", + "enter_long", + "exit_long", + "enter_short", + "exit_short", + "long_tag", + "short_tag", + "exit_tag", + ], + ) res = backtesting._check_trade_exit(trade, row, row[0].to_pydatetime()) assert res is None -def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: - default_conf['use_exit_signal'] = False - default_conf['max_open_trades'] = 10 +def test_backtest_one(default_conf, mocker, testdatadir) -> None: + default_conf["use_exit_signal"] = False + default_conf["max_open_trades"] = 10 - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) patch_exchange(mocker) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - pair = 'UNITTEST/BTC' - timerange = TimeRange('date', None, 1517227800, 0) - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=timerange) + pair = "UNITTEST/BTC" + timerange = TimeRange("date", None, 1517227800, 0) + data = history.load_data( + datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange + ) processed = backtesting.strategy.advise_all_indicators(data) backtesting.strategy.order_filled = MagicMock() min_date, max_date = get_timerange(processed) @@ -706,108 +765,131 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None: start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty assert len(results) == 2 expected = pd.DataFrame( - {'pair': [pair, pair], - 'stake_amount': [0.001, 0.001], - 'max_stake_amount': [0.001, 0.001], - 'amount': [0.00957442, 0.0097064], - 'open_date': pd.to_datetime([dt_utc(2018, 1, 29, 18, 40, 0), - dt_utc(2018, 1, 30, 3, 30, 0)], utc=True - ), - 'close_date': pd.to_datetime([dt_utc(2018, 1, 29, 22, 35, 0), - dt_utc(2018, 1, 30, 4, 10, 0)], utc=True), - 'open_rate': [0.104445, 0.10302485], - 'close_rate': [0.104969, 0.103541], - 'fee_open': [0.0025, 0.0025], - 'fee_close': [0.0025, 0.0025], - 'trade_duration': [235, 40], - 'profit_ratio': [0.0, 0.0], - 'profit_abs': [0.0, 0.0], - 'exit_reason': [ExitType.ROI.value, ExitType.ROI.value], - 'initial_stop_loss_abs': [0.0940005, 0.09272236], - 'initial_stop_loss_ratio': [-0.1, -0.1], - 'stop_loss_abs': [0.0940005, 0.09272236], - 'stop_loss_ratio': [-0.1, -0.1], - 'min_rate': [0.10370188, 0.10300000000000001], - 
'max_rate': [0.10501, 0.1038888], - 'is_open': [False, False], - 'enter_tag': ['', ''], - "leverage": [1.0, 1.0], - "is_short": [False, False], - 'open_timestamp': [1517251200000, 1517283000000], - 'close_timestamp': [1517265300000, 1517285400000], - 'orders': [ - [ - {'amount': 0.00957442, 'safe_price': 0.104445, 'ft_order_side': 'buy', - 'order_filled_timestamp': 1517251200000, 'ft_is_entry': True, - 'ft_order_tag': ''}, - {'amount': 0.00957442, 'safe_price': 0.10496853383458644, 'ft_order_side': 'sell', - 'order_filled_timestamp': 1517265300000, 'ft_is_entry': False, - 'ft_order_tag': 'roi'} - ], [ - {'amount': 0.0097064, 'safe_price': 0.10302485, 'ft_order_side': 'buy', - 'order_filled_timestamp': 1517283000000, 'ft_is_entry': True, - 'ft_order_tag': ''}, - {'amount': 0.0097064, 'safe_price': 0.10354126528822055, 'ft_order_side': 'sell', - 'order_filled_timestamp': 1517285400000, 'ft_is_entry': False, - 'ft_order_tag': 'roi'} - ] - ] - }) + { + "pair": [pair, pair], + "stake_amount": [0.001, 0.001], + "max_stake_amount": [0.001, 0.001], + "amount": [0.00957442, 0.0097064], + "open_date": pd.to_datetime( + [dt_utc(2018, 1, 29, 18, 40, 0), dt_utc(2018, 1, 30, 3, 30, 0)], utc=True + ), + "close_date": pd.to_datetime( + [dt_utc(2018, 1, 29, 22, 35, 0), dt_utc(2018, 1, 30, 4, 10, 0)], utc=True + ), + "open_rate": [0.104445, 0.10302485], + "close_rate": [0.104969, 0.103541], + "fee_open": [0.0025, 0.0025], + "fee_close": [0.0025, 0.0025], + "trade_duration": [235, 40], + "profit_ratio": [0.0, 0.0], + "profit_abs": [0.0, 0.0], + "exit_reason": [ExitType.ROI.value, ExitType.ROI.value], + "initial_stop_loss_abs": [0.0940005, 0.09272236], + "initial_stop_loss_ratio": [-0.1, -0.1], + "stop_loss_abs": [0.0940005, 0.09272236], + "stop_loss_ratio": [-0.1, -0.1], + "min_rate": [0.10370188, 0.10300000000000001], + "max_rate": [0.10501, 0.1038888], + "is_open": [False, False], + "enter_tag": ["", ""], + "leverage": [1.0, 1.0], + "is_short": [False, False], + "open_timestamp": [1517251200000, 1517283000000], + "close_timestamp": [1517265300000, 1517285400000], + "orders": [ + [ + { + "amount": 0.00957442, + "safe_price": 0.104445, + "ft_order_side": "buy", + "order_filled_timestamp": 1517251200000, + "ft_is_entry": True, + "ft_order_tag": "", + }, + { + "amount": 0.00957442, + "safe_price": 0.10496853383458644, + "ft_order_side": "sell", + "order_filled_timestamp": 1517265300000, + "ft_is_entry": False, + "ft_order_tag": "roi", + }, + ], + [ + { + "amount": 0.0097064, + "safe_price": 0.10302485, + "ft_order_side": "buy", + "order_filled_timestamp": 1517283000000, + "ft_is_entry": True, + "ft_order_tag": "", + }, + { + "amount": 0.0097064, + "safe_price": 0.10354126528822055, + "ft_order_side": "sell", + "order_filled_timestamp": 1517285400000, + "ft_is_entry": False, + "ft_order_tag": "roi", + }, + ], + ], + } + ) pd.testing.assert_frame_equal(results, expected) - assert 'orders' in results.columns + assert "orders" in results.columns data_pair = processed[pair] # Called once per order assert backtesting.strategy.order_filled.call_count == 4 for _, t in results.iterrows(): - assert len(t['orders']) == 2 + assert len(t["orders"]) == 2 ln = data_pair.loc[data_pair["date"] == t["open_date"]] # Check open trade rate aligns to open rate assert not ln.empty assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) # check close trade rate aligns to close rate or is between high and low ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] - assert (round(ln1.iloc[0]["open"], 6) == 
round(t["close_rate"], 6) or - round(ln1.iloc[0]["low"], 6) < round( - t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6)) + assert round(ln1.iloc[0]["open"], 6) == round(t["close_rate"], 6) or round( + ln1.iloc[0]["low"], 6 + ) < round(t["close_rate"], 6) < round(ln1.iloc[0]["high"], 6) -@pytest.mark.parametrize('use_detail', [True, False]) -def test_backtest_one_detail(default_conf_usdt, fee, mocker, testdatadir, use_detail) -> None: - default_conf_usdt['use_exit_signal'] = False - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - if use_detail: - default_conf_usdt['timeframe_detail'] = '1m' +@pytest.mark.parametrize("use_detail", [True, False]) +def test_backtest_one_detail(default_conf_usdt, mocker, testdatadir, use_detail) -> None: + default_conf_usdt["use_exit_signal"] = False patch_exchange(mocker) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + if use_detail: + default_conf_usdt["timeframe_detail"] = "1m" def advise_entry(df, *args, **kwargs): # Mock function to force several entries - df.loc[(df['rsi'] < 40), 'enter_long'] = 1 + df.loc[(df["rsi"] < 40), "enter_long"] = 1 return df def custom_entry_price(proposed_rate, **kwargs): return proposed_rate * 0.997 - default_conf_usdt['max_open_trades'] = 10 + default_conf_usdt["max_open_trades"] = 10 backtesting = Backtesting(default_conf_usdt) backtesting._set_strategy(backtesting.strategylist[0]) backtesting.strategy.populate_entry_trend = advise_entry backtesting.strategy.custom_entry_price = custom_entry_price - pair = 'XRP/ETH' + pair = "XRP/ETH" # Pick a timerange adapted to the pair we use to test - timerange = TimeRange.parse_timerange('20191010-20191013') - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=[pair], - timerange=timerange) + timerange = TimeRange.parse_timerange("20191010-20191013") + data = history.load_data(datadir=testdatadir, timeframe="5m", pairs=[pair], timerange=timerange) if use_detail: - data_1m = history.load_data(datadir=testdatadir, timeframe='1m', pairs=[pair], - timerange=timerange) + data_1m = history.load_data( + datadir=testdatadir, timeframe="1m", pairs=[pair], timerange=timerange + ) backtesting.detail_data = data_1m processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -817,33 +899,37 @@ def test_backtest_one_detail(default_conf_usdt, fee, mocker, testdatadir, use_de start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty # Timeout settings from default_conf = entry: 10, exit: 30 assert len(results) == (2 if use_detail else 3) - assert 'orders' in results.columns + assert "orders" in results.columns data_pair = processed[pair] data_1m_pair = data_1m[pair] if use_detail else pd.DataFrame() late_entry = 0 for _, t in results.iterrows(): - assert len(t['orders']) == 2 + assert len(t["orders"]) == 2 - entryo = t['orders'][0] - entry_ts = datetime.fromtimestamp(entryo['order_filled_timestamp'] // 1000, tz=timezone.utc) - if entry_ts > t['open_date']: + entryo = t["orders"][0] + entry_ts = datetime.fromtimestamp(entryo["order_filled_timestamp"] // 1000, tz=timezone.utc) + if entry_ts > t["open_date"]: late_entry += 1 # Get "entry fill" candle - ln = (data_1m_pair.loc[data_1m_pair["date"] == entry_ts] - if 
use_detail else data_pair.loc[data_pair["date"] == entry_ts]) + ln = ( + data_1m_pair.loc[data_1m_pair["date"] == entry_ts] + if use_detail + else data_pair.loc[data_pair["date"] == entry_ts] + ) # Check open trade rate aligns to open rate assert not ln.empty # assert round(ln.iloc[0]["open"], 6) == round(t["open_rate"], 6) - assert round(ln.iloc[0]["low"], 6) <= round( - t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + assert ( + round(ln.iloc[0]["low"], 6) <= round(t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + ) # check close trade rate aligns to close rate or is between high and low ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] if use_detail: @@ -853,57 +939,68 @@ def test_backtest_one_detail(default_conf_usdt, fee, mocker, testdatadir, use_de assert not ln1.empty ln2 = ln1_1m if ln1.empty else ln1 - assert (round(ln2.iloc[0]["low"], 6) <= round( - t["close_rate"], 6) <= round(ln2.iloc[0]["high"], 6)) + assert ( + round(ln2.iloc[0]["low"], 6) + <= round(t["close_rate"], 6) + <= round(ln2.iloc[0]["high"], 6) + ) assert late_entry > 0 -@pytest.mark.parametrize('use_detail,exp_funding_fee, exp_ff_updates', [ - (True, -0.018054162, 11), - (False, -0.01780296, 5), - ]) +@pytest.mark.parametrize( + "use_detail,exp_funding_fee, exp_ff_updates", + [ + (True, -0.018054162, 11), + (False, -0.01780296, 5), + ], +) def test_backtest_one_detail_futures( - default_conf_usdt, fee, mocker, testdatadir, use_detail, exp_funding_fee, - exp_ff_updates) -> None: - default_conf_usdt['use_exit_signal'] = False - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' - default_conf_usdt['candle_type_def'] = CandleType.FUTURES + default_conf_usdt, mocker, testdatadir, use_detail, exp_funding_fee, exp_ff_updates +) -> None: + default_conf_usdt["use_exit_signal"] = False + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" + default_conf_usdt["candle_type_def"] = CandleType.FUTURES - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/USDT:USDT'])) - mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", - return_value=(0.01, 0.01)) - default_conf_usdt['timeframe'] = '1h' - if use_detail: - default_conf_usdt['timeframe_detail'] = '5m' patch_exchange(mocker) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/USDT:USDT"]), + ) + mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0.01, 0.01)) + default_conf_usdt["timeframe"] = "1h" + if use_detail: + default_conf_usdt["timeframe_detail"] = "5m" def advise_entry(df, *args, **kwargs): # Mock function to force several entries - df.loc[(df['rsi'] < 40), 'enter_long'] = 1 + df.loc[(df["rsi"] < 40), "enter_long"] = 1 return df def custom_entry_price(proposed_rate, **kwargs): return proposed_rate * 0.997 - default_conf_usdt['max_open_trades'] = 10 + default_conf_usdt["max_open_trades"] = 10 backtesting = Backtesting(default_conf_usdt) - ff_spy = mocker.spy(backtesting.exchange, 'calculate_funding_fees') + ff_spy = mocker.spy(backtesting.exchange, "calculate_funding_fees") 
backtesting._set_strategy(backtesting.strategylist[0]) backtesting.strategy.populate_entry_trend = advise_entry backtesting.strategy.custom_entry_price = custom_entry_price - pair = 'XRP/USDT:USDT' + pair = "XRP/USDT:USDT" # Pick a timerange adapted to the pair we use to test - timerange = TimeRange.parse_timerange('20211117-20211119') - data = history.load_data(datadir=Path(testdatadir), timeframe='1h', pairs=[pair], - timerange=timerange, candle_type=CandleType.FUTURES) + timerange = TimeRange.parse_timerange("20211117-20211119") + data = history.load_data( + datadir=Path(testdatadir), + timeframe="1h", + pairs=[pair], + timerange=timerange, + candle_type=CandleType.FUTURES, + ) backtesting.load_bt_data_detail() processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -913,32 +1010,36 @@ def test_backtest_one_detail_futures( start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty # Timeout settings from default_conf = entry: 10, exit: 30 assert len(results) == (5 if use_detail else 2) - assert 'orders' in results.columns + assert "orders" in results.columns data_pair = processed[pair] data_1m_pair = backtesting.detail_data[pair] if use_detail else pd.DataFrame() late_entry = 0 for _, t in results.iterrows(): - assert len(t['orders']) == 2 + assert len(t["orders"]) == 2 - entryo = t['orders'][0] - entry_ts = datetime.fromtimestamp(entryo['order_filled_timestamp'] // 1000, tz=timezone.utc) - if entry_ts > t['open_date']: + entryo = t["orders"][0] + entry_ts = datetime.fromtimestamp(entryo["order_filled_timestamp"] // 1000, tz=timezone.utc) + if entry_ts > t["open_date"]: late_entry += 1 # Get "entry fill" candle - ln = (data_1m_pair.loc[data_1m_pair["date"] == entry_ts] - if use_detail else data_pair.loc[data_pair["date"] == entry_ts]) + ln = ( + data_1m_pair.loc[data_1m_pair["date"] == entry_ts] + if use_detail + else data_pair.loc[data_pair["date"] == entry_ts] + ) # Check open trade rate aligns to open rate assert not ln.empty - assert round(ln.iloc[0]["low"], 6) <= round( - t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + assert ( + round(ln.iloc[0]["low"], 6) <= round(t["open_rate"], 6) <= round(ln.iloc[0]["high"], 6) + ) # check close trade rate aligns to close rate or is between high and low ln1 = data_pair.loc[data_pair["date"] == t["close_date"]] if use_detail: @@ -948,67 +1049,86 @@ def test_backtest_one_detail_futures( assert not ln1.empty ln2 = ln1_1m if ln1.empty else ln1 - assert (round(ln2.iloc[0]["low"], 6) <= round( - t["close_rate"], 6) <= round(ln2.iloc[0]["high"], 6)) + assert ( + round(ln2.iloc[0]["low"], 6) + <= round(t["close_rate"], 6) + <= round(ln2.iloc[0]["high"], 6) + ) assert pytest.approx(Trade.trades[1].funding_fees) == exp_funding_fee assert ff_spy.call_count == exp_ff_updates # assert late_entry > 0 -@pytest.mark.parametrize('use_detail,entries,max_stake,ff_updates,expected_ff', [ - (True, 50, 3000, 54, -1.18038144), - (False, 6, 360, 10, -0.14679994), -]) +@pytest.mark.parametrize( + "use_detail,entries,max_stake,ff_updates,expected_ff", + [ + (True, 50, 3000, 54, -1.18038144), + (False, 6, 360, 10, -0.14679994), + ], +) def test_backtest_one_detail_futures_funding_fees( - default_conf_usdt, fee, mocker, testdatadir, use_detail, entries, max_stake, - ff_updates, expected_ff, + default_conf_usdt, + fee, + mocker, + testdatadir, + use_detail, + entries, + max_stake, + ff_updates, + expected_ff, ) -> None: """ Funding fees are 
expected to differ, as the maximum position size differs. """ - default_conf_usdt['use_exit_signal'] = False - default_conf_usdt['trading_mode'] = 'futures' - default_conf_usdt['margin_mode'] = 'isolated' - default_conf_usdt['candle_type_def'] = CandleType.FUTURES - default_conf_usdt['minimal_roi'] = {'0': 1} - default_conf_usdt['dry_run_wallet'] = 100000 + default_conf_usdt["use_exit_signal"] = False + default_conf_usdt["trading_mode"] = "futures" + default_conf_usdt["margin_mode"] = "isolated" + default_conf_usdt["candle_type_def"] = CandleType.FUTURES + default_conf_usdt["minimal_roi"] = {"0": 1} + default_conf_usdt["dry_run_wallet"] = 100000 - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/USDT:USDT'])) - mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", - return_value=(0.01, 0.01)) - default_conf_usdt['timeframe'] = '1h' + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/USDT:USDT"]), + ) + mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0.01, 0.01)) + default_conf_usdt["timeframe"] = "1h" if use_detail: - default_conf_usdt['timeframe_detail'] = '5m' + default_conf_usdt["timeframe_detail"] = "5m" patch_exchange(mocker) def advise_entry(df, *args, **kwargs): # Mock function to force several entries - df.loc[:, 'enter_long'] = 1 + df.loc[:, "enter_long"] = 1 return df def adjust_trade_position(trade, current_time, **kwargs): if current_time > datetime(2021, 11, 18, 2, 0, 0, tzinfo=timezone.utc): return None - return default_conf_usdt['stake_amount'] + return default_conf_usdt["stake_amount"] - default_conf_usdt['max_open_trades'] = 1 + default_conf_usdt["max_open_trades"] = 1 backtesting = Backtesting(default_conf_usdt) - ff_spy = mocker.spy(backtesting.exchange, 'calculate_funding_fees') + ff_spy = mocker.spy(backtesting.exchange, "calculate_funding_fees") backtesting._set_strategy(backtesting.strategylist[0]) backtesting.strategy.populate_entry_trend = advise_entry backtesting.strategy.adjust_trade_position = adjust_trade_position backtesting.strategy.leverage = lambda **kwargs: 1 backtesting.strategy.position_adjustment_enable = True - pair = 'XRP/USDT:USDT' + pair = "XRP/USDT:USDT" # Pick a timerange adapted to the pair we use to test - timerange = TimeRange.parse_timerange('20211117-20211119') - data = history.load_data(datadir=Path(testdatadir), timeframe='1h', pairs=[pair], - timerange=timerange, candle_type=CandleType.FUTURES) + timerange = TimeRange.parse_timerange("20211117-20211119") + data = history.load_data( + datadir=Path(testdatadir), + timeframe="1h", + pairs=[pair], + timerange=timerange, + candle_type=CandleType.FUTURES, + ) backtesting.load_bt_data_detail() processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -1018,12 +1138,12 @@ def test_backtest_one_detail_futures_funding_fees( start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty # Only one result - as we're not selling. 
assert len(results) == 1 - assert 'orders' in results.columns + assert "orders" in results.columns # funding_fees have been calculated for each funding-fee candle # the trade is open for 26 hours - hence we expect the 8h fee to apply 4 times. # Additional counts will happen due each successful entry, which needs to call this, too. @@ -1040,21 +1160,22 @@ def test_backtest_one_detail_futures_funding_fees( def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) -> None: # This strategy intentionally places unfillable orders. - default_conf['strategy'] = 'StrategyTestV3CustomEntryPrice' - default_conf['startup_candle_count'] = 0 + default_conf["strategy"] = "StrategyTestV3CustomEntryPrice" + default_conf["startup_candle_count"] = 0 # Cancel unfilled order after 4 minutes on 5m timeframe. default_conf["unfilledtimeout"] = {"entry": 4} - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) - default_conf['max_open_trades'] = 1 + default_conf["max_open_trades"] = 1 backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) # Testing dataframe contains 11 candles. Expecting 10 timed out orders. - timerange = TimeRange('date', 'date', 1517227800, 1517231100) - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=timerange) + timerange = TimeRange("date", "date", 1517227800, 1517231100) + data = history.load_data( + datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange + ) min_date, max_date = get_timerange(data) result = backtesting.backtest( @@ -1063,23 +1184,24 @@ def test_backtest_timedout_entry_orders(default_conf, fee, mocker, testdatadir) end_date=max_date, ) - assert result['timedout_entry_orders'] == 10 + assert result["timedout_entry_orders"] == 10 def test_backtest_1min_timeframe(default_conf, fee, mocker, testdatadir) -> None: - default_conf['use_exit_signal'] = False - default_conf['max_open_trades'] = 1 - mocker.patch(f'{EXMS}.get_fee', fee) + default_conf["use_exit_signal"] = False + default_conf["max_open_trades"] = 1 + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) # Run a backtesting for an exiting 1min timeframe - timerange = TimeRange.parse_timerange('1510688220-1510700340') - data = history.load_data(datadir=testdatadir, timeframe='1m', pairs=['UNITTEST/BTC'], - timerange=timerange) + timerange = TimeRange.parse_timerange("1510688220-1510700340") + data = history.load_data( + datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"], timerange=timerange + ) processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) results = backtesting.backtest( @@ -1087,30 +1209,31 @@ def test_backtest_1min_timeframe(default_conf, fee, mocker, testdatadir) -> None start_date=min_date, end_date=max_date, ) - assert not results['results'].empty - assert len(results['results']) == 1 + assert not 
results["results"].empty + assert len(results["results"]) == 1 def test_backtest_trim_no_data_left(default_conf, fee, mocker, testdatadir) -> None: - default_conf['use_exit_signal'] = False - default_conf['max_open_trades'] = 10 + default_conf["use_exit_signal"] = False + default_conf["max_open_trades"] = 10 - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - timerange = TimeRange('date', None, 1517227800, 0) + timerange = TimeRange("date", None, 1517227800, 0) backtesting.required_startup = 100 backtesting.timerange = timerange - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=timerange) - df = data['UNITTEST/BTC'] - df['date'] = df.loc[:, 'date'] - timedelta(days=1) + data = history.load_data( + datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange + ) + df = data["UNITTEST/BTC"] + df["date"] = df.loc[:, "date"] - timedelta(days=1) # Trimming 100 candles, so after 2nd trimming, no candle is left. df = df.iloc[:100] - data['XRP/USDT'] = df + data["XRP/USDT"] = df processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -1126,29 +1249,29 @@ def test_processed(default_conf, mocker, testdatadir) -> None: backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - dict_of_tickerrows = load_data_test('raise', testdatadir) + dict_of_tickerrows = load_data_test("raise", testdatadir) dataframes = backtesting.strategy.advise_all_indicators(dict_of_tickerrows) - dataframe = dataframes['UNITTEST/BTC'] + dataframe = dataframes["UNITTEST/BTC"] cols = dataframe.columns # assert the dataframe got some of the indicator columns - for col in ['close', 'high', 'low', 'open', 'date', - 'ema10', 'rsi', 'fastd', 'plus_di']: + for col in ["close", "high", "low", "open", "date", "ema10", "rsi", "fastd", "plus_di"]: assert col in cols def test_backtest_dataprovider_analyzed_df(default_conf, fee, mocker, testdatadir) -> None: - default_conf['use_exit_signal'] = False - default_conf['max_open_trades'] = 10 - default_conf['runmode'] = 'backtest' - mocker.patch(f'{EXMS}.get_fee', fee) + default_conf["use_exit_signal"] = False + default_conf["max_open_trades"] = 10 + default_conf["runmode"] = "backtest" + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=100000) patch_exchange(mocker) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - timerange = TimeRange('date', None, 1517227800, 0) - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=timerange) + timerange = TimeRange("date", None, 1517227800, 0) + data = history.load_data( + datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange + ) processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -1159,17 +1282,18 @@ def test_backtest_dataprovider_analyzed_df(default_conf, fee, mocker, testdatadi dp = backtesting.strategy.dp df, _ = 
dp.get_analyzed_dataframe(pair, backtesting.strategy.timeframe) current_candle = df.iloc[-1].squeeze() - assert current_candle['enter_long'] == 1 + assert current_candle["enter_long"] == 1 - candle_date = timeframe_to_next_date(backtesting.strategy.timeframe, current_candle['date']) + candle_date = timeframe_to_next_date(backtesting.strategy.timeframe, current_candle["date"]) assert candle_date == current_time # These asserts don't properly raise as they are nested, # therefore we increment count and assert for that. df = dp.get_pair_dataframe(pair, backtesting.strategy.timeframe) - prior_time = timeframe_to_prev_date(backtesting.strategy.timeframe, - candle_date - timedelta(seconds=1)) - assert prior_time == df.iloc[-1].squeeze()['date'] - assert df.iloc[-1].squeeze()['date'] < current_time + prior_time = timeframe_to_prev_date( + backtesting.strategy.timeframe, candle_date - timedelta(seconds=1) + ) + assert prior_time == df.iloc[-1].squeeze()["date"] + assert df.iloc[-1].squeeze()["date"] < current_time count += 1 @@ -1186,24 +1310,25 @@ def test_backtest_pricecontours_protections(default_conf, fee, mocker, testdatad # While this test IS a copy of test_backtest_pricecontours, it's needed to ensure # results do not carry-over to the next run, which is not given by using parametrize. patch_exchange(mocker) - default_conf['protections'] = [ + default_conf["protections"] = [ { "method": "CooldownPeriod", "stop_duration": 3, - }] + } + ] - default_conf['enable_protections'] = True - default_conf['timeframe'] = '1m' - default_conf['max_open_trades'] = 1 - mocker.patch(f'{EXMS}.get_fee', fee) + default_conf["enable_protections"] = True + default_conf["timeframe"] = "1m" + default_conf["max_open_trades"] = 1 + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) tests = [ - ['sine', 9], - ['raise', 10], - ['lower', 0], - ['sine', 9], - ['raise', 10], + ["sine", 9], + ["raise", 10], + ["lower", 0], + ["sine", 9], + ["raise", 10], ] backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) @@ -1222,35 +1347,38 @@ def test_backtest_pricecontours_protections(default_conf, fee, mocker, testdatad start_date=min_date, end_date=max_date, ) - assert len(results['results']) == numres + assert len(results["results"]) == numres -@pytest.mark.parametrize('protections,contour,expected', [ - (None, 'sine', 35), - (None, 'raise', 19), - (None, 'lower', 0), - (None, 'sine', 35), - (None, 'raise', 19), - ([{"method": "CooldownPeriod", "stop_duration": 3}], 'sine', 9), - ([{"method": "CooldownPeriod", "stop_duration": 3}], 'raise', 10), - ([{"method": "CooldownPeriod", "stop_duration": 3}], 'lower', 0), - ([{"method": "CooldownPeriod", "stop_duration": 3}], 'sine', 9), - ([{"method": "CooldownPeriod", "stop_duration": 3}], 'raise', 10), -]) -def test_backtest_pricecontours(default_conf, fee, mocker, testdatadir, - protections, contour, expected) -> None: +@pytest.mark.parametrize( + "protections,contour,expected", + [ + (None, "sine", 35), + (None, "raise", 19), + (None, "lower", 0), + (None, "sine", 35), + (None, "raise", 19), + ([{"method": "CooldownPeriod", "stop_duration": 3}], "sine", 9), + ([{"method": "CooldownPeriod", "stop_duration": 3}], "raise", 10), + ([{"method": "CooldownPeriod", "stop_duration": 3}], "lower", 0), + ([{"method": "CooldownPeriod", 
"stop_duration": 3}], "sine", 9), + ([{"method": "CooldownPeriod", "stop_duration": 3}], "raise", 10), + ], +) +def test_backtest_pricecontours( + default_conf, mocker, testdatadir, protections, contour, expected +) -> None: if protections: - default_conf['protections'] = protections - default_conf['enable_protections'] = True + default_conf["protections"] = protections + default_conf["enable_protections"] = True + patch_exchange(mocker) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) # While entry-signals are unrealistic, running backtesting # over and over again should not cause different results - patch_exchange(mocker) - default_conf['timeframe'] = '1m' + default_conf["timeframe"] = "1m" backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) @@ -1259,13 +1387,13 @@ def test_backtest_pricecontours(default_conf, fee, mocker, testdatadir, min_date, max_date = get_timerange(processed) assert isinstance(processed, dict) backtesting.strategy.max_open_trades = 1 - backtesting.config.update({'max_open_trades': 1}) + backtesting.config.update({"max_open_trades": 1}) results = backtesting.backtest( processed=processed, start_date=min_date, end_date=max_date, ) - assert len(results['results']) == expected + assert len(results["results"]) == expected def test_backtest_clash_buy_sell(mocker, default_conf, testdatadir): @@ -1274,14 +1402,15 @@ def test_backtest_clash_buy_sell(mocker, default_conf, testdatadir): buy_value = 1 sell_value = 1 return _trend(dataframe, buy_value, sell_value) - default_conf['max_open_trades'] = 10 + + default_conf["max_open_trades"] = 10 backtest_conf = _make_backtest_conf(mocker, conf=default_conf, datadir=testdatadir) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) backtesting.strategy.advise_entry = fun # Override backtesting.strategy.advise_exit = fun # Override result = backtesting.backtest(**backtest_conf) - assert result['results'].empty + assert result["results"].empty def test_backtest_only_sell(mocker, default_conf, testdatadir): @@ -1291,25 +1420,26 @@ def test_backtest_only_sell(mocker, default_conf, testdatadir): sell_value = 1 return _trend(dataframe, buy_value, sell_value) - default_conf['max_open_trades'] = 10 + default_conf["max_open_trades"] = 10 backtest_conf = _make_backtest_conf(mocker, conf=default_conf, datadir=testdatadir) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) backtesting.strategy.advise_entry = fun # Override backtesting.strategy.advise_exit = fun # Override result = backtesting.backtest(**backtest_conf) - assert result['results'].empty + assert result["results"].empty def test_backtest_alternate_buy_sell(default_conf, fee, mocker, testdatadir): mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - mocker.patch(f'{EXMS}.get_fee', fee) - default_conf['max_open_trades'] = 10 - default_conf['runmode'] = 'backtest' - backtest_conf = _make_backtest_conf(mocker, conf=default_conf, - pair='UNITTEST/BTC', datadir=testdatadir) - default_conf['timeframe'] = '1m' + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + mocker.patch(f"{EXMS}.get_fee", fee) + 
default_conf["max_open_trades"] = 10 + default_conf["runmode"] = "backtest" + backtest_conf = _make_backtest_conf( + mocker, conf=default_conf, pair="UNITTEST/BTC", datadir=testdatadir + ) + default_conf["timeframe"] = "1m" backtesting = Backtesting(default_conf) backtesting.required_startup = 0 backtesting._set_strategy(backtesting.strategylist[0]) @@ -1319,54 +1449,53 @@ def test_backtest_alternate_buy_sell(default_conf, fee, mocker, testdatadir): # 200 candles in backtest data # won't buy on first (shifted by 1) # 100 buys signals - results = result['results'] + results = result["results"] assert len(results) == 100 # Cached data should be 200 - analyzed_df = backtesting.dataprovider.get_analyzed_dataframe('UNITTEST/BTC', '1m')[0] + analyzed_df = backtesting.dataprovider.get_analyzed_dataframe("UNITTEST/BTC", "1m")[0] assert len(analyzed_df) == 200 # Expect last candle to be 1 below end date (as the last candle is assumed as "incomplete" # during backtesting) - expected_last_candle_date = backtest_conf['end_date'] - timedelta(minutes=1) - assert analyzed_df.iloc[-1]['date'].to_pydatetime() == expected_last_candle_date + expected_last_candle_date = backtest_conf["end_date"] - timedelta(minutes=1) + assert analyzed_df.iloc[-1]["date"].to_pydatetime() == expected_last_candle_date # One trade was force-closed at the end - assert len(results.loc[results['is_open']]) == 0 + assert len(results.loc[results["is_open"]]) == 0 -@pytest.mark.parametrize("pair", ['ADA/BTC', 'LTC/BTC']) +@pytest.mark.parametrize("pair", ["ADA/BTC", "LTC/BTC"]) @pytest.mark.parametrize("tres", [0, 20, 30]) def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir): - def _trend_alternate_hold(dataframe=None, metadata=None): """ Buy every xth candle - sell every other xth -2 (hold on to pairs a bit) """ - if metadata['pair'] in ('ETH/BTC', 'LTC/BTC'): + if metadata["pair"] in ("ETH/BTC", "LTC/BTC"): multi = 20 else: multi = 18 - dataframe['enter_long'] = np.where(dataframe.index % multi == 0, 1, 0) - dataframe['exit_long'] = np.where((dataframe.index + multi - 2) % multi == 0, 1, 0) - dataframe['enter_short'] = 0 - dataframe['exit_short'] = 0 + dataframe["enter_long"] = np.where(dataframe.index % multi == 0, 1, 0) + dataframe["exit_long"] = np.where((dataframe.index + multi - 2) % multi == 0, 1, 0) + dataframe["enter_short"] = 0 + dataframe["exit_short"] = 0 return dataframe - default_conf['runmode'] = 'backtest' + default_conf["runmode"] = "backtest" mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + mocker.patch(f"{EXMS}.get_fee", fee) patch_exchange(mocker) - pairs = ['ADA/BTC', 'DASH/BTC', 'ETH/BTC', 'LTC/BTC', 'NXT/BTC'] - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=pairs) + pairs = ["ADA/BTC", "DASH/BTC", "ETH/BTC", "LTC/BTC", "NXT/BTC"] + data = history.load_data(datadir=testdatadir, timeframe="5m", pairs=pairs) # Only use 500 lines to increase performance data = trim_dictlist(data, -500) # Remove data for one pair from the beginning of the data if tres > 0: data[pair] = data[pair][tres:].reset_index() - default_conf['timeframe'] = '5m' - default_conf['max_open_trades'] = 3 + default_conf["timeframe"] = "5m" + default_conf["max_open_trades"] = 3 backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) @@ -1377,70 
+1506,75 @@ def test_backtest_multi_pair(default_conf, fee, mocker, tres, pair, testdatadir) min_date, max_date = get_timerange(processed) backtest_conf = { - 'processed': deepcopy(processed), - 'start_date': min_date, - 'end_date': max_date, + "processed": deepcopy(processed), + "start_date": min_date, + "end_date": max_date, } results = backtesting.backtest(**backtest_conf) # Make sure we have parallel trades - assert len(evaluate_result_multi(results['results'], '5m', 2)) > 0 + assert len(evaluate_result_multi(results["results"], "5m", 2)) > 0 # make sure we don't have trades with more than configured max_open_trades - assert len(evaluate_result_multi(results['results'], '5m', 3)) == 0 + assert len(evaluate_result_multi(results["results"], "5m", 3)) == 0 # Cached data correctly removed amounts offset = 1 if tres == 0 else 0 removed_candles = len(data[pair]) - offset - assert len(backtesting.dataprovider.get_analyzed_dataframe(pair, '5m')[0]) == removed_candles - assert len( - backtesting.dataprovider.get_analyzed_dataframe('NXT/BTC', '5m')[0] - ) == len(data['NXT/BTC']) - 1 + assert len(backtesting.dataprovider.get_analyzed_dataframe(pair, "5m")[0]) == removed_candles + assert ( + len(backtesting.dataprovider.get_analyzed_dataframe("NXT/BTC", "5m")[0]) + == len(data["NXT/BTC"]) - 1 + ) backtesting.strategy.max_open_trades = 1 - backtesting.config.update({'max_open_trades': 1}) + backtesting.config.update({"max_open_trades": 1}) backtest_conf = { - 'processed': deepcopy(processed), - 'start_date': min_date, - 'end_date': max_date, + "processed": deepcopy(processed), + "start_date": min_date, + "end_date": max_date, } results = backtesting.backtest(**backtest_conf) - assert len(evaluate_result_multi(results['results'], '5m', 1)) == 0 + assert len(evaluate_result_multi(results["results"], "5m", 1)) == 0 def test_backtest_start_timerange(default_conf, mocker, caplog, testdatadir): - patch_exchange(mocker) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest') - mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats') - mocker.patch('freqtrade.optimize.backtesting.show_backtest_results') - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest") + mocker.patch("freqtrade.optimize.backtesting.generate_backtest_stats") + mocker.patch("freqtrade.optimize.backtesting.show_backtest_results") + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--datadir', str(testdatadir), - '--timeframe', '1m', - '--timerange', '1510694220-1510700340', - '--enable-position-stacking', - '--disable-max-market-positions' + "backtesting", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--datadir", + str(testdatadir), + "--timeframe", + "1m", + "--timerange", + "1510694220-1510700340", + "--enable-position-stacking", + "--disable-max-market-positions", ] args = get_args(args) start_backtesting(args) # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... 
Using timeframe: 1m ...', - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...', - 'Parameter --timerange detected: 1510694220-1510700340 ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2017-11-14 20:57:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Backtesting with data from 2017-11-14 21:17:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Parameter --enable-position-stacking detected ...' + "Parameter -i/--timeframe detected ... Using timeframe: 1m ...", + "Ignoring max_open_trades (--disable-max-market-positions was used) ...", + "Parameter --timerange detected: 1510694220-1510700340 ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).", + "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).", + "Parameter --enable-position-stacking detected ...", ] for line in exists: @@ -1449,58 +1583,70 @@ def test_backtest_start_timerange(default_conf, mocker, caplog, testdatadir): @pytest.mark.filterwarnings("ignore:deprecated") def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir): - - default_conf.update({ - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - }) + default_conf.update( + { + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, + } + ) patch_exchange(mocker) - backtestmock = MagicMock(return_value={ - 'results': pd.DataFrame(columns=BT_DATA_COLUMNS), - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, - }) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) + backtestmock = MagicMock( + return_value={ + "results": pd.DataFrame(columns=BT_DATA_COLUMNS), + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + } + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) text_table_mock = MagicMock() tag_metrics_mock = MagicMock() strattable_mock = MagicMock() strat_summary = MagicMock() - mocker.patch.multiple('freqtrade.optimize.optimize_reports.bt_output', - text_table_bt_results=text_table_mock, - text_table_strategy=strattable_mock, - ) - mocker.patch.multiple('freqtrade.optimize.optimize_reports.optimize_reports', - generate_pair_metrics=MagicMock(), - generate_tag_metrics=tag_metrics_mock, - generate_strategy_comparison=strat_summary, - generate_daily_stats=MagicMock(), - ) + mocker.patch.multiple( + "freqtrade.optimize.optimize_reports.bt_output", + text_table_bt_results=text_table_mock, + text_table_strategy=strattable_mock, + ) + mocker.patch.multiple( + "freqtrade.optimize.optimize_reports.optimize_reports", + generate_pair_metrics=MagicMock(), + generate_tag_metrics=tag_metrics_mock, + generate_strategy_comparison=strat_summary, + generate_daily_stats=MagicMock(), + ) 
patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '1m', - '--timerange', '1510694220-1510700340', - '--enable-position-stacking', - '--disable-max-market-positions', - '--strategy-list', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1m", + "--timerange", + "1510694220-1510700340", + "--enable-position-stacking", + "--disable-max-market-positions", + "--strategy-list", CURRENT_TEST_STRATEGY, - 'StrategyTestV2', + "StrategyTestV2", ] args = get_args(args) start_backtesting(args) @@ -1513,17 +1659,15 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir): # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...', - 'Parameter --timerange detected: 1510694220-1510700340 ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2017-11-14 20:57:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Backtesting with data from 2017-11-14 21:17:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Parameter --enable-position-stacking detected ...', - f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}', - 'Running backtesting for Strategy StrategyTestV2', + "Parameter -i/--timeframe detected ... Using timeframe: 1m ...", + "Ignoring max_open_trades (--disable-max-market-positions was used) ...", + "Parameter --timerange detected: 1510694220-1510700340 ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).", + "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).", + "Parameter --enable-position-stacking detected ...", + f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}", + "Running backtesting for Strategy StrategyTestV2", ] for line in exists: @@ -1531,151 +1675,181 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir): def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdatadir, capsys): - default_conf.update({ - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - }) - patch_exchange(mocker) - result1 = pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC'], - 'profit_ratio': [0.0, 0.0], - 'profit_abs': [0.0, 0.0], - 'open_date': pd.to_datetime(['2018-01-29 18:40:00', - '2018-01-30 03:30:00', ], utc=True - ), - 'close_date': pd.to_datetime(['2018-01-29 20:45:00', - '2018-01-30 05:35:00', ], utc=True), - 'trade_duration': [235, 40], - 'is_open': [False, False], - 'stake_amount': [0.01, 0.01], - 'open_rate': [0.104445, 0.10302485], - 'close_rate': [0.104969, 0.103541], - "is_short": [False, False], - - 'exit_reason': [ExitType.ROI, ExitType.ROI] - }) - result2 = pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC', 'ETH/BTC'], - 'profit_ratio': [0.03, 0.01, 0.1], - 'profit_abs': [0.01, 0.02, 0.2], - 'open_date': pd.to_datetime(['2018-01-29 18:40:00', - '2018-01-30 03:30:00', - '2018-01-30 05:30:00'], utc=True - ), - 'close_date': pd.to_datetime(['2018-01-29 20:45:00', - '2018-01-30 05:35:00', - '2018-01-30 08:30:00'], utc=True), - 'trade_duration': [47, 40, 20], - 'is_open': [False, 
False, False], - 'stake_amount': [0.01, 0.01, 0.01], - 'open_rate': [0.104445, 0.10302485, 0.122541], - 'close_rate': [0.104969, 0.103541, 0.123541], - "is_short": [False, False, False], - 'exit_reason': [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS] - }) - backtestmock = MagicMock(side_effect=[ + default_conf.update( { - 'results': result1, - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, - }, - { - 'results': result2, - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, } - ]) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) + ) + patch_exchange(mocker) + result1 = pd.DataFrame( + { + "pair": ["XRP/BTC", "LTC/BTC"], + "profit_ratio": [0.0, 0.0], + "profit_abs": [0.0, 0.0], + "open_date": pd.to_datetime( + [ + "2018-01-29 18:40:00", + "2018-01-30 03:30:00", + ], + utc=True, + ), + "close_date": pd.to_datetime( + [ + "2018-01-29 20:45:00", + "2018-01-30 05:35:00", + ], + utc=True, + ), + "trade_duration": [235, 40], + "is_open": [False, False], + "stake_amount": [0.01, 0.01], + "open_rate": [0.104445, 0.10302485], + "close_rate": [0.104969, 0.103541], + "is_short": [False, False], + "exit_reason": [ExitType.ROI, ExitType.ROI], + } + ) + result2 = pd.DataFrame( + { + "pair": ["XRP/BTC", "LTC/BTC", "ETH/BTC"], + "profit_ratio": [0.03, 0.01, 0.1], + "profit_abs": [0.01, 0.02, 0.2], + "open_date": pd.to_datetime( + ["2018-01-29 18:40:00", "2018-01-30 03:30:00", "2018-01-30 05:30:00"], utc=True + ), + "close_date": pd.to_datetime( + ["2018-01-29 20:45:00", "2018-01-30 05:35:00", "2018-01-30 08:30:00"], utc=True + ), + "trade_duration": [47, 40, 20], + "is_open": [False, False, False], + "stake_amount": [0.01, 0.01, 0.01], + "open_rate": [0.104445, 0.10302485, 0.122541], + "close_rate": [0.104969, 0.103541, 0.123541], + "is_short": [False, False, False], + "exit_reason": [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS], + } + ) + backtestmock = MagicMock( + side_effect=[ + { + "results": result1, + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + { + "results": result2, + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + ] + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 
'strategy/strats'), - '--timeframe', '1m', - '--timerange', '1510694220-1510700340', - '--enable-position-stacking', - '--disable-max-market-positions', - '--breakdown', 'day', - '--strategy-list', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1m", + "--timerange", + "1510694220-1510700340", + "--enable-position-stacking", + "--disable-max-market-positions", + "--breakdown", + "day", + "--strategy-list", CURRENT_TEST_STRATEGY, - 'StrategyTestV2', + "StrategyTestV2", ] args = get_args(args) start_backtesting(args) # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...', - 'Parameter --timerange detected: 1510694220-1510700340 ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2017-11-14 20:57:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Backtesting with data from 2017-11-14 21:17:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Parameter --enable-position-stacking detected ...', - f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}', - 'Running backtesting for Strategy StrategyTestV2', + "Parameter -i/--timeframe detected ... Using timeframe: 1m ...", + "Ignoring max_open_trades (--disable-max-market-positions was used) ...", + "Parameter --timerange detected: 1510694220-1510700340 ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).", + "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).", + "Parameter --enable-position-stacking detected ...", + f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}", + "Running backtesting for Strategy StrategyTestV2", ] for line in exists: assert log_has(line, caplog) captured = capsys.readouterr() - assert 'BACKTESTING REPORT' in captured.out - assert 'EXIT REASON STATS' in captured.out - assert 'DAY BREAKDOWN' in captured.out - assert 'LEFT OPEN TRADES REPORT' in captured.out - assert '2017-11-14 21:17:00 -> 2017-11-14 22:59:00 | Max open trades : 1' in captured.out - assert 'STRATEGY SUMMARY' in captured.out + assert "BACKTESTING REPORT" in captured.out + assert "EXIT REASON STATS" in captured.out + assert "DAY BREAKDOWN" in captured.out + assert "LEFT OPEN TRADES REPORT" in captured.out + assert "2017-11-14 21:17:00 -> 2017-11-14 22:59:00 | Max open trades : 1" in captured.out + assert "STRATEGY SUMMARY" in captured.out @pytest.mark.filterwarnings("ignore:deprecated") -def test_backtest_start_futures_noliq(default_conf_usdt, mocker, - caplog, testdatadir, capsys): +def test_backtest_start_futures_noliq(default_conf_usdt, mocker, caplog, testdatadir, capsys): # Tests detail-data loading - default_conf_usdt.update({ - "trading_mode": "futures", - "margin_mode": "isolated", - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - "strategy": CURRENT_TEST_STRATEGY, - }) + default_conf_usdt.update( + { + "trading_mode": "futures", + "margin_mode": "isolated", + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, + "strategy": CURRENT_TEST_STRATEGY, + } + ) patch_exchange(mocker) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - 
PropertyMock(return_value=['HULUMULU/USDT', 'XRP/USDT:USDT'])) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["HULUMULU/USDT", "XRP/USDT:USDT"]), + ) # mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) patched_configuration_load_config_file(mocker, default_conf_usdt) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '1h', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1h", ] args = get_args(args) with pytest.raises(OperationalException, match=r"Pairs .* got no leverage tiers available\."): @@ -1683,343 +1857,410 @@ def test_backtest_start_futures_noliq(default_conf_usdt, mocker, @pytest.mark.filterwarnings("ignore:deprecated") -def test_backtest_start_nomock_futures(default_conf_usdt, mocker, - caplog, testdatadir, capsys): +def test_backtest_start_nomock_futures(default_conf_usdt, mocker, caplog, testdatadir, capsys): # Tests detail-data loading - default_conf_usdt.update({ - "trading_mode": "futures", - "margin_mode": "isolated", - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - "strategy": CURRENT_TEST_STRATEGY, - }) - patch_exchange(mocker) - result1 = pd.DataFrame({'pair': ['XRP/USDT:USDT', 'XRP/USDT:USDT'], - 'profit_ratio': [0.0, 0.0], - 'profit_abs': [0.0, 0.0], - 'open_date': pd.to_datetime(['2021-11-18 18:00:00', - '2021-11-18 03:00:00', ], utc=True - ), - 'close_date': pd.to_datetime(['2021-11-18 20:00:00', - '2021-11-18 05:00:00', ], utc=True), - 'trade_duration': [235, 40], - 'is_open': [False, False], - 'is_short': [False, False], - 'stake_amount': [0.01, 0.01], - 'open_rate': [0.104445, 0.10302485], - 'close_rate': [0.104969, 0.103541], - 'exit_reason': [ExitType.ROI, ExitType.ROI] - }) - result2 = pd.DataFrame({'pair': ['XRP/USDT:USDT', 'XRP/USDT:USDT', 'XRP/USDT:USDT'], - 'profit_ratio': [0.03, 0.01, 0.1], - 'profit_abs': [0.01, 0.02, 0.2], - 'open_date': pd.to_datetime(['2021-11-19 18:00:00', - '2021-11-19 03:00:00', - '2021-11-19 05:00:00'], utc=True - ), - 'close_date': pd.to_datetime(['2021-11-19 20:00:00', - '2021-11-19 05:00:00', - '2021-11-19 08:00:00'], utc=True), - 'trade_duration': [47, 40, 20], - 'is_open': [False, False, False], - 'is_short': [False, False, False], - 'stake_amount': [0.01, 0.01, 0.01], - 'open_rate': [0.104445, 0.10302485, 0.122541], - 'close_rate': [0.104969, 0.103541, 0.123541], - 'exit_reason': [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS] - }) - backtestmock = MagicMock(side_effect=[ + default_conf_usdt.update( { - 'results': result1, - 'config': default_conf_usdt, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, - }, - { - 'results': result2, - 'config': default_conf_usdt, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, + "trading_mode": "futures", + "margin_mode": "isolated", + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + 
"ignore_roi_if_entry_signal": False, + "strategy": CURRENT_TEST_STRATEGY, } - ]) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/USDT:USDT'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) + ) + patch_exchange(mocker) + result1 = pd.DataFrame( + { + "pair": ["XRP/USDT:USDT", "XRP/USDT:USDT"], + "profit_ratio": [0.0, 0.0], + "profit_abs": [0.0, 0.0], + "open_date": pd.to_datetime( + [ + "2021-11-18 18:00:00", + "2021-11-18 03:00:00", + ], + utc=True, + ), + "close_date": pd.to_datetime( + [ + "2021-11-18 20:00:00", + "2021-11-18 05:00:00", + ], + utc=True, + ), + "trade_duration": [235, 40], + "is_open": [False, False], + "is_short": [False, False], + "stake_amount": [0.01, 0.01], + "open_rate": [0.104445, 0.10302485], + "close_rate": [0.104969, 0.103541], + "exit_reason": [ExitType.ROI, ExitType.ROI], + } + ) + result2 = pd.DataFrame( + { + "pair": ["XRP/USDT:USDT", "XRP/USDT:USDT", "XRP/USDT:USDT"], + "profit_ratio": [0.03, 0.01, 0.1], + "profit_abs": [0.01, 0.02, 0.2], + "open_date": pd.to_datetime( + ["2021-11-19 18:00:00", "2021-11-19 03:00:00", "2021-11-19 05:00:00"], utc=True + ), + "close_date": pd.to_datetime( + ["2021-11-19 20:00:00", "2021-11-19 05:00:00", "2021-11-19 08:00:00"], utc=True + ), + "trade_duration": [47, 40, 20], + "is_open": [False, False, False], + "is_short": [False, False, False], + "stake_amount": [0.01, 0.01, 0.01], + "open_rate": [0.104445, 0.10302485, 0.122541], + "close_rate": [0.104969, 0.103541, 0.123541], + "exit_reason": [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS], + } + ) + backtestmock = MagicMock( + side_effect=[ + { + "results": result1, + "config": default_conf_usdt, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + { + "results": result2, + "config": default_conf_usdt, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + ] + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/USDT:USDT"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) patched_configuration_load_config_file(mocker, default_conf_usdt) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '1h', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1h", ] args = get_args(args) start_backtesting(args) # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... Using timeframe: 1h ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2021-11-17 01:00:00 ' - 'up to 2021-11-21 04:00:00 (4 days).', - 'Backtesting with data from 2021-11-17 21:00:00 ' - 'up to 2021-11-21 04:00:00 (3 days).', - 'XRP/USDT:USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00', - 'XRP/USDT:USDT, mark, 8h, data starts at 2021-11-18 00:00:00', - f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}', + "Parameter -i/--timeframe detected ... 
Using timeframe: 1h ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2021-11-17 01:00:00 up to 2021-11-21 04:00:00 (4 days).", + "Backtesting with data from 2021-11-17 21:00:00 up to 2021-11-21 04:00:00 (3 days).", + "XRP/USDT:USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00", + "XRP/USDT:USDT, mark, 8h, data starts at 2021-11-18 00:00:00", + f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}", ] for line in exists: assert log_has(line, caplog) captured = capsys.readouterr() - assert 'BACKTESTING REPORT' in captured.out - assert 'EXIT REASON STATS' in captured.out - assert 'LEFT OPEN TRADES REPORT' in captured.out + assert "BACKTESTING REPORT" in captured.out + assert "EXIT REASON STATS" in captured.out + assert "LEFT OPEN TRADES REPORT" in captured.out @pytest.mark.filterwarnings("ignore:deprecated") -def test_backtest_start_multi_strat_nomock_detail(default_conf, mocker, - caplog, testdatadir, capsys): +def test_backtest_start_multi_strat_nomock_detail( + default_conf, mocker, caplog, testdatadir, capsys +): # Tests detail-data loading - default_conf.update({ - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - }) - patch_exchange(mocker) - result1 = pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC'], - 'profit_ratio': [0.0, 0.0], - 'profit_abs': [0.0, 0.0], - 'open_date': pd.to_datetime(['2018-01-29 18:40:00', - '2018-01-30 03:30:00', ], utc=True - ), - 'close_date': pd.to_datetime(['2018-01-29 20:45:00', - '2018-01-30 05:35:00', ], utc=True), - 'trade_duration': [235, 40], - 'is_open': [False, False], - 'is_short': [False, False], - 'stake_amount': [0.01, 0.01], - 'open_rate': [0.104445, 0.10302485], - 'close_rate': [0.104969, 0.103541], - 'exit_reason': [ExitType.ROI, ExitType.ROI] - }) - result2 = pd.DataFrame({'pair': ['XRP/BTC', 'LTC/BTC', 'ETH/BTC'], - 'profit_ratio': [0.03, 0.01, 0.1], - 'profit_abs': [0.01, 0.02, 0.2], - 'open_date': pd.to_datetime(['2018-01-29 18:40:00', - '2018-01-30 03:30:00', - '2018-01-30 05:30:00'], utc=True - ), - 'close_date': pd.to_datetime(['2018-01-29 20:45:00', - '2018-01-30 05:35:00', - '2018-01-30 08:30:00'], utc=True), - 'trade_duration': [47, 40, 20], - 'is_open': [False, False, False], - 'is_short': [False, False, False], - 'stake_amount': [0.01, 0.01, 0.01], - 'open_rate': [0.104445, 0.10302485, 0.122541], - 'close_rate': [0.104969, 0.103541, 0.123541], - 'exit_reason': [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS] - }) - backtestmock = MagicMock(side_effect=[ + default_conf.update( { - 'results': result1, - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, - }, - { - 'results': result2, - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, } - ]) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['XRP/ETH'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) + ) + patch_exchange(mocker) + result1 = pd.DataFrame( + { + "pair": ["XRP/BTC", "LTC/BTC"], + 
"profit_ratio": [0.0, 0.0], + "profit_abs": [0.0, 0.0], + "open_date": pd.to_datetime( + [ + "2018-01-29 18:40:00", + "2018-01-30 03:30:00", + ], + utc=True, + ), + "close_date": pd.to_datetime( + [ + "2018-01-29 20:45:00", + "2018-01-30 05:35:00", + ], + utc=True, + ), + "trade_duration": [235, 40], + "is_open": [False, False], + "is_short": [False, False], + "stake_amount": [0.01, 0.01], + "open_rate": [0.104445, 0.10302485], + "close_rate": [0.104969, 0.103541], + "exit_reason": [ExitType.ROI, ExitType.ROI], + } + ) + result2 = pd.DataFrame( + { + "pair": ["XRP/BTC", "LTC/BTC", "ETH/BTC"], + "profit_ratio": [0.03, 0.01, 0.1], + "profit_abs": [0.01, 0.02, 0.2], + "open_date": pd.to_datetime( + ["2018-01-29 18:40:00", "2018-01-30 03:30:00", "2018-01-30 05:30:00"], utc=True + ), + "close_date": pd.to_datetime( + ["2018-01-29 20:45:00", "2018-01-30 05:35:00", "2018-01-30 08:30:00"], utc=True + ), + "trade_duration": [47, 40, 20], + "is_open": [False, False, False], + "is_short": [False, False, False], + "stake_amount": [0.01, 0.01, 0.01], + "open_rate": [0.104445, 0.10302485, 0.122541], + "close_rate": [0.104969, 0.103541, 0.123541], + "exit_reason": [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS], + } + ) + backtestmock = MagicMock( + side_effect=[ + { + "results": result1, + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + { + "results": result2, + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + }, + ] + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["XRP/ETH"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '5m', - '--timeframe-detail', '1m', - '--strategy-list', - CURRENT_TEST_STRATEGY + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "5m", + "--timeframe-detail", + "1m", + "--strategy-list", + CURRENT_TEST_STRATEGY, ] args = get_args(args) start_backtesting(args) # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... Using timeframe: 5m ...', - 'Parameter --timeframe-detail detected, using 1m for intra-candle backtesting ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2019-10-11 00:00:00 ' - 'up to 2019-10-13 11:15:00 (2 days).', - 'Backtesting with data from 2019-10-11 01:40:00 ' - 'up to 2019-10-13 11:15:00 (2 days).', - f'Running backtesting for Strategy {CURRENT_TEST_STRATEGY}', + "Parameter -i/--timeframe detected ... 
Using timeframe: 5m ...", + "Parameter --timeframe-detail detected, using 1m for intra-candle backtesting ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2019-10-11 00:00:00 up to 2019-10-13 11:15:00 (2 days).", + "Backtesting with data from 2019-10-11 01:40:00 up to 2019-10-13 11:15:00 (2 days).", + f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}", ] for line in exists: assert log_has(line, caplog) captured = capsys.readouterr() - assert 'BACKTESTING REPORT' in captured.out - assert 'EXIT REASON STATS' in captured.out - assert 'LEFT OPEN TRADES REPORT' in captured.out + assert "BACKTESTING REPORT" in captured.out + assert "EXIT REASON STATS" in captured.out + assert "LEFT OPEN TRADES REPORT" in captured.out @pytest.mark.filterwarnings("ignore:deprecated") -@pytest.mark.parametrize('run_id', ['2', 'changed']) -@pytest.mark.parametrize('start_delta', [{'days': 0}, {'days': 1}, {'weeks': 1}, {'weeks': 4}]) -@pytest.mark.parametrize('cache', constants.BACKTEST_CACHE_AGE) -def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testdatadir, run_id, - start_delta, cache): - default_conf.update({ - "use_exit_signal": True, - "exit_profit_only": False, - "exit_profit_offset": 0.0, - "ignore_roi_if_entry_signal": False, - }) +@pytest.mark.parametrize("run_id", ["2", "changed"]) +@pytest.mark.parametrize("start_delta", [{"days": 0}, {"days": 1}, {"weeks": 1}, {"weeks": 4}]) +@pytest.mark.parametrize("cache", constants.BACKTEST_CACHE_AGE) +def test_backtest_start_multi_strat_caching( + default_conf, mocker, caplog, testdatadir, run_id, start_delta, cache +): + default_conf.update( + { + "use_exit_signal": True, + "exit_profit_only": False, + "exit_profit_offset": 0.0, + "ignore_roi_if_entry_signal": False, + } + ) patch_exchange(mocker) - backtestmock = MagicMock(return_value={ - 'results': pd.DataFrame(columns=BT_DATA_COLUMNS), - 'config': default_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, - }) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock) - mocker.patch('freqtrade.optimize.backtesting.show_backtest_results', MagicMock()) + backtestmock = MagicMock( + return_value={ + "results": pd.DataFrame(columns=BT_DATA_COLUMNS), + "config": default_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, + } + ) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock) + mocker.patch("freqtrade.optimize.backtesting.show_backtest_results", MagicMock()) now = min_backtest_date = datetime.now(tz=timezone.utc) start_time = now - timedelta(**start_delta) + timedelta(hours=1) - if cache == 'none': + if cache == "none": min_backtest_date = now + timedelta(days=1) - elif cache == 'day': + elif cache == "day": min_backtest_date = now - timedelta(days=1) - elif cache == 'week': + elif cache == "week": min_backtest_date = now - timedelta(weeks=1) - elif cache == 'month': + elif cache == "month": min_backtest_date = 
now - timedelta(weeks=4) - load_backtest_metadata = MagicMock(return_value={ - 'StrategyTestV2': {'run_id': '1', 'backtest_start_time': now.timestamp()}, - 'StrategyTestV3': {'run_id': run_id, 'backtest_start_time': start_time.timestamp()} - }) - load_backtest_stats = MagicMock(side_effect=[ - { - 'metadata': {'StrategyTestV2': {'run_id': '1'}}, - 'strategy': {'StrategyTestV2': {}}, - 'strategy_comparison': [{'key': 'StrategyTestV2'}] - }, - { - 'metadata': {'StrategyTestV3': {'run_id': '2'}}, - 'strategy': {'StrategyTestV3': {}}, - 'strategy_comparison': [{'key': 'StrategyTestV3'}] + load_backtest_metadata = MagicMock( + return_value={ + "StrategyTestV2": {"run_id": "1", "backtest_start_time": now.timestamp()}, + "StrategyTestV3": {"run_id": run_id, "backtest_start_time": start_time.timestamp()}, } - ]) - mocker.patch('pathlib.Path.glob', return_value=[ - Path(datetime.strftime(datetime.now(), 'backtest-result-%Y-%m-%d_%H-%M-%S.json'))]) - mocker.patch.multiple('freqtrade.data.btanalysis', - load_backtest_metadata=load_backtest_metadata, - load_backtest_stats=load_backtest_stats) - mocker.patch('freqtrade.optimize.backtesting.get_strategy_run_id', side_effect=['1', '2', '2']) + ) + load_backtest_stats = MagicMock( + side_effect=[ + { + "metadata": {"StrategyTestV2": {"run_id": "1"}}, + "strategy": {"StrategyTestV2": {}}, + "strategy_comparison": [{"key": "StrategyTestV2"}], + }, + { + "metadata": {"StrategyTestV3": {"run_id": "2"}}, + "strategy": {"StrategyTestV3": {}}, + "strategy_comparison": [{"key": "StrategyTestV3"}], + }, + ] + ) + mocker.patch( + "pathlib.Path.glob", + return_value=[ + Path(datetime.strftime(datetime.now(), "backtest-result-%Y-%m-%d_%H-%M-%S.json")) + ], + ) + mocker.patch.multiple( + "freqtrade.data.btanalysis", + load_backtest_metadata=load_backtest_metadata, + load_backtest_stats=load_backtest_stats, + ) + mocker.patch("freqtrade.optimize.backtesting.get_strategy_run_id", side_effect=["1", "2", "2"]) patched_configuration_load_config_file(mocker, default_conf) args = [ - 'backtesting', - '--config', 'config.json', - '--datadir', str(testdatadir), - '--strategy-path', str(Path(__file__).parents[1] / 'strategy/strats'), - '--timeframe', '1m', - '--timerange', '1510694220-1510700340', - '--enable-position-stacking', - '--disable-max-market-positions', - '--cache', cache, - '--strategy-list', - 'StrategyTestV2', - 'StrategyTestV3', + "backtesting", + "--config", + "config.json", + "--datadir", + str(testdatadir), + "--strategy-path", + str(Path(__file__).parents[1] / "strategy/strats"), + "--timeframe", + "1m", + "--timerange", + "1510694220-1510700340", + "--enable-position-stacking", + "--disable-max-market-positions", + "--cache", + cache, + "--strategy-list", + "StrategyTestV2", + "StrategyTestV3", ] args = get_args(args) start_backtesting(args) # check the logs, that will contain the backtest result exists = [ - 'Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - 'Parameter --timerange detected: 1510694220-1510700340 ...', - f'Using data directory: {testdatadir} ...', - 'Loading data from 2017-11-14 20:57:00 ' - 'up to 2017-11-14 22:59:00 (0 days).', - 'Parameter --enable-position-stacking detected ...', + "Parameter -i/--timeframe detected ... 
Using timeframe: 1m ...", + "Parameter --timerange detected: 1510694220-1510700340 ...", + f"Using data directory: {testdatadir} ...", + "Loading data from 2017-11-14 20:57:00 " "up to 2017-11-14 22:59:00 (0 days).", + "Parameter --enable-position-stacking detected ...", ] for line in exists: assert log_has(line, caplog) - if cache == 'none': + if cache == "none": assert backtestmock.call_count == 2 exists = [ - 'Running backtesting for Strategy StrategyTestV2', - 'Running backtesting for Strategy StrategyTestV3', - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...', - 'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).', + "Running backtesting for Strategy StrategyTestV2", + "Running backtesting for Strategy StrategyTestV3", + "Ignoring max_open_trades (--disable-max-market-positions was used) ...", + "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).", ] - elif run_id == '2' and min_backtest_date < start_time: + elif run_id == "2" and min_backtest_date < start_time: assert backtestmock.call_count == 0 exists = [ - 'Reusing result of previous backtest for StrategyTestV2', - 'Reusing result of previous backtest for StrategyTestV3', + "Reusing result of previous backtest for StrategyTestV2", + "Reusing result of previous backtest for StrategyTestV3", ] else: exists = [ - 'Reusing result of previous backtest for StrategyTestV2', - 'Running backtesting for Strategy StrategyTestV3', - 'Ignoring max_open_trades (--disable-max-market-positions was used) ...', - 'Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).', + "Reusing result of previous backtest for StrategyTestV2", + "Running backtesting for Strategy StrategyTestV3", + "Ignoring max_open_trades (--disable-max-market-positions was used) ...", + "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).", ] assert backtestmock.call_count == 1 @@ -2028,10 +2269,7 @@ def test_backtest_start_multi_strat_caching(default_conf, mocker, caplog, testda def test_get_strategy_run_id(default_conf_usdt): - default_conf_usdt.update({ - 'strategy': 'StrategyTestV2', - 'max_open_trades': float('inf') - }) + default_conf_usdt.update({"strategy": "StrategyTestV2", "max_open_trades": float("inf")}) strategy = StrategyResolver.load_strategy(default_conf_usdt) x = get_strategy_run_id(strategy) assert isinstance(x, str) @@ -2039,36 +2277,36 @@ def test_get_strategy_run_id(default_conf_usdt): def test_get_backtest_metadata_filename(): # Test with a file path - filename = Path('backtest_results.json') - expected = Path('backtest_results.meta.json') + filename = Path("backtest_results.json") + expected = Path("backtest_results.meta.json") assert get_backtest_metadata_filename(filename) == expected # Test with a file path with multiple dots in the name - filename = Path('/path/to/backtest.results.json') - expected = Path('/path/to/backtest.results.meta.json') + filename = Path("/path/to/backtest.results.json") + expected = Path("/path/to/backtest.results.meta.json") assert get_backtest_metadata_filename(filename) == expected # Test with a file path with no parent directory - filename = Path('backtest_results.json') - expected = Path('backtest_results.meta.json') + filename = Path("backtest_results.json") + expected = Path("backtest_results.meta.json") assert get_backtest_metadata_filename(filename) == expected # Test with a string file path - filename = '/path/to/backtest_results.json' - expected = 
Path('/path/to/backtest_results.meta.json') + filename = "/path/to/backtest_results.json" + expected = Path("/path/to/backtest_results.meta.json") assert get_backtest_metadata_filename(filename) == expected # Test with a string file path with no extension - filename = '/path/to/backtest_results' - expected = Path('/path/to/backtest_results.meta') + filename = "/path/to/backtest_results" + expected = Path("/path/to/backtest_results.meta") assert get_backtest_metadata_filename(filename) == expected # Test with a string file path with multiple dots in the name - filename = '/path/to/backtest.results.json' - expected = Path('/path/to/backtest.results.meta.json') + filename = "/path/to/backtest.results.json" + expected = Path("/path/to/backtest.results.meta.json") assert get_backtest_metadata_filename(filename) == expected # Test with a string file path with no parent directory - filename = 'backtest_results.json' - expected = Path('backtest_results.meta.json') + filename = "backtest_results.json" + expected = Path("backtest_results.meta.json") assert get_backtest_metadata_filename(filename) == expected diff --git a/tests/optimize/test_backtesting_adjust_position.py b/tests/optimize/test_backtesting_adjust_position.py index 983e4b47f..64df6537b 100644 --- a/tests/optimize/test_backtesting_adjust_position.py +++ b/tests/optimize/test_backtesting_adjust_position.py @@ -16,25 +16,26 @@ from tests.conftest import EXMS, patch_exchange def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) -> None: - default_conf['use_exit_signal'] = False - default_conf['max_open_trades'] = 10 - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch('freqtrade.optimize.backtesting.amount_to_contract_precision', - lambda x, *args, **kwargs: round(x, 8)) + default_conf["use_exit_signal"] = False + default_conf["max_open_trades"] = 10 + mocker.patch(f"{EXMS}.get_fee", fee) + mocker.patch( + "freqtrade.optimize.backtesting.amount_to_contract_precision", + lambda x, *args, **kwargs: round(x, 8), + ) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) - default_conf.update({ - "stake_amount": 100.0, - "dry_run_wallet": 1000.0, - "strategy": "StrategyTestV3" - }) + default_conf.update( + {"stake_amount": 100.0, "dry_run_wallet": 1000.0, "strategy": "StrategyTestV3"} + ) backtesting = Backtesting(default_conf) backtesting._set_strategy(backtesting.strategylist[0]) - pair = 'UNITTEST/BTC' - timerange = TimeRange('date', None, 1517227800, 0) - data = history.load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'], - timerange=timerange) + pair = "UNITTEST/BTC" + timerange = TimeRange("date", None, 1517227800, 0) + data = history.load_data( + datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange + ) backtesting.strategy.position_adjustment_enable = True processed = backtesting.strategy.advise_all_indicators(data) min_date, max_date = get_timerange(processed) @@ -43,47 +44,50 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) -> start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty assert len(results) == 2 expected = pd.DataFrame( - {'pair': [pair, pair], - 'stake_amount': [500.0, 100.0], - 'max_stake_amount': [500.0, 100], - 'amount': [4806.87657523, 970.63960782], - 
'open_date': pd.to_datetime([dt_utc(2018, 1, 29, 18, 40, 0), - dt_utc(2018, 1, 30, 3, 30, 0)], utc=True - ), - 'close_date': pd.to_datetime([dt_utc(2018, 1, 29, 22, 00, 0), - dt_utc(2018, 1, 30, 4, 10, 0)], utc=True), - 'open_rate': [0.10401764891917063, 0.10302485], - 'close_rate': [0.10453904064307624, 0.10354126528822055], - 'fee_open': [0.0025, 0.0025], - 'fee_close': [0.0025, 0.0025], - 'trade_duration': [200, 40], - 'profit_ratio': [0.0, 0.0], - 'profit_abs': [0.0, 0.0], - 'exit_reason': [ExitType.ROI.value, ExitType.ROI.value], - 'initial_stop_loss_abs': [0.0940005, 0.092722365], - 'initial_stop_loss_ratio': [-0.1, -0.1], - 'stop_loss_abs': [0.0940005, 0.092722365], - 'stop_loss_ratio': [-0.1, -0.1], - 'min_rate': [0.10370188, 0.10300000000000001], - 'max_rate': [0.10481985, 0.10388887000000001], - 'is_open': [False, False], - 'enter_tag': ['', ''], - 'leverage': [1.0, 1.0], - 'is_short': [False, False], - 'open_timestamp': [1517251200000, 1517283000000], - 'close_timestamp': [1517263200000, 1517285400000], - }) - results_no = results.drop(columns=['orders']) + { + "pair": [pair, pair], + "stake_amount": [500.0, 100.0], + "max_stake_amount": [500.0, 100], + "amount": [4806.87657523, 970.63960782], + "open_date": pd.to_datetime( + [dt_utc(2018, 1, 29, 18, 40, 0), dt_utc(2018, 1, 30, 3, 30, 0)], utc=True + ), + "close_date": pd.to_datetime( + [dt_utc(2018, 1, 29, 22, 00, 0), dt_utc(2018, 1, 30, 4, 10, 0)], utc=True + ), + "open_rate": [0.10401764891917063, 0.10302485], + "close_rate": [0.10453904064307624, 0.10354126528822055], + "fee_open": [0.0025, 0.0025], + "fee_close": [0.0025, 0.0025], + "trade_duration": [200, 40], + "profit_ratio": [0.0, 0.0], + "profit_abs": [0.0, 0.0], + "exit_reason": [ExitType.ROI.value, ExitType.ROI.value], + "initial_stop_loss_abs": [0.0940005, 0.092722365], + "initial_stop_loss_ratio": [-0.1, -0.1], + "stop_loss_abs": [0.0940005, 0.092722365], + "stop_loss_ratio": [-0.1, -0.1], + "min_rate": [0.10370188, 0.10300000000000001], + "max_rate": [0.10481985, 0.10388887000000001], + "is_open": [False, False], + "enter_tag": ["", ""], + "leverage": [1.0, 1.0], + "is_short": [False, False], + "open_timestamp": [1517251200000, 1517283000000], + "close_timestamp": [1517263200000, 1517285400000], + } + ) + results_no = results.drop(columns=["orders"]) pd.testing.assert_frame_equal(results_no, expected, check_exact=True) data_pair = processed[pair] - assert len(results.iloc[0]['orders']) == 6 - assert len(results.iloc[1]['orders']) == 2 + assert len(results.iloc[0]["orders"]) == 6 + assert len(results.iloc[1]["orders"]) == 2 for _, t in results.iterrows(): ln = data_pair.loc[data_pair["date"] == t["open_date"]] @@ -91,65 +95,65 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) -> assert ln is not None # check close trade rate aligns to close rate or is between high and low ln = data_pair.loc[data_pair["date"] == t["close_date"]] - assert (round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or - round(ln.iloc[0]["low"], 6) < round( - t["close_rate"], 6) < round(ln.iloc[0]["high"], 6)) + assert round(ln.iloc[0]["open"], 6) == round(t["close_rate"], 6) or round( + ln.iloc[0]["low"], 6 + ) < round(t["close_rate"], 6) < round(ln.iloc[0]["high"], 6) -@pytest.mark.parametrize('leverage', [ - 1, 2 -]) +@pytest.mark.parametrize("leverage", [1, 2]) def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, leverage) -> None: - default_conf['use_exit_signal'] = False - mocker.patch(f'{EXMS}.get_fee', fee) + 
default_conf["use_exit_signal"] = False + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=10) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) mocker.patch(f"{EXMS}.get_max_leverage", return_value=10) mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0.1, 0.1)) - mocker.patch('freqtrade.optimize.backtesting.Backtesting._run_funding_fees') + mocker.patch("freqtrade.optimize.backtesting.Backtesting._run_funding_fees") patch_exchange(mocker) - default_conf.update({ - "stake_amount": 100.0, - "dry_run_wallet": 1000.0, - "strategy": "StrategyTestV3", - "trading_mode": "futures", - "margin_mode": "isolated", - }) - default_conf['pairlists'] = [{'method': 'StaticPairList', 'allow_inactive': True}] + default_conf.update( + { + "stake_amount": 100.0, + "dry_run_wallet": 1000.0, + "strategy": "StrategyTestV3", + "trading_mode": "futures", + "margin_mode": "isolated", + } + ) + default_conf["pairlists"] = [{"method": "StaticPairList", "allow_inactive": True}] backtesting = Backtesting(default_conf) backtesting._can_short = True backtesting._set_strategy(backtesting.strategylist[0]) - pair = 'XRP/USDT:USDT' + pair = "XRP/USDT:USDT" row_enter = [ - pd.Timestamp(year=2020, month=1, day=1, hour=4, minute=0), - 2.1, # Open - 2.2, # High - 1.9, # Low - 2.1, # Close - 1, # enter_long - 0, # exit_long - 0, # enter_short - 0, # exit_short - '', # enter_tag - '', # exit_tag - ] + pd.Timestamp(year=2020, month=1, day=1, hour=4, minute=0), + 2.1, # Open + 2.2, # High + 1.9, # Low + 2.1, # Close + 1, # enter_long + 0, # exit_long + 0, # enter_short + 0, # exit_short + "", # enter_tag + "", # exit_tag + ] # Exit row - with slightly different values row_exit = [ - pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0), - 2.2, # Open - 2.3, # High - 2.0, # Low - 2.2, # Close - 1, # enter_long - 0, # exit_long - 0, # enter_short - 0, # exit_short - '', # enter_tag - '', # exit_tag - ] + pd.Timestamp(year=2020, month=1, day=1, hour=5, minute=0), + 2.2, # Open + 2.3, # High + 2.0, # Low + 2.2, # Close + 1, # enter_long + 0, # exit_long + 0, # enter_short + 0, # exit_short + "", # enter_tag + "", # exit_tag + ] backtesting.strategy.leverage = MagicMock(return_value=leverage) - trade = backtesting._enter_trade(pair, row=row_enter, direction='long') + trade = backtesting._enter_trade(pair, row=row_enter, direction="long") current_time = row_enter[0].to_pydatetime() assert trade assert pytest.approx(trade.stake_amount) == 100.0 @@ -164,7 +168,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, levera assert pytest.approx(trade.amount) == 47.61904762 * leverage assert len(trade.orders) == 1 # Increase position by 100 - backtesting.strategy.adjust_trade_position = MagicMock(return_value=(100, 'PartIncrease')) + backtesting.strategy.adjust_trade_position = MagicMock(return_value=(100, "PartIncrease")) trade = backtesting._get_adjust_trade_entry_for_candle(trade, row_enter, current_time) @@ -173,7 +177,7 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, levera assert pytest.approx(trade.stake_amount) == 200.0 assert pytest.approx(trade.amount) == 95.23809524 * leverage assert len(trade.orders) == 2 - assert trade.orders[-1].ft_order_tag == 'PartIncrease' + assert trade.orders[-1].ft_order_tag == "PartIncrease" assert pytest.approx(trade.liquidation_price) == liq_price # Reduce by more than 
amount - no change to trade. @@ -190,14 +194,14 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, levera assert pytest.approx(trade.liquidation_price) == liq_price # Reduce position by 50 - backtesting.strategy.adjust_trade_position = MagicMock(return_value=(-100, 'partDecrease')) + backtesting.strategy.adjust_trade_position = MagicMock(return_value=(-100, "partDecrease")) trade = backtesting._get_adjust_trade_entry_for_candle(trade, row_exit, current_time) assert trade assert pytest.approx(trade.stake_amount) == 100.0 assert pytest.approx(trade.amount) == 47.61904762 * leverage assert len(trade.orders) == 3 - assert trade.orders[-1].ft_order_tag == 'partDecrease' + assert trade.orders[-1].ft_order_tag == "partDecrease" assert trade.nr_of_successful_entries == 2 assert trade.nr_of_successful_exits == 1 assert pytest.approx(trade.liquidation_price) == liq_price @@ -213,3 +217,8 @@ def test_backtest_position_adjustment_detailed(default_conf, fee, mocker, levera assert trade.nr_of_successful_entries == 2 assert trade.nr_of_successful_exits == 1 assert pytest.approx(trade.liquidation_price) == liq_price + + # Adjust to close trade + backtesting.strategy.adjust_trade_position = MagicMock(return_value=-trade.stake_amount) + trade = backtesting._get_adjust_trade_entry_for_candle(trade, row_exit, current_time) + assert trade.is_open is False diff --git a/tests/optimize/test_edge_cli.py b/tests/optimize/test_edge_cli.py index 64172bf1c..3f515bebe 100644 --- a/tests/optimize/test_edge_cli.py +++ b/tests/optimize/test_edge_cli.py @@ -6,114 +6,126 @@ from unittest.mock import MagicMock from freqtrade.commands.optimize_commands import setup_optimize_configuration, start_edge from freqtrade.enums import RunMode from freqtrade.optimize.edge_cli import EdgeCli -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, get_args, log_has, patch_exchange, - patched_configuration_load_config_file) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + get_args, + log_has, + patch_exchange, + patched_configuration_load_config_file, +) def test_setup_optimize_configuration_without_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) args = [ - 'edge', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, + "edge", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, ] config = setup_optimize_configuration(get_args(args), RunMode.EDGE) - assert config['runmode'] == RunMode.EDGE + assert config["runmode"] == RunMode.EDGE - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config - assert 'timerange' not in config - assert 'stoploss_range' not in config + assert "timerange" not in config + assert "stoploss_range" not in config def test_setup_edge_configuration_with_arguments(mocker, edge_conf, caplog) -> None: patched_configuration_load_config_file(mocker, edge_conf) - mocker.patch( - 
'freqtrade.configuration.configuration.create_datadir', - lambda c, x: x - ) + mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x) args = [ - 'edge', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--datadir', '/foo/bar', - '--timeframe', '1m', - '--timerange', ':100', - '--stoplosses=-0.01,-0.10,-0.001' + "edge", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--datadir", + "/foo/bar", + "--timeframe", + "1m", + "--timerange", + ":100", + "--stoplosses=-0.01,-0.10,-0.001", ] config = setup_optimize_configuration(get_args(args), RunMode.EDGE) - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert config['runmode'] == RunMode.EDGE - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config - assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - caplog) + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert config["runmode"] == RunMode.EDGE + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config + assert log_has("Parameter -i/--timeframe detected ... Using timeframe: 1m ...", caplog) - assert 'timerange' in config - assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog) + assert "timerange" in config + assert log_has("Parameter --timerange detected: {} ...".format(config["timerange"]), caplog) def test_start(mocker, fee, edge_conf, caplog) -> None: start_mock = MagicMock() - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) patch_exchange(mocker) - mocker.patch('freqtrade.optimize.edge_cli.EdgeCli.start', start_mock) + mocker.patch("freqtrade.optimize.edge_cli.EdgeCli.start", start_mock) patched_configuration_load_config_file(mocker, edge_conf) args = [ - 'edge', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, + "edge", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, ] pargs = get_args(args) start_edge(pargs) - assert log_has('Starting freqtrade in Edge mode', caplog) + assert log_has("Starting freqtrade in Edge mode", caplog) assert start_mock.call_count == 1 def test_edge_init(mocker, edge_conf) -> None: patch_exchange(mocker) - edge_conf['stake_amount'] = 20 + edge_conf["stake_amount"] = 20 edge_cli = EdgeCli(edge_conf) assert edge_cli.config == edge_conf - assert edge_cli.config['stake_amount'] == 'unlimited' + assert edge_cli.config["stake_amount"] == "unlimited" assert callable(edge_cli.edge.calculate) assert edge_cli.strategy.bot_started is True def test_edge_init_fee(mocker, edge_conf) -> None: patch_exchange(mocker) - edge_conf['fee'] = 0.1234 - edge_conf['stake_amount'] = 20 - fee_mock = mocker.patch(f'{EXMS}.get_fee', return_value=0.5) + edge_conf["fee"] = 0.01234 + edge_conf["stake_amount"] = 20 + fee_mock = mocker.patch(f"{EXMS}.get_fee", return_value=0.5) edge_cli = EdgeCli(edge_conf) - assert edge_cli.edge.fee == 0.1234 + assert edge_cli.edge.fee == 0.01234 assert fee_mock.call_count == 0 def test_edge_start(mocker, edge_conf) -> None: - mock_calculate = mocker.patch('freqtrade.edge.edge_positioning.Edge.calculate', - 
return_value=True) - table_mock = mocker.patch('freqtrade.optimize.edge_cli.generate_edge_table') + mock_calculate = mocker.patch( + "freqtrade.edge.edge_positioning.Edge.calculate", return_value=True + ) + table_mock = mocker.patch("freqtrade.optimize.edge_cli.generate_edge_table") patch_exchange(mocker) - edge_conf['stake_amount'] = 20 + edge_conf["stake_amount"] = 20 edge_cli = EdgeCli(edge_conf) edge_cli.start() diff --git a/tests/optimize/test_hyperopt.py b/tests/optimize/test_hyperopt.py index a68a0fc39..4d2f9ae36 100644 --- a/tests/optimize/test_hyperopt.py +++ b/tests/optimize/test_hyperopt.py @@ -20,31 +20,39 @@ from freqtrade.optimize.optimize_reports import generate_strategy_stats from freqtrade.optimize.space import SKDecimal from freqtrade.strategy import IntParameter from freqtrade.util import dt_utc -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, get_args, get_markets, log_has, log_has_re, - patch_exchange, patched_configuration_load_config_file) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + get_args, + get_markets, + log_has, + log_has_re, + patch_exchange, + patched_configuration_load_config_file, +) def generate_result_metrics(): return { - 'trade_count': 1, - 'total_trades': 1, - 'avg_profit': 0.1, - 'total_profit': 0.001, - 'profit': 0.01, - 'duration': 20.0, - 'wins': 1, - 'draws': 0, - 'losses': 0, - 'profit_mean': 0.01, - 'profit_total_abs': 0.001, - 'profit_total': 0.01, - 'holding_avg': timedelta(minutes=20), - 'max_drawdown': 0.001, - 'max_drawdown_abs': 0.001, - 'loss': 0.001, - 'is_initial_point': 0.001, - 'is_random': False, - 'is_best': 1, + "trade_count": 1, + "total_trades": 1, + "avg_profit": 0.1, + "total_profit": 0.001, + "profit": 0.01, + "duration": 20.0, + "wins": 1, + "draws": 0, + "losses": 0, + "profit_mean": 0.01, + "profit_total_abs": 0.001, + "profit_total": 0.01, + "holding_avg": timedelta(minutes=20), + "max_drawdown_account": 0.001, + "max_drawdown_abs": 0.001, + "loss": 0.001, + "is_initial_point": 0.001, + "is_random": False, + "is_best": 1, } @@ -52,104 +60,117 @@ def test_setup_hyperopt_configuration_without_arguments(mocker, default_conf, ca patched_configuration_load_config_file(mocker, default_conf) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config - assert 'position_stacking' not in config - assert not log_has('Parameter --enable-position-stacking detected ...', caplog) + assert "position_stacking" not in config + assert not log_has("Parameter --enable-position-stacking detected ...", caplog) - assert 'timerange' not in config - assert 'runmode' in config - assert config['runmode'] == RunMode.HYPEROPT + assert "timerange" not in config + assert "runmode" in config 
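# Illustrative sketch (argument values invented; config loading is patched
# exactly as in the tests above): every configuration test in this section
# follows the same recipe - build an argv-style list, parse it with
# get_args(), pass the result to setup_optimize_configuration() together with
# the intended RunMode, then assert on the resulting dict and on caplog via
# log_has().
from freqtrade.commands.optimize_commands import setup_optimize_configuration
from freqtrade.enums import RunMode
from tests.conftest import get_args

args = ["hyperopt", "--config", "config.json", "--strategy", "HyperoptableStrategy"]
config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT)
assert config["runmode"] == RunMode.HYPEROPT
assert "timeframe" in config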
+ assert config["runmode"] == RunMode.HYPEROPT def test_setup_hyperopt_configuration_with_arguments(mocker, default_conf, caplog) -> None: patched_configuration_load_config_file(mocker, default_conf) - mocker.patch( - 'freqtrade.configuration.configuration.create_datadir', - lambda c, x: x - ) + mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', - '--datadir', '/foo/bar', - '--timeframe', '1m', - '--timerange', ':100', - '--enable-position-stacking', - '--disable-max-market-positions', - '--epochs', '1000', - '--spaces', 'default', - '--print-all' + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", + "--datadir", + "/foo/bar", + "--timeframe", + "1m", + "--timerange", + ":100", + "--enable-position-stacking", + "--disable-max-market-positions", + "--epochs", + "1000", + "--spaces", + "default", + "--print-all", ] config = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) - assert 'max_open_trades' in config - assert 'stake_currency' in config - assert 'stake_amount' in config - assert 'exchange' in config - assert 'pair_whitelist' in config['exchange'] - assert 'datadir' in config - assert config['runmode'] == RunMode.HYPEROPT + assert "max_open_trades" in config + assert "stake_currency" in config + assert "stake_amount" in config + assert "exchange" in config + assert "pair_whitelist" in config["exchange"] + assert "datadir" in config + assert config["runmode"] == RunMode.HYPEROPT - assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog) - assert 'timeframe' in config - assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...', - caplog) + assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog) + assert "timeframe" in config + assert log_has("Parameter -i/--timeframe detected ... Using timeframe: 1m ...", caplog) - assert 'position_stacking' in config - assert log_has('Parameter --enable-position-stacking detected ...', caplog) + assert "position_stacking" in config + assert log_has("Parameter --enable-position-stacking detected ...", caplog) - assert 'use_max_market_positions' in config - assert log_has('Parameter --disable-max-market-positions detected ...', caplog) - assert log_has('max_open_trades set to unlimited ...', caplog) + assert "use_max_market_positions" in config + assert log_has("Parameter --disable-max-market-positions detected ...", caplog) + assert log_has("max_open_trades set to unlimited ...", caplog) - assert 'timerange' in config - assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog) + assert "timerange" in config + assert log_has("Parameter --timerange detected: {} ...".format(config["timerange"]), caplog) - assert 'epochs' in config - assert log_has('Parameter --epochs detected ... Will run Hyperopt with for 1000 epochs ...', - caplog) + assert "epochs" in config + assert log_has( + "Parameter --epochs detected ... 
Will run Hyperopt with for 1000 epochs ...", caplog + ) - assert 'spaces' in config - assert log_has('Parameter -s/--spaces detected: {}'.format(config['spaces']), caplog) - assert 'print_all' in config - assert log_has('Parameter --print-all detected ...', caplog) + assert "spaces" in config + assert log_has("Parameter -s/--spaces detected: {}".format(config["spaces"]), caplog) + assert "print_all" in config + assert log_has("Parameter --print-all detected ...", caplog) def test_setup_hyperopt_configuration_stake_amount(mocker, default_conf) -> None: - patched_configuration_load_config_file(mocker, default_conf) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', - '--stake-amount', '1', - '--starting-balance', '2' + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", + "--stake-amount", + "1", + "--starting-balance", + "2", ] conf = setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) assert isinstance(conf, dict) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', CURRENT_TEST_STRATEGY, - '--stake-amount', '1', - '--starting-balance', '0.5' + "hyperopt", + "--config", + "config.json", + "--strategy", + CURRENT_TEST_STRATEGY, + "--stake-amount", + "1", + "--starting-balance", + "0.5", ] with pytest.raises(OperationalException, match=r"Starting balance .* smaller .*"): setup_optimize_configuration(get_args(args), RunMode.HYPEROPT) @@ -159,15 +180,19 @@ def test_start_not_installed(mocker, default_conf, import_fails) -> None: start_mock = MagicMock() patched_configuration_load_config_file(mocker, default_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) + mocker.patch("freqtrade.optimize.hyperopt.Hyperopt.start", start_mock) patch_exchange(mocker) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', - '--epochs', '5', - '--hyperopt-loss', 'SharpeHyperOptLossDaily', + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", + "--epochs", + "5", + "--hyperopt-loss", + "SharpeHyperOptLossDaily", ] pargs = get_args(args) @@ -178,15 +203,19 @@ def test_start_not_installed(mocker, default_conf, import_fails) -> None: def test_start_no_hyperopt_allowed(mocker, hyperopt_conf, caplog) -> None: start_mock = MagicMock() patched_configuration_load_config_file(mocker, hyperopt_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.start', start_mock) + mocker.patch("freqtrade.optimize.hyperopt.Hyperopt.start", start_mock) patch_exchange(mocker) args = [ - 'hyperopt', - '--config', 'config.json', - '--hyperopt', 'HyperoptTestSepFile', - '--hyperopt-loss', 'SharpeHyperOptLossDaily', - '--epochs', '5' + "hyperopt", + "--config", + "config.json", + "--hyperopt", + "HyperoptTestSepFile", + "--hyperopt-loss", + "SharpeHyperOptLossDaily", + "--epochs", + "5", ] pargs = get_args(args) with pytest.raises(OperationalException, match=r"Using separate Hyperopt files has been.*"): @@ -194,24 +223,28 @@ def test_start_no_hyperopt_allowed(mocker, hyperopt_conf, caplog) -> None: def test_start_no_data(mocker, hyperopt_conf, tmp_path) -> None: - hyperopt_conf['user_data_dir'] = tmp_path + hyperopt_conf["user_data_dir"] = tmp_path patched_configuration_load_config_file(mocker, hyperopt_conf) - mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame)) + mocker.patch("freqtrade.data.history.load_pair_history", MagicMock(return_value=pd.DataFrame)) mocker.patch( - 
'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) patch_exchange(mocker) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', - '--hyperopt-loss', 'SharpeHyperOptLossDaily', - '--epochs', '5' + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", + "--hyperopt-loss", + "SharpeHyperOptLossDaily", + "--epochs", + "5", ] pargs = get_args(args) - with pytest.raises(OperationalException, match='No data found. Terminating.'): + with pytest.raises(OperationalException, match="No data found. Terminating."): start_hyperopt(pargs) # Cleanup since that failed hyperopt start leaves a lockfile. @@ -224,15 +257,19 @@ def test_start_no_data(mocker, hyperopt_conf, tmp_path) -> None: def test_start_filelock(mocker, hyperopt_conf, caplog) -> None: hyperopt_mock = MagicMock(side_effect=Timeout(Hyperopt.get_lock_filename(hyperopt_conf))) patched_configuration_load_config_file(mocker, hyperopt_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.__init__', hyperopt_mock) + mocker.patch("freqtrade.optimize.hyperopt.Hyperopt.__init__", hyperopt_mock) patch_exchange(mocker) args = [ - 'hyperopt', - '--config', 'config.json', - '--strategy', 'HyperoptableStrategy', - '--hyperopt-loss', 'SharpeHyperOptLossDaily', - '--epochs', '5' + "hyperopt", + "--config", + "config.json", + "--strategy", + "HyperoptableStrategy", + "--hyperopt-loss", + "SharpeHyperOptLossDaily", + "--epochs", + "5", ] pargs = get_args(args) start_hyperopt(pargs) @@ -245,27 +282,28 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None: hyperopt.print_results( { - 'loss': 1, - 'results_metrics': generate_result_metrics(), - 'total_profit': 0, - 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) - 'is_initial_point': False, - 'is_random': False, - 'is_best': True + "loss": 1, + "results_metrics": generate_result_metrics(), + "total_profit": 0, + "current_epoch": 2, # This starts from 1 (in a human-friendly manner) + "is_initial_point": False, + "is_random": False, + "is_best": True, } ) out, _err = capsys.readouterr() - assert all(x in out - for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"]) + assert all( + x in out for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"] + ) def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None: hyperopt.current_best_loss = 2 hyperopt.print_results( { - 'is_best': False, - 'loss': 3, - 'current_epoch': 1, + "is_best": False, + "loss": 3, + "current_epoch": 1, } ) assert caplog.record_tuples == [] @@ -273,57 +311,64 @@ def test_no_log_if_loss_does_not_improve(hyperopt, caplog) -> None: def test_roi_table_generation(hyperopt) -> None: params = { - 'roi_t1': 5, - 'roi_t2': 10, - 'roi_t3': 15, - 'roi_p1': 1, - 'roi_p2': 2, - 'roi_p3': 3, + "roi_t1": 5, + "roi_t2": 10, + "roi_t3": 15, + "roi_p1": 1, + "roi_p2": 2, + "roi_p3": 3, } assert hyperopt.custom_hyperopt.generate_roi_table(params) == {0: 6, 15: 3, 25: 1, 30: 0} def test_params_no_optimize_details(hyperopt) -> None: - hyperopt.config['spaces'] = ['buy'] + hyperopt.config["spaces"] = ["buy"] res = hyperopt._get_no_optimize_details() assert isinstance(res, dict) assert "trailing" in res - assert res["trailing"]['trailing_stop'] is False + assert res["trailing"]["trailing_stop"] is False assert "roi" in 
res - assert res['roi']['0'] == 0.04 + assert res["roi"]["0"] == 0.04 assert "stoploss" in res - assert res['stoploss']['stoploss'] == -0.1 + assert res["stoploss"]["stoploss"] == -0.1 assert "max_open_trades" in res - assert res['max_open_trades']['max_open_trades'] == 1 + assert res["max_open_trades"]["max_open_trades"] == 1 def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) # Dummy-reduce points to ensure scikit-learn is forced to generate new values - mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2) + mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', - 'params': {'buy': {}, 'sell': {}, 'roi': {}, 'stoploss': 0.0}, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {"buy": {}, "sell": {}, "roi": {}, "stoploss": 0.0}, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) # Co-test loading timeframe from strategy - del hyperopt_conf['timeframe'] + del hyperopt_conf["timeframe"] hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -334,184 +379,217 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None: parallel.assert_called_once() out, _err = capsys.readouterr() - assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out + assert "Best result:\n\n* 1/1: foo result Objective: 1.00000\n" in out # Should be called for historical candle data assert dumper.call_count == 1 assert dumper2.call_count == 1 assert hasattr(hyperopt.backtesting.strategy, "advise_exit") assert hasattr(hyperopt.backtesting.strategy, "advise_entry") - assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf['max_open_trades'] + assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"] assert hasattr(hyperopt.backtesting, "_position_stacking") def test_hyperopt_format_results(hyperopt): - bt_result = { - 'results': pd.DataFrame({"pair": ["UNITTEST/BTC", "UNITTEST/BTC", - "UNITTEST/BTC", "UNITTEST/BTC"], - "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], - "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], - 
"open_date": [dt_utc(2017, 11, 14, 19, 32, 00), - dt_utc(2017, 11, 14, 21, 36, 00), - dt_utc(2017, 11, 14, 22, 12, 00), - dt_utc(2017, 11, 14, 22, 44, 00)], - "close_date": [dt_utc(2017, 11, 14, 21, 35, 00), - dt_utc(2017, 11, 14, 22, 10, 00), - dt_utc(2017, 11, 14, 22, 43, 00), - dt_utc(2017, 11, 14, 22, 58, 00)], - "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], - "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], - "trade_duration": [123, 34, 31, 14], - "is_open": [False, False, False, True], - "is_short": [False, False, False, False], - "stake_amount": [0.01, 0.01, 0.01, 0.01], - "exit_reason": [ExitType.ROI, ExitType.STOP_LOSS, - ExitType.ROI, ExitType.FORCE_EXIT] - }), - 'config': hyperopt.config, - 'locks': [], - 'final_balance': 0.02, - 'rejected_signals': 2, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'backtest_start_time': 1619718665, - 'backtest_end_time': 1619718665, + "results": pd.DataFrame( + { + "pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"], + "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], + "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], + "open_date": [ + dt_utc(2017, 11, 14, 19, 32, 00), + dt_utc(2017, 11, 14, 21, 36, 00), + dt_utc(2017, 11, 14, 22, 12, 00), + dt_utc(2017, 11, 14, 22, 44, 00), + ], + "close_date": [ + dt_utc(2017, 11, 14, 21, 35, 00), + dt_utc(2017, 11, 14, 22, 10, 00), + dt_utc(2017, 11, 14, 22, 43, 00), + dt_utc(2017, 11, 14, 22, 58, 00), + ], + "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], + "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], + "trade_duration": [123, 34, 31, 14], + "is_open": [False, False, False, True], + "is_short": [False, False, False, False], + "stake_amount": [0.01, 0.01, 0.01, 0.01], + "exit_reason": [ + ExitType.ROI, + ExitType.STOP_LOSS, + ExitType.ROI, + ExitType.FORCE_EXIT, + ], + } + ), + "config": hyperopt.config, + "locks": [], + "final_balance": 0.02, + "rejected_signals": 2, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "backtest_start_time": 1619718665, + "backtest_end_time": 1619718665, } - results_metrics = generate_strategy_stats(['XRP/BTC'], '', bt_result, - dt_utc(2017, 11, 14, 19, 32, 00), - dt_utc(2017, 12, 14, 19, 32, 00), market_change=0) + results_metrics = generate_strategy_stats( + ["XRP/BTC"], + "", + bt_result, + dt_utc(2017, 11, 14, 19, 32, 00), + dt_utc(2017, 12, 14, 19, 32, 00), + market_change=0, + ) - results_explanation = HyperoptTools.format_results_explanation_string(results_metrics, 'BTC') - total_profit = results_metrics['profit_total_abs'] + results_explanation = HyperoptTools.format_results_explanation_string(results_metrics, "BTC") + total_profit = results_metrics["profit_total_abs"] results = { - 'loss': 0.0, - 'params_dict': None, - 'params_details': None, - 'results_metrics': results_metrics, - 'results_explanation': results_explanation, - 'total_profit': total_profit, - 'current_epoch': 1, - 'is_initial_point': True, + "loss": 0.0, + "params_dict": None, + "params_details": None, + "results_metrics": results_metrics, + "results_explanation": results_explanation, + "total_profit": total_profit, + "current_epoch": 1, + "is_initial_point": True, } result = HyperoptTools._format_explanation_string(results, 1) - assert ' 0.71%' in result - assert 'Total profit 0.00003100 BTC' in result - assert '0:50:00 min' in 
result + assert " 0.71%" in result + assert "Total profit 0.00003100 BTC" in result + assert "0:50:00 min" in result def test_populate_indicators(hyperopt, testdatadir) -> None: - data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], fill_up_missing=True) + data = load_data(testdatadir, "1m", ["UNITTEST/BTC"], fill_up_missing=True) dataframes = hyperopt.backtesting.strategy.advise_all_indicators(data) - dataframe = dataframes['UNITTEST/BTC'] + dataframe = dataframes["UNITTEST/BTC"] # Check if some indicators are generated. We will not test all of them - assert 'adx' in dataframe - assert 'macd' in dataframe - assert 'rsi' in dataframe + assert "adx" in dataframe + assert "macd" in dataframe + assert "rsi" in dataframe def test_generate_optimizer(mocker, hyperopt_conf) -> None: - hyperopt_conf.update({'spaces': 'all', - 'hyperopt_min_trades': 1, - }) + hyperopt_conf.update( + { + "spaces": "all", + "hyperopt_min_trades": 1, + } + ) backtest_result = { - 'results': pd.DataFrame({"pair": ["UNITTEST/BTC", "UNITTEST/BTC", - "UNITTEST/BTC", "UNITTEST/BTC"], - "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], - "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], - "open_date": [dt_utc(2017, 11, 14, 19, 32, 00), - dt_utc(2017, 11, 14, 21, 36, 00), - dt_utc(2017, 11, 14, 22, 12, 00), - dt_utc(2017, 11, 14, 22, 44, 00)], - "close_date": [dt_utc(2017, 11, 14, 21, 35, 00), - dt_utc(2017, 11, 14, 22, 10, 00), - dt_utc(2017, 11, 14, 22, 43, 00), - dt_utc(2017, 11, 14, 22, 58, 00)], - "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], - "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], - "trade_duration": [123, 34, 31, 14], - "is_open": [False, False, False, True], - "is_short": [False, False, False, False], - "stake_amount": [0.01, 0.01, 0.01, 0.01], - "exit_reason": [ExitType.ROI, ExitType.STOP_LOSS, - ExitType.ROI, ExitType.FORCE_EXIT] - }), - 'config': hyperopt_conf, - 'locks': [], - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'final_balance': 1000, + "results": pd.DataFrame( + { + "pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"], + "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], + "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], + "open_date": [ + dt_utc(2017, 11, 14, 19, 32, 00), + dt_utc(2017, 11, 14, 21, 36, 00), + dt_utc(2017, 11, 14, 22, 12, 00), + dt_utc(2017, 11, 14, 22, 44, 00), + ], + "close_date": [ + dt_utc(2017, 11, 14, 21, 35, 00), + dt_utc(2017, 11, 14, 22, 10, 00), + dt_utc(2017, 11, 14, 22, 43, 00), + dt_utc(2017, 11, 14, 22, 58, 00), + ], + "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], + "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], + "trade_duration": [123, 34, 31, 14], + "is_open": [False, False, False, True], + "is_short": [False, False, False, False], + "stake_amount": [0.01, 0.01, 0.01, 0.01], + "exit_reason": [ + ExitType.ROI, + ExitType.STOP_LOSS, + ExitType.ROI, + ExitType.FORCE_EXIT, + ], + } + ), + "config": hyperopt_conf, + "locks": [], + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "final_balance": 1000, } - mocker.patch('freqtrade.optimize.hyperopt.Backtesting.backtest', return_value=backtest_result) - mocker.patch('freqtrade.optimize.hyperopt.get_timerange', - return_value=(dt_utc(2017, 12, 10), dt_utc(2017, 12, 13))) + 
mocker.patch("freqtrade.optimize.hyperopt.Backtesting.backtest", return_value=backtest_result) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + return_value=(dt_utc(2017, 12, 10), dt_utc(2017, 12, 13)), + ) patch_exchange(mocker) - mocker.patch.object(Path, 'open') - mocker.patch('freqtrade.configuration.config_validation.validate_config_schema') - mocker.patch('freqtrade.optimize.hyperopt.load', return_value={'XRP/BTC': None}) + mocker.patch.object(Path, "open") + mocker.patch("freqtrade.configuration.config_validation.validate_config_schema") + mocker.patch("freqtrade.optimize.hyperopt.load", return_value={"XRP/BTC": None}) optimizer_param = { - 'buy_plusdi': 0.02, - 'buy_rsi': 35, - 'sell_minusdi': 0.02, - 'sell_rsi': 75, - 'protection_cooldown_lookback': 20, - 'protection_enabled': True, - 'roi_t1': 60.0, - 'roi_t2': 30.0, - 'roi_t3': 20.0, - 'roi_p1': 0.01, - 'roi_p2': 0.01, - 'roi_p3': 0.1, - 'stoploss': -0.4, - 'trailing_stop': True, - 'trailing_stop_positive': 0.02, - 'trailing_stop_positive_offset_p1': 0.05, - 'trailing_only_offset_is_reached': False, - 'max_open_trades': 3, + "buy_plusdi": 0.02, + "buy_rsi": 35, + "sell_minusdi": 0.02, + "sell_rsi": 75, + "protection_cooldown_lookback": 20, + "protection_enabled": True, + "roi_t1": 60.0, + "roi_t2": 30.0, + "roi_t3": 20.0, + "roi_p1": 0.01, + "roi_p2": 0.01, + "roi_p3": 0.1, + "stoploss": -0.4, + "trailing_stop": True, + "trailing_stop_positive": 0.02, + "trailing_stop_positive_offset_p1": 0.05, + "trailing_only_offset_is_reached": False, + "max_open_trades": 3, } response_expected = { - 'loss': 1.9147239021396234, - 'results_explanation': (' 4 trades. 4/0/0 Wins/Draws/Losses. ' - 'Avg profit 0.77%. Median profit 0.71%. Total profit ' - '0.00003100 BTC ( 0.00%). ' - 'Avg duration 0:50:00 min.' - ), - 'params_details': {'buy': {'buy_plusdi': 0.02, - 'buy_rsi': 35, - }, - 'roi': {"0": 0.12000000000000001, - "20.0": 0.02, - "50.0": 0.01, - "110.0": 0}, - 'protection': {'protection_cooldown_lookback': 20, - 'protection_enabled': True, - }, - 'sell': {'sell_minusdi': 0.02, - 'sell_rsi': 75, - }, - 'stoploss': {'stoploss': -0.4}, - 'trailing': {'trailing_only_offset_is_reached': False, - 'trailing_stop': True, - 'trailing_stop_positive': 0.02, - 'trailing_stop_positive_offset': 0.07}, - 'max_open_trades': {'max_open_trades': 3} - }, - 'params_dict': optimizer_param, - 'params_not_optimized': {'buy': {}, 'protection': {}, 'sell': {}}, - 'results_metrics': ANY, - 'total_profit': 3.1e-08 + "loss": 1.9147239021396234, + "results_explanation": ( + " 4 trades. 4/0/0 Wins/Draws/Losses. " + "Avg profit 0.77%. Median profit 0.71%. Total profit " + "0.00003100 BTC ( 0.00%). " + "Avg duration 0:50:00 min." 
+ ), + "params_details": { + "buy": { + "buy_plusdi": 0.02, + "buy_rsi": 35, + }, + "roi": {"0": 0.12000000000000001, "20.0": 0.02, "50.0": 0.01, "110.0": 0}, + "protection": { + "protection_cooldown_lookback": 20, + "protection_enabled": True, + }, + "sell": { + "sell_minusdi": 0.02, + "sell_rsi": 75, + }, + "stoploss": {"stoploss": -0.4}, + "trailing": { + "trailing_only_offset_is_reached": False, + "trailing_stop": True, + "trailing_stop_positive": 0.02, + "trailing_stop_positive_offset": 0.07, + }, + "max_open_trades": {"max_open_trades": 3}, + }, + "params_dict": optimizer_param, + "params_not_optimized": {"buy": {}, "protection": {}, "sell": {}}, + "results_metrics": ANY, + "total_profit": 3.1e-08, } hyperopt = Hyperopt(hyperopt_conf) @@ -525,8 +603,10 @@ def test_generate_optimizer(mocker, hyperopt_conf) -> None: def test_clean_hyperopt(mocker, hyperopt_conf, caplog): patch_exchange(mocker) - mocker.patch("freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file", - MagicMock(return_value={})) + mocker.patch( + "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file", + MagicMock(return_value={}), + ) mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True)) unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock()) h = Hyperopt(hyperopt_conf) @@ -536,38 +616,50 @@ def test_clean_hyperopt(mocker, hyperopt_conf, caplog): def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': { - 'buy': {'mfi-value': None}, - 'sell': {'sell-mfi-value': None}, - 'roi': {}, 'stoploss': {'stoploss': None}, - 'trailing': {'trailing_stop': None}, - 'max_open_trades': {'max_open_trades': None} - }, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {}, + "params_details": { + "buy": {"mfi-value": None}, + "sell": {"sell-mfi-value": None}, + "roi": {}, + "stoploss": {"stoploss": None}, + "trailing": {"trailing_stop": None}, + "max_open_trades": {"max_open_trades": None}, + }, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 
'all', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + hyperopt_conf.update( + { + "spaces": "all", + "hyperopt_jobs": 1, + "print_json": True, + } + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -589,32 +681,41 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None: def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': { - 'buy': {'mfi-value': None}, - 'sell': {'sell-mfi-value': None}, - 'roi': {}, 'stoploss': {'stoploss': None} - }, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {}, + "params_details": { + "buy": {"mfi-value": None}, + "sell": {"sell-mfi-value": None}, + "roi": {}, + "stoploss": {"stoploss": None}, + }, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'print_json': True}) + hyperopt_conf.update({"print_json": True}) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -625,38 +726,52 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None: parallel.assert_called_once() out, _err = capsys.readouterr() - assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501 + assert ( + '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' + in out + ) # noqa: E501 # Should be called for historical candle data assert dumper.call_count == 1 assert dumper2.call_count == 1 def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = 
mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'params_details': {'roi': {}, 'stoploss': {'stoploss': None}}, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {}, + "params_details": {"roi": {}, "stoploss": {"stoploss": None}}, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 'roi stoploss', - 'hyperopt_jobs': 1, - 'print_json': True, - }) + hyperopt_conf.update( + { + "spaces": "roi stoploss", + "hyperopt_jobs": 1, + "print_json": True, + } + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -674,27 +789,35 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None: def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {'stoploss': 0.0}, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {"stoploss": 0.0}, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 'roi stoploss'}) + hyperopt_conf.update({"spaces": "roi stoploss"}) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -705,32 +828,39 @@ def 
test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> Non parallel.assert_called_once() out, _err = capsys.readouterr() - assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out + assert "Best result:\n\n* 1/1: foo result Objective: 1.00000\n" in out assert dumper.call_count == 1 assert dumper2.call_count == 1 assert hasattr(hyperopt.backtesting.strategy, "advise_exit") assert hasattr(hyperopt.backtesting.strategy, "advise_entry") - assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf['max_open_trades'] + assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"] assert hasattr(hyperopt.backtesting, "_position_stacking") def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None: - mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + mocker.patch("freqtrade.optimize.hyperopt.dump", MagicMock()) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 'all', }) + hyperopt_conf.update( + { + "spaces": "all", + } + ) - mocker.patch('freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space', - return_value=[]) + mocker.patch( + "freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space", return_value=[] + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -739,7 +869,7 @@ def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None: with pytest.raises(OperationalException, match=r"The 'protection' space is included into *"): hyperopt.init_spaces() - hyperopt.config['hyperopt_ignore_missing_space'] = True + hyperopt.config["hyperopt_ignore_missing_space"] = True caplog.clear() hyperopt.init_spaces() assert log_has_re(r"The 'protection' space is included into *", caplog) @@ -747,27 +877,35 @@ def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None: def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), 
None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {}, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 'buy'}) + hyperopt_conf.update({"spaces": "buy"}) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -778,38 +916,50 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None: parallel.assert_called_once() out, _err = capsys.readouterr() - assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out + assert "Best result:\n\n* 1/1: foo result Objective: 1.00000\n" in out assert dumper.called assert dumper.call_count == 1 assert dumper2.call_count == 1 assert hasattr(hyperopt.backtesting.strategy, "advise_exit") assert hasattr(hyperopt.backtesting.strategy, "advise_entry") - assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf['max_open_trades'] + assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"] assert hasattr(hyperopt.backtesting, "_position_stacking") def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None: - dumper = mocker.patch('freqtrade.optimize.hyperopt.dump') - dumper2 = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._save_result') - mocker.patch('freqtrade.optimize.hyperopt.calculate_market_change', return_value=1.5) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + dumper = mocker.patch("freqtrade.optimize.hyperopt.dump") + dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result") + mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), ) parallel = mocker.patch( - 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', - MagicMock(return_value=[{ - 'loss': 1, 'results_explanation': 'foo result', 'params': {}, - 'results_metrics': generate_result_metrics(), - }]) + "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel", + MagicMock( + return_value=[ + { + "loss": 1, + "results_explanation": "foo result", + "params": {}, + "results_metrics": generate_result_metrics(), + } + ] + ), ) patch_exchange(mocker) - hyperopt_conf.update({'spaces': 'sell', }) + hyperopt_conf.update( + { + "spaces": "sell", + } + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -820,36 +970,42 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None: parallel.assert_called_once() 
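# Illustrative sketch (class name, parameter ranges and indicator choice are
# invented; the real fixture is the HyperoptableStrategy test strategy): the
# "simplified interface" tests around this hunk rely on the strategy itself
# exposing hyperoptable parameters, which HyperOptAuto turns into the 'buy'
# and 'sell' search spaces without a separate hyperopt file.
import talib.abstract as ta
from pandas import DataFrame
from freqtrade.strategy import IStrategy, IntParameter

class MinimalHyperoptableStrategy(IStrategy):
    timeframe = "5m"
    stoploss = -0.25
    minimal_roi = {"0": 0.10}

    buy_rsi = IntParameter(20, 40, default=30, space="buy")    # feeds the 'buy' space
    sell_rsi = IntParameter(60, 90, default=70, space="sell")  # feeds the 'sell' space

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe["rsi"] = ta.RSI(dataframe)
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[dataframe["rsi"] < self.buy_rsi.value, "enter_long"] = 1
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[dataframe["rsi"] > self.sell_rsi.value, "exit_long"] = 1
        return dataframe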
out, _err = capsys.readouterr() - assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out + assert "Best result:\n\n* 1/1: foo result Objective: 1.00000\n" in out assert dumper.called assert dumper.call_count == 1 assert dumper2.call_count == 1 assert hasattr(hyperopt.backtesting.strategy, "advise_exit") assert hasattr(hyperopt.backtesting.strategy, "advise_entry") - assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf['max_open_trades'] + assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"] assert hasattr(hyperopt.backtesting, "_position_stacking") -@pytest.mark.parametrize("space", [ - ('buy'), - ('sell'), - ('protection'), -]) +@pytest.mark.parametrize( + "space", + [ + ("buy"), + ("sell"), + ("protection"), + ], +) def test_simplified_interface_failed(mocker, hyperopt_conf, space) -> None: - mocker.patch('freqtrade.optimize.hyperopt.dump', MagicMock()) - mocker.patch('freqtrade.optimize.hyperopt.file_dump_json') - mocker.patch('freqtrade.optimize.backtesting.Backtesting.load_bt_data', - MagicMock(return_value=(MagicMock(), None))) + mocker.patch("freqtrade.optimize.hyperopt.dump", MagicMock()) + mocker.patch("freqtrade.optimize.hyperopt.file_dump_json") mocker.patch( - 'freqtrade.optimize.hyperopt.get_timerange', - MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) + "freqtrade.optimize.backtesting.Backtesting.load_bt_data", + MagicMock(return_value=(MagicMock(), None)), + ) + mocker.patch( + "freqtrade.optimize.hyperopt.get_timerange", + MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))), + ) + mocker.patch( + "freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space", return_value=[] ) - mocker.patch('freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space', - return_value=[]) patch_exchange(mocker) - hyperopt_conf.update({'spaces': space}) + hyperopt_conf.update({"spaces": space}) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.strategy.advise_all_indicators = MagicMock() @@ -861,17 +1017,19 @@ def test_simplified_interface_failed(mocker, hyperopt_conf, space) -> None: def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmp_path, fee) -> None: patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) # Dummy-reduce points to ensure scikit-learn is forced to generate new values - mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2) - (tmp_path / 'hyperopt_results').mkdir(parents=True) + mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2) + (tmp_path / "hyperopt_results").mkdir(parents=True) # No hyperopt needed - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 'hyperopt_random_state': 42, - 'spaces': ['all'], - }) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["all"], + } + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0) assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) @@ -896,32 +1054,33 @@ def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmp_path, fee) -> None assert hyperopt.backtesting.strategy.sell_rsi.value != 74 assert hyperopt.backtesting.strategy.max_open_trades != 1 - hyperopt.custom_hyperopt.generate_estimator = lambda *args, **kwargs: 'ET1' + hyperopt.custom_hyperopt.generate_estimator = lambda *args, **kwargs: "ET1" with 
pytest.raises(OperationalException, match="Estimator ET1 not supported."): hyperopt.get_optimizer([], 2) @pytest.mark.filterwarnings("ignore::DeprecationWarning") def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmp_path, fee) -> None: - mocker.patch(f'{EXMS}.validate_config', MagicMock()) - mocker.patch(f'{EXMS}.get_fee', fee) - mocker.patch(f'{EXMS}._load_markets') - mocker.patch(f'{EXMS}.markets', - PropertyMock(return_value=get_markets())) - (tmp_path / 'hyperopt_results').mkdir(parents=True) + mocker.patch(f"{EXMS}.validate_config", MagicMock()) + mocker.patch(f"{EXMS}.get_fee", fee) + mocker.patch(f"{EXMS}._load_markets") + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=get_markets())) + (tmp_path / "hyperopt_results").mkdir(parents=True) # Dummy-reduce points to ensure scikit-learn is forced to generate new values - mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2) + mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2) # No hyperopt needed - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 'hyperopt_random_state': 42, - 'spaces': ['all'], - # Enforce parallelity - 'epochs': 2, - 'hyperopt_jobs': 2, - 'fee': fee.return_value, - }) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["all"], + # Enforce parallelity + "epochs": 2, + "hyperopt_jobs": 2, + "fee": fee.return_value, + } + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.exchange.get_max_leverage = lambda *x, **xx: 1.0 hyperopt.backtesting.exchange.get_min_pair_stake_amount = lambda *x, **xx: 0.00001 @@ -947,23 +1106,28 @@ def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmp_path def test_in_strategy_auto_hyperopt_per_epoch(mocker, hyperopt_conf, tmp_path, fee) -> None: patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_fee', fee) - (tmp_path / 'hyperopt_results').mkdir(parents=True) + mocker.patch(f"{EXMS}.get_fee", fee) + (tmp_path / "hyperopt_results").mkdir(parents=True) - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 'hyperopt_random_state': 42, - 'spaces': ['all'], - 'epochs': 3, - 'analyze_per_epoch': True, - }) - go = mocker.patch('freqtrade.optimize.hyperopt.Hyperopt.generate_optimizer', - return_value={ - 'loss': 0.05, - 'results_explanation': 'foo result', 'params': {}, - 'results_metrics': generate_result_metrics(), - }) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["all"], + "epochs": 3, + "analyze_per_epoch": True, + } + ) + go = mocker.patch( + "freqtrade.optimize.hyperopt.Hyperopt.generate_optimizer", + return_value={ + "loss": 0.05, + "results_explanation": "foo result", + "params": {}, + "results_metrics": generate_result_metrics(), + }, + ) hyperopt = Hyperopt(hyperopt_conf) hyperopt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0) assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) @@ -1006,20 +1170,22 @@ def test_stake_amount_unlimited_max_open_trades(mocker, hyperopt_conf, tmp_path, # This test is to ensure that unlimited max_open_trades are ignored for the backtesting # if we have an unlimited stake amount patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_fee', fee) - (tmp_path / 'hyperopt_results').mkdir(parents=True) - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 
'hyperopt_random_state': 42, - 'spaces': ['trades'], - 'stake_amount': 'unlimited' - }) + mocker.patch(f"{EXMS}.get_fee", fee) + (tmp_path / "hyperopt_results").mkdir(parents=True) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["trades"], + "stake_amount": "unlimited", + } + ) hyperopt = Hyperopt(hyperopt_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._get_params_dict', - return_value={ - 'max_open_trades': -1 - }) + mocker.patch( + "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict", + return_value={"max_open_trades": -1}, + ) assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) @@ -1034,19 +1200,21 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N # This test is to ensure that after hyperopting, max_open_trades is never # saved as inf in the output json params patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_fee', fee) - (tmp_path / 'hyperopt_results').mkdir(parents=True) - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 'hyperopt_random_state': 42, - 'spaces': ['trades'], - }) + mocker.patch(f"{EXMS}.get_fee", fee) + (tmp_path / "hyperopt_results").mkdir(parents=True) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["trades"], + } + ) hyperopt = Hyperopt(hyperopt_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._get_params_dict', - return_value={ - 'max_open_trades': -1 - }) + mocker.patch( + "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict", + return_value={"max_open_trades": -1}, + ) assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) @@ -1054,18 +1222,18 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N out, _err = capsys.readouterr() - assert 'max_open_trades = -1' in out - assert 'max_open_trades = inf' not in out + assert "max_open_trades = -1" in out + assert "max_open_trades = inf" not in out ############## - hyperopt_conf.update({'print_json': True}) + hyperopt_conf.update({"print_json": True}) hyperopt = Hyperopt(hyperopt_conf) - mocker.patch('freqtrade.optimize.hyperopt.Hyperopt._get_params_dict', - return_value={ - 'max_open_trades': -1 - }) + mocker.patch( + "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict", + return_value={"max_open_trades": -1}, + ) assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) @@ -1080,26 +1248,29 @@ def test_max_open_trades_consistency(mocker, hyperopt_conf, tmp_path, fee) -> No # This test is to ensure that max_open_trades is the same across all functions needing it # after it has been changed from the hyperopt patch_exchange(mocker) - mocker.patch(f'{EXMS}.get_fee', return_value=0) + mocker.patch(f"{EXMS}.get_fee", return_value=0) - (tmp_path / 'hyperopt_results').mkdir(parents=True) - hyperopt_conf.update({ - 'strategy': 'HyperoptableStrategy', - 'user_data_dir': tmp_path, - 'hyperopt_random_state': 42, - 'spaces': ['trades'], - 'stake_amount': 'unlimited', - 'dry_run_wallet': 8, - 'available_capital': 8, - 'dry_run': True, - 'epochs': 1 - }) + (tmp_path / "hyperopt_results").mkdir(parents=True) + hyperopt_conf.update( + { + "strategy": "HyperoptableStrategy", + "user_data_dir": tmp_path, + "hyperopt_random_state": 42, + "spaces": ["trades"], + "stake_amount": "unlimited", + "dry_run_wallet": 8, + "available_capital": 8, + "dry_run": True, + "epochs": 1, + } + ) hyperopt = Hyperopt(hyperopt_conf) 
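# Illustrative sketch (bounds arbitrary, import path is one valid option):
# test_max_open_trades_consistency swaps a one-dimension search space in just
# below via hyperopt.custom_hyperopt.max_open_trades_space. Outside the test
# suite the same override would normally live in the strategy's nested
# HyperOpt class, mirroring the documented stoploss_space/roi_space overrides.
from freqtrade.optimize.space import Integer

class HyperOpt:  # nested inside the strategy class in real usage
    def max_open_trades_space():  # no self - freqtrade looks these up unbound
        # Searched when "--spaces trades" (or "all") is selected.
        return [Integer(1, 10, name="max_open_trades")]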
assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto) hyperopt.custom_hyperopt.max_open_trades_space = lambda: [ - Integer(1, 10, name='max_open_trades')] + Integer(1, 10, name="max_open_trades") + ] first_time_evaluated = False @@ -1112,12 +1283,14 @@ def test_max_open_trades_consistency(mocker, hyperopt_conf, tmp_path, fee) -> No assert stake_amount == 1 first_time_evaluated = True return stake_amount + return wrapper hyperopt.backtesting.wallets._calculate_unlimited_stake_amount = stake_amount_interceptor( - hyperopt.backtesting.wallets._calculate_unlimited_stake_amount) + hyperopt.backtesting.wallets._calculate_unlimited_stake_amount + ) hyperopt.start() assert hyperopt.backtesting.strategy.max_open_trades == 8 - assert hyperopt.config['max_open_trades'] == 8 + assert hyperopt.config["max_open_trades"] == 8 diff --git a/tests/optimize/test_hyperopt_tools.py b/tests/optimize/test_hyperopt_tools.py index 47aba6b76..c8a54e462 100644 --- a/tests/optimize/test_hyperopt_tools.py +++ b/tests/optimize/test_hyperopt_tools.py @@ -15,13 +15,11 @@ from tests.conftest import CURRENT_TEST_STRATEGY, log_has, log_has_re # Functions for recurrent object patching def create_results() -> List[Dict]: - - return [{'loss': 1, 'result': 'foo', 'params': {}, 'is_best': True}] + return [{"loss": 1, "result": "foo", "params": {}, "is_best": True}] def test_save_results_saves_epochs(hyperopt, tmp_path, caplog) -> None: - - hyperopt.results_file = tmp_path / 'ut_results.fthypt' + hyperopt.results_file = tmp_path / "ut_results.fthypt" hyperopt_epochs = HyperoptTools.load_filtered_results(hyperopt.results_file, {}) assert log_has_re("Hyperopt file .* not found.", caplog) @@ -57,152 +55,296 @@ def test_save_results_saves_epochs(hyperopt, tmp_path, caplog) -> None: def test_load_previous_results2(mocker, testdatadir, caplog) -> None: - results_file = testdatadir / 'hyperopt_results_SampleStrategy.pickle' - with pytest.raises(OperationalException, - match=r"Legacy hyperopt results are no longer supported.*"): + results_file = testdatadir / "hyperopt_results_SampleStrategy.pickle" + with pytest.raises( + OperationalException, match=r"Legacy hyperopt results are no longer supported.*" + ): HyperoptTools.load_filtered_results(results_file, {}) -@pytest.mark.parametrize("spaces, expected_results", [ - (['buy'], - {'buy': True, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': False, - 'protection': False, 'trades': False}), - (['sell'], - {'buy': False, 'sell': True, 'roi': False, 'stoploss': False, 'trailing': False, - 'protection': False, 'trades': False}), - (['roi'], - {'buy': False, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False, - 'protection': False, 'trades': False}), - (['stoploss'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': True, 'trailing': False, - 'protection': False, 'trades': False}), - (['trailing'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': True, - 'protection': False, 'trades': False}), - (['buy', 'sell', 'roi', 'stoploss'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False, - 'protection': False, 'trades': False}), - (['buy', 'sell', 'roi', 'stoploss', 'trailing'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True, - 'protection': False, 'trades': False}), - (['buy', 'roi'], - {'buy': True, 'sell': False, 'roi': True, 'stoploss': False, 'trailing': False, - 'protection': False, 'trades': False}), - (['all'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': 
True, 'trailing': True, - 'protection': True, 'trades': True}), - (['default'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False, - 'protection': False, 'trades': False}), - (['default', 'trailing'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True, - 'protection': False, 'trades': False}), - (['all', 'buy'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True, - 'protection': True, 'trades': True}), - (['default', 'buy'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False, - 'protection': False, 'trades': False}), - (['all'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': True, - 'protection': True, 'trades': True}), - (['protection'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': False, - 'protection': True, 'trades': False}), - (['trades'], - {'buy': False, 'sell': False, 'roi': False, 'stoploss': False, 'trailing': False, - 'protection': False, 'trades': True}), - (['default', 'trades'], - {'buy': True, 'sell': True, 'roi': True, 'stoploss': True, 'trailing': False, - 'protection': False, 'trades': True}), -]) +@pytest.mark.parametrize( + "spaces, expected_results", + [ + ( + ["buy"], + { + "buy": True, + "sell": False, + "roi": False, + "stoploss": False, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["sell"], + { + "buy": False, + "sell": True, + "roi": False, + "stoploss": False, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["roi"], + { + "buy": False, + "sell": False, + "roi": True, + "stoploss": False, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["stoploss"], + { + "buy": False, + "sell": False, + "roi": False, + "stoploss": True, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["trailing"], + { + "buy": False, + "sell": False, + "roi": False, + "stoploss": False, + "trailing": True, + "protection": False, + "trades": False, + }, + ), + ( + ["buy", "sell", "roi", "stoploss"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["buy", "sell", "roi", "stoploss", "trailing"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": True, + "protection": False, + "trades": False, + }, + ), + ( + ["buy", "roi"], + { + "buy": True, + "sell": False, + "roi": True, + "stoploss": False, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["all"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": True, + "protection": True, + "trades": True, + }, + ), + ( + ["default"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["default", "trailing"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": True, + "protection": False, + "trades": False, + }, + ), + ( + ["all", "buy"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": True, + "protection": True, + "trades": True, + }, + ), + ( + ["default", "buy"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": False, + "protection": False, + "trades": False, + }, + ), + ( + ["all"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": True, + 
"protection": True, + "trades": True, + }, + ), + ( + ["protection"], + { + "buy": False, + "sell": False, + "roi": False, + "stoploss": False, + "trailing": False, + "protection": True, + "trades": False, + }, + ), + ( + ["trades"], + { + "buy": False, + "sell": False, + "roi": False, + "stoploss": False, + "trailing": False, + "protection": False, + "trades": True, + }, + ), + ( + ["default", "trades"], + { + "buy": True, + "sell": True, + "roi": True, + "stoploss": True, + "trailing": False, + "protection": False, + "trades": True, + }, + ), + ], +) def test_has_space(hyperopt_conf, spaces, expected_results): - for s in ['buy', 'sell', 'roi', 'stoploss', 'trailing', 'protection', 'trades']: - hyperopt_conf.update({'spaces': spaces}) + for s in ["buy", "sell", "roi", "stoploss", "trailing", "protection", "trades"]: + hyperopt_conf.update({"spaces": spaces}) assert HyperoptTools.has_space(hyperopt_conf, s) == expected_results[s] def test_show_epoch_details(capsys): test_result = { - 'params_details': { - 'trailing': { - 'trailing_stop': True, - 'trailing_stop_positive': 0.02, - 'trailing_stop_positive_offset': 0.04, - 'trailing_only_offset_is_reached': True + "params_details": { + "trailing": { + "trailing_stop": True, + "trailing_stop_positive": 0.02, + "trailing_stop_positive_offset": 0.04, + "trailing_only_offset_is_reached": True, }, - 'roi': { - 0: 0.18, - 90: 0.14, - 225: 0.05, - 430: 0}, + "roi": {0: 0.18, 90: 0.14, 225: 0.05, 430: 0}, }, - 'results_explanation': 'foo result', - 'is_initial_point': False, - 'total_profit': 0, - 'current_epoch': 2, # This starts from 1 (in a human-friendly manner) - 'is_best': True + "results_explanation": "foo result", + "is_initial_point": False, + "total_profit": 0, + "current_epoch": 2, # This starts from 1 (in a human-friendly manner) + "is_best": True, } HyperoptTools.show_epoch_details(test_result, 5, False, no_header=True) captured = capsys.readouterr() - assert '# Trailing stop:' in captured.out + assert "# Trailing stop:" in captured.out # re.match(r"Pairs for .*", captured.out) - assert re.search(r'^\s+trailing_stop = True$', captured.out, re.MULTILINE) - assert re.search(r'^\s+trailing_stop_positive = 0.02$', captured.out, re.MULTILINE) - assert re.search(r'^\s+trailing_stop_positive_offset = 0.04$', captured.out, re.MULTILINE) - assert re.search(r'^\s+trailing_only_offset_is_reached = True$', captured.out, re.MULTILINE) + assert re.search(r"^\s+trailing_stop = True$", captured.out, re.MULTILINE) + assert re.search(r"^\s+trailing_stop_positive = 0.02$", captured.out, re.MULTILINE) + assert re.search(r"^\s+trailing_stop_positive_offset = 0.04$", captured.out, re.MULTILINE) + assert re.search(r"^\s+trailing_only_offset_is_reached = True$", captured.out, re.MULTILINE) - assert '# ROI table:' in captured.out - assert re.search(r'^\s+minimal_roi = \{$', captured.out, re.MULTILINE) - assert re.search(r'^\s+\"90\"\:\s0.14,\s*$', captured.out, re.MULTILINE) + assert "# ROI table:" in captured.out + assert re.search(r"^\s+minimal_roi = \{$", captured.out, re.MULTILINE) + assert re.search(r"^\s+\"90\"\:\s0.14,\s*$", captured.out, re.MULTILINE) def test__pprint_dict(): - params = {'buy_std': 1.2, 'buy_rsi': 31, 'buy_enable': True, 'buy_what': 'asdf'} - non_params = {'buy_notoptimied': 55} + params = {"buy_std": 1.2, "buy_rsi": 31, "buy_enable": True, "buy_what": "asdf"} + non_params = {"buy_notoptimied": 55} x = HyperoptTools._pprint_dict(params, non_params) - assert x == """{ + assert ( + x + == """{ "buy_std": 1.2, "buy_rsi": 31, "buy_enable": 
True, "buy_what": "asdf", "buy_notoptimied": 55, # value loaded from strategy }""" + ) def test_get_strategy_filename(default_conf, tmp_path): - default_conf['user_data_dir'] = tmp_path - x = HyperoptTools.get_strategy_filename(default_conf, 'StrategyTestV3') + default_conf["user_data_dir"] = tmp_path + x = HyperoptTools.get_strategy_filename(default_conf, "StrategyTestV3") assert isinstance(x, Path) - assert x == Path(__file__).parents[1] / 'strategy/strats/strategy_test_v3.py' + assert x == Path(__file__).parents[1] / "strategy/strats/strategy_test_v3.py" - x = HyperoptTools.get_strategy_filename(default_conf, 'NonExistingStrategy') + x = HyperoptTools.get_strategy_filename(default_conf, "NonExistingStrategy") assert x is None def test_export_params(tmp_path): - filename = tmp_path / f"{CURRENT_TEST_STRATEGY}.json" assert not filename.is_file() params = { "params_details": { - "buy": { - "buy_rsi": 30 - }, - "sell": { - "sell_rsi": 70 - }, - "roi": { - "0": 0.528, - "346": 0.08499, - "507": 0.049, - "1595": 0 - }, - "max_open_trades": { - "max_open_trades": 5 - } + "buy": {"buy_rsi": 30}, + "sell": {"sell_rsi": 70}, + "roi": {"0": 0.528, "346": 0.08499, "507": 0.049, "1595": 0}, + "max_open_trades": {"max_open_trades": 5}, }, "params_not_optimized": { "stoploss": -0.05, @@ -210,19 +352,18 @@ def test_export_params(tmp_path): "trailing_stop": False, "trailing_stop_positive": 0.05, "trailing_stop_positive_offset": 0.1, - "trailing_only_offset_is_reached": True + "trailing_only_offset_is_reached": True, }, - } - + }, } HyperoptTools.export_params(params, CURRENT_TEST_STRATEGY, filename) assert filename.is_file() - with filename.open('r') as f: + with filename.open("r") as f: content = rapidjson.load(f) - assert content['strategy_name'] == CURRENT_TEST_STRATEGY - assert 'params' in content + assert content["strategy_name"] == CURRENT_TEST_STRATEGY + assert "params" in content assert "buy" in content["params"] assert "sell" in content["params"] assert "roi" in content["params"] @@ -232,26 +373,17 @@ def test_export_params(tmp_path): def test_try_export_params(default_conf, tmp_path, caplog, mocker): - default_conf['disableparamexport'] = False - default_conf['user_data_dir'] = tmp_path + default_conf["disableparamexport"] = False + default_conf["user_data_dir"] = tmp_path export_mock = mocker.patch("freqtrade.optimize.hyperopt_tools.HyperoptTools.export_params") filename = tmp_path / f"{CURRENT_TEST_STRATEGY}.json" assert not filename.is_file() params = { "params_details": { - "buy": { - "buy_rsi": 30 - }, - "sell": { - "sell_rsi": 70 - }, - "roi": { - "0": 0.528, - "346": 0.08499, - "507": 0.049, - "1595": 0 - } + "buy": {"buy_rsi": 30}, + "sell": {"sell_rsi": 70}, + "roi": {"0": 0.528, "346": 0.08499, "507": 0.049, "1595": 0}, }, "params_not_optimized": { "stoploss": -0.05, @@ -259,11 +391,10 @@ def test_try_export_params(default_conf, tmp_path, caplog, mocker): "trailing_stop": False, "trailing_stop_positive": 0.05, "trailing_stop_positive_offset": 0.1, - "trailing_only_offset_is_reached": True + "trailing_only_offset_is_reached": True, }, }, FTHYPT_FILEVERSION: 2, - } HyperoptTools.try_export_params(default_conf, "StrategyTestVXXX", params) @@ -275,26 +406,17 @@ def test_try_export_params(default_conf, tmp_path, caplog, mocker): assert export_mock.call_count == 1 assert export_mock.call_args_list[0][0][1] == CURRENT_TEST_STRATEGY - assert export_mock.call_args_list[0][0][2].name == 'strategy_test_v3.json' + assert export_mock.call_args_list[0][0][2].name == "strategy_test_v3.json" def 
test_params_print(capsys): - params = { - "buy": { - "buy_rsi": 30 - }, - "sell": { - "sell_rsi": 70 - }, + "buy": {"buy_rsi": 30}, + "sell": {"sell_rsi": 70}, } non_optimized = { - "buy": { - "buy_adx": 44 - }, - "sell": { - "sell_adx": 65 - }, + "buy": {"buy_adx": 44}, + "sell": {"sell_adx": 65}, "stoploss": { "stoploss": -0.05, }, @@ -306,14 +428,11 @@ def test_params_print(capsys): "trailing_stop": False, "trailing_stop_positive": 0.05, "trailing_stop_positive_offset": 0.1, - "trailing_only_offset_is_reached": True + "trailing_only_offset_is_reached": True, }, - "max_open_trades": { - "max_open_trades": 5 - } - + "max_open_trades": {"max_open_trades": 5}, } - HyperoptTools._params_pretty_print(params, 'buy', 'No header', non_optimized) + HyperoptTools._params_pretty_print(params, "buy", "No header", non_optimized) captured = capsys.readouterr() assert re.search("# No header", captured.out) @@ -321,36 +440,34 @@ def test_params_print(capsys): assert re.search('"buy_adx": 44, # value loaded.*\n', captured.out) assert not re.search("sell", captured.out) - HyperoptTools._params_pretty_print(params, 'sell', 'Sell Header', non_optimized) + HyperoptTools._params_pretty_print(params, "sell", "Sell Header", non_optimized) captured = capsys.readouterr() assert re.search("# Sell Header", captured.out) assert re.search('"sell_rsi": 70,\n', captured.out) assert re.search('"sell_adx": 65, # value loaded.*\n', captured.out) - HyperoptTools._params_pretty_print(params, 'roi', 'ROI Table:', non_optimized) + HyperoptTools._params_pretty_print(params, "roi", "ROI Table:", non_optimized) captured = capsys.readouterr() assert re.search("# ROI Table: # value loaded.*\n", captured.out) - assert re.search('minimal_roi = {\n', captured.out) + assert re.search("minimal_roi = {\n", captured.out) assert re.search('"20": 0.01\n', captured.out) - HyperoptTools._params_pretty_print(params, 'trailing', 'Trailing stop:', non_optimized) + HyperoptTools._params_pretty_print(params, "trailing", "Trailing stop:", non_optimized) captured = capsys.readouterr() assert re.search("# Trailing stop:", captured.out) - assert re.search('trailing_stop = False # value loaded.*\n', captured.out) - assert re.search('trailing_stop_positive = 0.05 # value loaded.*\n', captured.out) - assert re.search('trailing_stop_positive_offset = 0.1 # value loaded.*\n', captured.out) - assert re.search('trailing_only_offset_is_reached = True # value loaded.*\n', captured.out) + assert re.search("trailing_stop = False # value loaded.*\n", captured.out) + assert re.search("trailing_stop_positive = 0.05 # value loaded.*\n", captured.out) + assert re.search("trailing_stop_positive_offset = 0.1 # value loaded.*\n", captured.out) + assert re.search("trailing_only_offset_is_reached = True # value loaded.*\n", captured.out) - HyperoptTools._params_pretty_print( - params, 'max_open_trades', "Max Open Trades:", non_optimized) + HyperoptTools._params_pretty_print(params, "max_open_trades", "Max Open Trades:", non_optimized) captured = capsys.readouterr() assert re.search("# Max Open Trades:", captured.out) - assert re.search('max_open_trades = 5 # value loaded.*\n', captured.out) + assert re.search("max_open_trades = 5 # value loaded.*\n", captured.out) def test_hyperopt_serializer(): - assert isinstance(hyperopt_serializer(np.int_(5)), int) assert isinstance(hyperopt_serializer(np.bool_(True)), bool) assert isinstance(hyperopt_serializer(np.bool_(False)), bool) diff --git a/tests/optimize/test_hyperoptloss.py b/tests/optimize/test_hyperoptloss.py index 
be1c313f6..b78cdde30 100644 --- a/tests/optimize/test_hyperoptloss.py +++ b/tests/optimize/test_hyperoptloss.py @@ -9,96 +9,103 @@ from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver def test_hyperoptlossresolver_noname(default_conf): - with pytest.raises(OperationalException, - match="No Hyperopt loss set. Please use `--hyperopt-loss` to specify " - "the Hyperopt-Loss class to use."): + with pytest.raises( + OperationalException, + match="No Hyperopt loss set. Please use `--hyperopt-loss` to specify " + "the Hyperopt-Loss class to use.", + ): HyperOptLossResolver.load_hyperoptloss(default_conf) def test_hyperoptlossresolver(mocker, default_conf) -> None: - hl = ShortTradeDurHyperOptLoss mocker.patch( - 'freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object', - MagicMock(return_value=hl()) + "freqtrade.resolvers.hyperopt_resolver.HyperOptLossResolver.load_object", + MagicMock(return_value=hl()), ) - default_conf.update({'hyperopt_loss': 'SharpeHyperOptLossDaily'}) + default_conf.update({"hyperopt_loss": "SharpeHyperOptLossDaily"}) x = HyperOptLossResolver.load_hyperoptloss(default_conf) assert hasattr(x, "hyperopt_loss_function") def test_hyperoptlossresolver_wrongname(default_conf) -> None: - default_conf.update({'hyperopt_loss': "NonExistingLossClass"}) + default_conf.update({"hyperopt_loss": "NonExistingLossClass"}) - with pytest.raises(OperationalException, match=r'Impossible to load HyperoptLoss.*'): + with pytest.raises(OperationalException, match=r"Impossible to load HyperoptLoss.*"): HyperOptLossResolver.load_hyperoptloss(default_conf) def test_loss_calculation_prefer_correct_trade_count(hyperopt_conf, hyperopt_results) -> None: - hyperopt_conf.update({'hyperopt_loss': "ShortTradeDurHyperOptLoss"}) + hyperopt_conf.update({"hyperopt_loss": "ShortTradeDurHyperOptLoss"}) hl = HyperOptLossResolver.load_hyperoptloss(hyperopt_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(hyperopt_results, 600 + 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(hyperopt_results, 600 - 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function( + hyperopt_results, 600, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) + over = hl.hyperopt_loss_function( + hyperopt_results, 600 + 100, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) + under = hl.hyperopt_loss_function( + hyperopt_results, 600 - 100, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) assert over > correct assert under > correct def test_loss_calculation_prefer_shorter_trades(hyperopt_conf, hyperopt_results) -> None: resultsb = hyperopt_results.copy() - resultsb.loc[1, 'trade_duration'] = 20 + resultsb.loc[1, "trade_duration"] = 20 - hyperopt_conf.update({'hyperopt_loss': "ShortTradeDurHyperOptLoss"}) + hyperopt_conf.update({"hyperopt_loss": "ShortTradeDurHyperOptLoss"}) hl = HyperOptLossResolver.load_hyperoptloss(hyperopt_conf) - longer = hl.hyperopt_loss_function(hyperopt_results, 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - shorter = hl.hyperopt_loss_function(resultsb, 100, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + longer = hl.hyperopt_loss_function( + hyperopt_results, 100, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) + shorter = hl.hyperopt_loss_function(resultsb, 100, datetime(2019, 1, 1), datetime(2019, 5, 1)) assert shorter < longer def test_loss_calculation_has_limited_profit(hyperopt_conf, hyperopt_results) -> None: 
results_over = hyperopt_results.copy() - results_over['profit_ratio'] = hyperopt_results['profit_ratio'] * 2 + results_over["profit_ratio"] = hyperopt_results["profit_ratio"] * 2 results_under = hyperopt_results.copy() - results_under['profit_ratio'] = hyperopt_results['profit_ratio'] / 2 + results_under["profit_ratio"] = hyperopt_results["profit_ratio"] / 2 - hyperopt_conf.update({'hyperopt_loss': "ShortTradeDurHyperOptLoss"}) + hyperopt_conf.update({"hyperopt_loss": "ShortTradeDurHyperOptLoss"}) hl = HyperOptLossResolver.load_hyperoptloss(hyperopt_conf) - correct = hl.hyperopt_loss_function(hyperopt_results, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - over = hl.hyperopt_loss_function(results_over, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) - under = hl.hyperopt_loss_function(results_under, 600, - datetime(2019, 1, 1), datetime(2019, 5, 1)) + correct = hl.hyperopt_loss_function( + hyperopt_results, 600, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) + over = hl.hyperopt_loss_function(results_over, 600, datetime(2019, 1, 1), datetime(2019, 5, 1)) + under = hl.hyperopt_loss_function( + results_under, 600, datetime(2019, 1, 1), datetime(2019, 5, 1) + ) assert over < correct assert under > correct -@pytest.mark.parametrize('lossfunction', [ - "OnlyProfitHyperOptLoss", - "SortinoHyperOptLoss", - "SortinoHyperOptLossDaily", - "SharpeHyperOptLoss", - "SharpeHyperOptLossDaily", - "MaxDrawDownHyperOptLoss", - "MaxDrawDownRelativeHyperOptLoss", - "CalmarHyperOptLoss", - "ProfitDrawDownHyperOptLoss", - -]) +@pytest.mark.parametrize( + "lossfunction", + [ + "OnlyProfitHyperOptLoss", + "SortinoHyperOptLoss", + "SortinoHyperOptLossDaily", + "SharpeHyperOptLoss", + "SharpeHyperOptLossDaily", + "MaxDrawDownHyperOptLoss", + "MaxDrawDownRelativeHyperOptLoss", + "CalmarHyperOptLoss", + "ProfitDrawDownHyperOptLoss", + ], +) def test_loss_functions_better_profits(default_conf, hyperopt_results, lossfunction) -> None: results_over = hyperopt_results.copy() - results_over['profit_abs'] = hyperopt_results['profit_abs'] * 2 + 0.2 - results_over['profit_ratio'] = hyperopt_results['profit_ratio'] * 2 + results_over["profit_abs"] = hyperopt_results["profit_abs"] * 2 + 0.2 + results_over["profit_ratio"] = hyperopt_results["profit_ratio"] * 2 results_under = hyperopt_results.copy() - results_under['profit_abs'] = hyperopt_results['profit_abs'] / 2 - 0.2 - results_under['profit_ratio'] = hyperopt_results['profit_ratio'] / 2 + results_under["profit_abs"] = hyperopt_results["profit_abs"] / 2 - 0.2 + results_under["profit_ratio"] = hyperopt_results["profit_ratio"] / 2 - default_conf.update({'hyperopt_loss': lossfunction}) + default_conf.update({"hyperopt_loss": lossfunction}) hl = HyperOptLossResolver.load_hyperoptloss(default_conf) correct = hl.hyperopt_loss_function( hyperopt_results, @@ -107,7 +114,7 @@ def test_loss_functions_better_profits(default_conf, hyperopt_results, lossfunct max_date=datetime(2019, 5, 1), config=default_conf, processed=None, - backtest_stats={'profit_total': hyperopt_results['profit_abs'].sum()} + backtest_stats={"profit_total": hyperopt_results["profit_abs"].sum()}, ) over = hl.hyperopt_loss_function( results_over, @@ -116,7 +123,7 @@ def test_loss_functions_better_profits(default_conf, hyperopt_results, lossfunct max_date=datetime(2019, 5, 1), config=default_conf, processed=None, - backtest_stats={'profit_total': results_over['profit_abs'].sum()} + backtest_stats={"profit_total": results_over["profit_abs"].sum()}, ) under = hl.hyperopt_loss_function( results_under, @@ -125,7 
+132,7 @@ def test_loss_functions_better_profits(default_conf, hyperopt_results, lossfunct max_date=datetime(2019, 5, 1), config=default_conf, processed=None, - backtest_stats={'profit_total': results_under['profit_abs'].sum()} + backtest_stats={"profit_total": results_under["profit_abs"].sum()}, ) assert over < correct assert under > correct diff --git a/tests/optimize/test_lookahead_analysis.py b/tests/optimize/test_lookahead_analysis.py index 6c84663b6..88e3ad877 100644 --- a/tests/optimize/test_lookahead_analysis.py +++ b/tests/optimize/test_lookahead_analysis.py @@ -15,17 +15,18 @@ from tests.conftest import EXMS, get_args, log_has_re, patch_exchange @pytest.fixture def lookahead_conf(default_conf_usdt, tmp_path): - default_conf_usdt['user_data_dir'] = tmp_path - default_conf_usdt['minimum_trade_amount'] = 10 - default_conf_usdt['targeted_trade_amount'] = 20 - default_conf_usdt['timerange'] = '20220101-20220501' + default_conf_usdt["user_data_dir"] = tmp_path + default_conf_usdt["minimum_trade_amount"] = 10 + default_conf_usdt["targeted_trade_amount"] = 20 + default_conf_usdt["timerange"] = "20220101-20220501" - default_conf_usdt['strategy_path'] = str( - Path(__file__).parent.parent / "strategy/strats/lookahead_bias") - default_conf_usdt['strategy'] = 'strategy_test_v3_with_lookahead_bias' - default_conf_usdt['max_open_trades'] = 1 - default_conf_usdt['dry_run_wallet'] = 1000000000 - default_conf_usdt['pairs'] = ['UNITTEST/USDT'] + default_conf_usdt["strategy_path"] = str( + Path(__file__).parent.parent / "strategy/strats/lookahead_bias" + ) + default_conf_usdt["strategy"] = "strategy_test_v3_with_lookahead_bias" + default_conf_usdt["max_open_trades"] = 1 + default_conf_usdt["dry_run_wallet"] = 1000000000 + default_conf_usdt["pairs"] = ["UNITTEST/USDT"] return default_conf_usdt @@ -33,7 +34,7 @@ def test_start_lookahead_analysis(mocker): single_mock = MagicMock() text_table_mock = MagicMock() mocker.patch.multiple( - 'freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions', + "freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions", initialize_single_lookahead_analysis=single_mock, text_table_lookahead_analysis_instances=text_table_mock, ) @@ -48,10 +49,10 @@ def test_start_lookahead_analysis(mocker): "--max-open-trades", "1", "--timerange", - "20220101-20220201" + "20220101-20220201", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_lookahead_analysis(pargs) assert single_mock.call_count == 1 @@ -72,9 +73,11 @@ def test_start_lookahead_analysis(mocker): "20", ] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"Targeted trade amount can't be smaller than minimum trade amount.*"): + pargs["config"] = None + with pytest.raises( + OperationalException, + match=r"Targeted trade amount can't be smaller than minimum trade amount.*", + ): start_lookahead_analysis(pargs) # Missing timerange @@ -90,27 +93,27 @@ def test_start_lookahead_analysis(mocker): "1", ] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"Please set a timerange\..*"): + pargs["config"] = None + with pytest.raises(OperationalException, match=r"Please set a timerange\..*"): start_lookahead_analysis(pargs) def test_lookahead_helper_invalid_config(lookahead_conf) -> None: conf = deepcopy(lookahead_conf) - conf['targeted_trade_amount'] = 10 - conf['minimum_trade_amount'] = 40 - with pytest.raises(OperationalException, - match=r"Targeted trade amount 
can't be smaller than minimum trade amount.*"): + conf["targeted_trade_amount"] = 10 + conf["minimum_trade_amount"] = 40 + with pytest.raises( + OperationalException, + match=r"Targeted trade amount can't be smaller than minimum trade amount.*", + ): LookaheadAnalysisSubFunctions.start(conf) def test_lookahead_helper_no_strategy_defined(lookahead_conf): conf = deepcopy(lookahead_conf) - conf['pairs'] = ['UNITTEST/USDT'] - del conf['strategy'] - with pytest.raises(OperationalException, - match=r"No Strategy specified"): + conf["pairs"] = ["UNITTEST/USDT"] + del conf["strategy"] + with pytest.raises(OperationalException, match=r"No Strategy specified"): LookaheadAnalysisSubFunctions.start(conf) @@ -118,7 +121,7 @@ def test_lookahead_helper_start(lookahead_conf, mocker) -> None: single_mock = MagicMock() text_table_mock = MagicMock() mocker.patch.multiple( - 'freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions', + "freqtrade.optimize.analysis.lookahead_helpers.LookaheadAnalysisSubFunctions", initialize_single_lookahead_analysis=single_mock, text_table_lookahead_analysis_instances=text_table_mock, ) @@ -138,19 +141,20 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf analysis.false_exit_signals = 3 strategy_obj = { - 'name': "strategy_test_v3_with_lookahead_bias", - 'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py") + "name": "strategy_test_v3_with_lookahead_bias", + "location": Path(lookahead_conf["strategy_path"], f"{lookahead_conf['strategy']}.py"), } instance = LookaheadAnalysis(lookahead_conf, strategy_obj) instance.current_analysis = analysis - _table, _headers, data = (LookaheadAnalysisSubFunctions. - text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance] + ) # check row contents for a try that has too few signals - assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py' - assert data[0][1] == 'strategy_test_v3_with_lookahead_bias' - assert data[0][2].__contains__('too few trades') + assert data[0][0] == "strategy_test_v3_with_lookahead_bias.py" + assert data[0][1] == "strategy_test_v3_with_lookahead_bias" + assert data[0][2].__contains__("too few trades") assert len(data[0]) == 3 # now check for an error which occurred after enough trades @@ -159,46 +163,51 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf analysis.false_exit_signals = 10 instance = LookaheadAnalysis(lookahead_conf, strategy_obj) instance.current_analysis = analysis - _table, _headers, data = (LookaheadAnalysisSubFunctions. - text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance] + ) assert data[0][2].__contains__("error") # edit it into not showing an error instance.failed_bias_check = False - _table, _headers, data = (LookaheadAnalysisSubFunctions. 
- text_table_lookahead_analysis_instances(lookahead_conf, [instance])) - assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py' - assert data[0][1] == 'strategy_test_v3_with_lookahead_bias' + _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance] + ) + assert data[0][0] == "strategy_test_v3_with_lookahead_bias.py" + assert data[0][1] == "strategy_test_v3_with_lookahead_bias" assert data[0][2] # True assert data[0][3] == 12 assert data[0][4] == 11 assert data[0][5] == 10 - assert data[0][6] == '' + assert data[0][6] == "" - analysis.false_indicators.append('falseIndicator1') - analysis.false_indicators.append('falseIndicator2') - _table, _headers, data = (LookaheadAnalysisSubFunctions. - text_table_lookahead_analysis_instances(lookahead_conf, [instance])) + analysis.false_indicators.append("falseIndicator1") + analysis.false_indicators.append("falseIndicator2") + _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance] + ) - assert data[0][6] == 'falseIndicator1, falseIndicator2' + assert data[0][6] == "falseIndicator1, falseIndicator2" # check amount of returning rows assert len(data) == 1 # check amount of multiple rows - _table, _headers, data = (LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( - lookahead_conf, [instance, instance, instance])) + _table, _headers, data = LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( + lookahead_conf, [instance, instance, instance] + ) assert len(data) == 3 def test_lookahead_helper_export_to_csv(lookahead_conf): import pandas as pd - lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + + lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv" # just to be sure the test won't fail: remove file if exists for some reason # (repeat this at the end once again to clean up) - if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists(): - Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink() + if Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists(): + Path(lookahead_conf["lookahead_analysis_exportfilename"]).unlink() # before we can start we have to delete the @@ -208,13 +217,13 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): analysis1.total_signals = 12 analysis1.false_entry_signals = 11 analysis1.false_exit_signals = 10 - analysis1.false_indicators.append('falseIndicator1') - analysis1.false_indicators.append('falseIndicator2') - lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + analysis1.false_indicators.append("falseIndicator1") + analysis1.false_indicators.append("falseIndicator2") + lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv" strategy_obj1 = { - 'name': "strat1", - 'location': Path("file1.py"), + "name": "strat1", + "location": Path("file1.py"), } instance1 = LookaheadAnalysis(lookahead_conf, strategy_obj1) @@ -222,30 +231,28 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): instance1.current_analysis = analysis1 LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance1]) - saved_data1 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + saved_data1 = pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"]) expected_values1 = [ - [ - 'file1.py', 'strat1', True, - 12, 11, 10, - "falseIndicator1,falseIndicator2" - ], + 
["file1.py", "strat1", True, 12, 11, 10, "falseIndicator1,falseIndicator2"], + ] + expected_columns = [ + "filename", + "strategy", + "has_bias", + "total_signals", + "biased_entry_signals", + "biased_exit_signals", + "biased_indicators", ] - expected_columns = ['filename', 'strategy', 'has_bias', - 'total_signals', 'biased_entry_signals', 'biased_exit_signals', - 'biased_indicators'] expected_data1 = pd.DataFrame(expected_values1, columns=expected_columns) - assert Path(lookahead_conf['lookahead_analysis_exportfilename']).exists() + assert Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists() assert expected_data1.equals(saved_data1) # 2nd check: update the same strategy (which internally changed or is being retested) expected_values2 = [ - [ - 'file1.py', 'strat1', False, - 22, 21, 20, - "falseIndicator3,falseIndicator4" - ], + ["file1.py", "strat1", False, 22, 21, 20, "falseIndicator3,falseIndicator4"], ] expected_data2 = pd.DataFrame(expected_values2, columns=expected_columns) @@ -254,12 +261,12 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): analysis2.total_signals = 22 analysis2.false_entry_signals = 21 analysis2.false_exit_signals = 20 - analysis2.false_indicators.append('falseIndicator3') - analysis2.false_indicators.append('falseIndicator4') + analysis2.false_indicators.append("falseIndicator3") + analysis2.false_indicators.append("falseIndicator4") strategy_obj2 = { - 'name': "strat1", - 'location': Path("file1.py"), + "name": "strat1", + "location": Path("file1.py"), } instance2 = LookaheadAnalysis(lookahead_conf, strategy_obj2) @@ -267,21 +274,14 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): instance2.current_analysis = analysis2 LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance2]) - saved_data2 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + saved_data2 = pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"]) assert expected_data2.equals(saved_data2) # 3rd check: now we add a new row to an already existing file expected_values3 = [ - [ - 'file1.py', 'strat1', False, - 22, 21, 20, - "falseIndicator3,falseIndicator4" - ], - [ - 'file3.py', 'strat3', True, - 32, 31, 30, "falseIndicator5,falseIndicator6" - ], + ["file1.py", "strat1", False, 22, 21, 20, "falseIndicator3,falseIndicator4"], + ["file3.py", "strat3", True, 32, 31, 30, "falseIndicator5,falseIndicator6"], ] expected_data3 = pd.DataFrame(expected_values3, columns=expected_columns) @@ -291,13 +291,13 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): analysis3.total_signals = 32 analysis3.false_entry_signals = 31 analysis3.false_exit_signals = 30 - analysis3.false_indicators.append('falseIndicator5') - analysis3.false_indicators.append('falseIndicator6') - lookahead_conf['lookahead_analysis_exportfilename'] = "temp_csv_lookahead_analysis.csv" + analysis3.false_indicators.append("falseIndicator5") + analysis3.false_indicators.append("falseIndicator6") + lookahead_conf["lookahead_analysis_exportfilename"] = "temp_csv_lookahead_analysis.csv" strategy_obj3 = { - 'name': "strat3", - 'location': Path("file3.py"), + "name": "strat3", + "location": Path("file3.py"), } instance3 = LookaheadAnalysis(lookahead_conf, strategy_obj3) @@ -305,67 +305,66 @@ def test_lookahead_helper_export_to_csv(lookahead_conf): instance3.current_analysis = analysis3 LookaheadAnalysisSubFunctions.export_to_csv(lookahead_conf, [instance3]) - saved_data3 = pd.read_csv(lookahead_conf['lookahead_analysis_exportfilename']) + saved_data3 = 
pd.read_csv(lookahead_conf["lookahead_analysis_exportfilename"]) assert expected_data3.equals(saved_data3) # remove csv file after the test is done - if Path(lookahead_conf['lookahead_analysis_exportfilename']).exists(): - Path(lookahead_conf['lookahead_analysis_exportfilename']).unlink() + if Path(lookahead_conf["lookahead_analysis_exportfilename"]).exists(): + Path(lookahead_conf["lookahead_analysis_exportfilename"]).unlink() def test_initialize_single_lookahead_analysis(lookahead_conf, mocker, caplog): - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) - mocker.patch(f'{EXMS}.get_fee', return_value=0.0) - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001) - mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf')) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) + mocker.patch(f"{EXMS}.get_fee", return_value=0.0) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) patch_exchange(mocker) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - lookahead_conf['pairs'] = ['UNITTEST/USDT'] + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + lookahead_conf["pairs"] = ["UNITTEST/USDT"] - lookahead_conf['timeframe'] = '5m' - lookahead_conf['timerange'] = '20180119-20180122' - start_mock = mocker.patch('freqtrade.optimize.analysis.lookahead.LookaheadAnalysis.start') + lookahead_conf["timeframe"] = "5m" + lookahead_conf["timerange"] = "20180119-20180122" + start_mock = mocker.patch("freqtrade.optimize.analysis.lookahead.LookaheadAnalysis.start") strategy_obj = { - 'name': "strategy_test_v3_with_lookahead_bias", - 'location': Path(lookahead_conf['strategy_path'], f"{lookahead_conf['strategy']}.py") + "name": "strategy_test_v3_with_lookahead_bias", + "location": Path(lookahead_conf["strategy_path"], f"{lookahead_conf['strategy']}.py"), } instance = LookaheadAnalysisSubFunctions.initialize_single_lookahead_analysis( - lookahead_conf, strategy_obj) + lookahead_conf, strategy_obj + ) assert log_has_re(r"Bias test of .* started\.", caplog) assert start_mock.call_count == 1 - assert instance.strategy_obj['name'] == "strategy_test_v3_with_lookahead_bias" + assert instance.strategy_obj["name"] == "strategy_test_v3_with_lookahead_bias" -@pytest.mark.parametrize('scenario', [ - 'no_bias', 'bias1' -]) +@pytest.mark.parametrize("scenario", ["no_bias", "bias1"]) def test_biased_strategy(lookahead_conf, mocker, caplog, scenario) -> None: - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) - mocker.patch(f'{EXMS}.get_fee', return_value=0.0) - mocker.patch(f'{EXMS}.get_min_pair_stake_amount', return_value=0.00001) - mocker.patch(f'{EXMS}.get_max_pair_stake_amount', return_value=float('inf')) patch_exchange(mocker) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - lookahead_conf['pairs'] = ['UNITTEST/USDT'] + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) + mocker.patch(f"{EXMS}.get_fee", return_value=0.0) + mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=0.00001) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + 
PropertyMock(return_value=["UNITTEST/BTC"]), + ) + lookahead_conf["pairs"] = ["UNITTEST/USDT"] - lookahead_conf['timeframe'] = '5m' - lookahead_conf['timerange'] = '20180119-20180122' + lookahead_conf["timeframe"] = "5m" + lookahead_conf["timerange"] = "20180119-20180122" # Patch scenario Parameter to allow for easy selection - mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file', - return_value={ - 'params': { - "buy": { - "scenario": scenario - } - } - }) + mocker.patch( + "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file", + return_value={"params": {"buy": {"scenario": scenario}}}, + ) - strategy_obj = {'name': "strategy_test_v3_with_lookahead_bias"} + strategy_obj = {"name": "strategy_test_v3_with_lookahead_bias"} instance = LookaheadAnalysis(lookahead_conf, strategy_obj) instance.start() # Assert init correct @@ -380,10 +379,10 @@ def test_biased_strategy(lookahead_conf, mocker, caplog, scenario) -> None: def test_config_overrides(lookahead_conf): - lookahead_conf['max_open_trades'] = 0 - lookahead_conf['dry_run_wallet'] = 1 - lookahead_conf['pairs'] = ['BTC/USDT', 'ETH/USDT', 'SOL/USDT'] + lookahead_conf["max_open_trades"] = 0 + lookahead_conf["dry_run_wallet"] = 1 + lookahead_conf["pairs"] = ["BTC/USDT", "ETH/USDT", "SOL/USDT"] lookahead_conf = LookaheadAnalysisSubFunctions.calculate_config_overrides(lookahead_conf) - assert lookahead_conf['dry_run_wallet'] == 1000000000 - assert lookahead_conf['max_open_trades'] == 3 + assert lookahead_conf["dry_run_wallet"] == 1000000000 + assert lookahead_conf["max_open_trades"] == 3 diff --git a/tests/optimize/test_optimize_reports.py b/tests/optimize/test_optimize_reports.py index f38fcb885..88e846d9d 100644 --- a/tests/optimize/test_optimize_reports.py +++ b/tests/optimize/test_optimize_reports.py @@ -10,21 +10,33 @@ import pytest from freqtrade.configuration import TimeRange from freqtrade.constants import BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN from freqtrade.data import history -from freqtrade.data.btanalysis import (get_latest_backtest_filename, load_backtest_data, - load_backtest_stats) +from freqtrade.data.btanalysis import ( + get_latest_backtest_filename, + load_backtest_data, + load_backtest_stats, +) from freqtrade.edge import PairInfo from freqtrade.enums import ExitType -from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_daily_stats, - generate_edge_table, generate_pair_metrics, - generate_periodic_breakdown_stats, - generate_strategy_comparison, - generate_trading_stats, show_sorted_pairlist, - store_backtest_analysis_results, - store_backtest_stats, text_table_bt_results, - text_table_strategy) +from freqtrade.optimize.optimize_reports import ( + generate_backtest_stats, + generate_daily_stats, + generate_edge_table, + generate_pair_metrics, + generate_periodic_breakdown_stats, + generate_strategy_comparison, + generate_trading_stats, + show_sorted_pairlist, + store_backtest_analysis_results, + store_backtest_stats, + text_table_bt_results, + text_table_strategy, +) from freqtrade.optimize.optimize_reports.bt_output import text_table_tags -from freqtrade.optimize.optimize_reports.optimize_reports import (_get_resample_from_period, - calc_streak, generate_tag_metrics) +from freqtrade.optimize.optimize_reports.optimize_reports import ( + _get_resample_from_period, + calc_streak, + generate_tag_metrics, +) from freqtrade.resolvers.strategy_resolver import StrategyResolver from freqtrade.util import dt_ts from 
freqtrade.util.datetime_helpers import dt_from_ts, dt_utc @@ -39,7 +51,7 @@ def _backup_file(file: Path, copy_file: bool = False) -> None: :param copy_file: keep file in place too. :return: None """ - file_swp = str(file) + '.swp' + file_swp = str(file) + ".swp" if file.is_file(): file.rename(file_swp) @@ -48,149 +60,172 @@ def _backup_file(file: Path, copy_file: bool = False) -> None: def test_text_table_bt_results(): - results = pd.DataFrame( { - 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], - 'profit_ratio': [0.1, 0.2, -0.05], - 'profit_abs': [0.2, 0.4, -0.1], - 'trade_duration': [10, 30, 20], + "pair": ["ETH/BTC", "ETH/BTC", "ETH/BTC"], + "profit_ratio": [0.1, 0.2, -0.05], + "profit_abs": [0.2, 0.4, -0.1], + "trade_duration": [10, 30, 20], } ) result_str = ( - '| Pair | Entries | Avg Profit % | Tot Profit BTC | ' - 'Tot Profit % | Avg Duration | Win Draw Loss Win% |\n' - '|---------+-----------+----------------+------------------+' - '----------------+----------------+-------------------------|\n' - '| ETH/BTC | 3 | 8.33 | 0.50000000 | ' - '12.50 | 0:20:00 | 2 0 1 66.7 |\n' - '| TOTAL | 3 | 8.33 | 0.50000000 | ' - '12.50 | 0:20:00 | 2 0 1 66.7 |' + "| Pair | Entries | Avg Profit % | Tot Profit BTC | " + "Tot Profit % | Avg Duration | Win Draw Loss Win% |\n" + "|---------+-----------+----------------+------------------+" + "----------------+----------------+-------------------------|\n" + "| ETH/BTC | 3 | 8.33 | 0.50000000 | " + "12.50 | 0:20:00 | 2 0 1 66.7 |\n" + "| TOTAL | 3 | 8.33 | 0.50000000 | " + "12.50 | 0:20:00 | 2 0 1 66.7 |" ) - pair_results = generate_pair_metrics(['ETH/BTC'], stake_currency='BTC', - starting_balance=4, results=results) - assert text_table_bt_results(pair_results, stake_currency='BTC') == result_str + pair_results = generate_pair_metrics( + ["ETH/BTC"], stake_currency="BTC", starting_balance=4, results=results + ) + assert text_table_bt_results(pair_results, stake_currency="BTC") == result_str def test_generate_backtest_stats(default_conf, testdatadir, tmp_path): - default_conf.update({'strategy': CURRENT_TEST_STRATEGY}) + default_conf.update({"strategy": CURRENT_TEST_STRATEGY}) StrategyResolver.load_strategy(default_conf) - results = {'DefStrat': { - 'results': pd.DataFrame({"pair": ["UNITTEST/BTC", "UNITTEST/BTC", - "UNITTEST/BTC", "UNITTEST/BTC"], - "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], - "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], - "open_date": [dt_utc(2017, 11, 14, 19, 32, 00), - dt_utc(2017, 11, 14, 21, 36, 00), - dt_utc(2017, 11, 14, 22, 12, 00), - dt_utc(2017, 11, 14, 22, 44, 00)], - "close_date": [dt_utc(2017, 11, 14, 21, 35, 00), - dt_utc(2017, 11, 14, 22, 10, 00), - dt_utc(2017, 11, 14, 22, 43, 00), - dt_utc(2017, 11, 14, 22, 58, 00)], - "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], - "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], - "trade_duration": [123, 34, 31, 14], - "is_open": [False, False, False, True], - "is_short": [False, False, False, False], - "stake_amount": [0.01, 0.01, 0.01, 0.01], - "exit_reason": [ExitType.ROI, ExitType.STOP_LOSS, - ExitType.ROI, ExitType.FORCE_EXIT] - }), - 'config': default_conf, - 'locks': [], - 'final_balance': 1000.02, - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'backtest_start_time': dt_ts() // 1000, - 'backtest_end_time': dt_ts() // 1000, - 'run_id': '123', + results = { + "DefStrat": { + "results": pd.DataFrame( + { + "pair": 
["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"], + "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780], + "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003], + "open_date": [ + dt_utc(2017, 11, 14, 19, 32, 00), + dt_utc(2017, 11, 14, 21, 36, 00), + dt_utc(2017, 11, 14, 22, 12, 00), + dt_utc(2017, 11, 14, 22, 44, 00), + ], + "close_date": [ + dt_utc(2017, 11, 14, 21, 35, 00), + dt_utc(2017, 11, 14, 22, 10, 00), + dt_utc(2017, 11, 14, 22, 43, 00), + dt_utc(2017, 11, 14, 22, 58, 00), + ], + "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], + "close_rate": [0.002546, 0.003014, 0.003103, 0.003217], + "trade_duration": [123, 34, 31, 14], + "is_open": [False, False, False, True], + "is_short": [False, False, False, False], + "stake_amount": [0.01, 0.01, 0.01, 0.01], + "exit_reason": [ + ExitType.ROI, + ExitType.STOP_LOSS, + ExitType.ROI, + ExitType.FORCE_EXIT, + ], + } + ), + "config": default_conf, + "locks": [], + "final_balance": 1000.02, + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "backtest_start_time": dt_ts() // 1000, + "backtest_end_time": dt_ts() // 1000, + "run_id": "123", } - } - timerange = TimeRange.parse_timerange('1510688220-1510700340') + } + timerange = TimeRange.parse_timerange("1510688220-1510700340") min_date = dt_from_ts(1510688220) max_date = dt_from_ts(1510700340) - btdata = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange, - fill_up_missing=True) + btdata = history.load_data( + testdatadir, "1m", ["UNITTEST/BTC"], timerange=timerange, fill_up_missing=True + ) stats = generate_backtest_stats(btdata, results, min_date, max_date) assert isinstance(stats, dict) - assert 'strategy' in stats - assert 'DefStrat' in stats['strategy'] - assert 'strategy_comparison' in stats - strat_stats = stats['strategy']['DefStrat'] - assert strat_stats['backtest_start'] == min_date.strftime(DATETIME_PRINT_FORMAT) - assert strat_stats['backtest_end'] == max_date.strftime(DATETIME_PRINT_FORMAT) - assert strat_stats['total_trades'] == len(results['DefStrat']['results']) + assert "strategy" in stats + assert "DefStrat" in stats["strategy"] + assert "strategy_comparison" in stats + strat_stats = stats["strategy"]["DefStrat"] + assert strat_stats["backtest_start"] == min_date.strftime(DATETIME_PRINT_FORMAT) + assert strat_stats["backtest_end"] == max_date.strftime(DATETIME_PRINT_FORMAT) + assert strat_stats["total_trades"] == len(results["DefStrat"]["results"]) # Above sample had no losing trade - assert strat_stats['max_drawdown_account'] == 0.0 + assert strat_stats["max_drawdown_account"] == 0.0 # Retry with losing trade - results = {'DefStrat': { - 'results': pd.DataFrame( - {"pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"], - "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780], - "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003], - "open_date": [dt_utc(2017, 11, 14, 19, 32, 00), - dt_utc(2017, 11, 14, 21, 36, 00), - dt_utc(2017, 11, 14, 22, 12, 00), - dt_utc(2017, 11, 14, 22, 44, 00)], - "close_date": [dt_utc(2017, 11, 14, 21, 35, 00), - dt_utc(2017, 11, 14, 22, 10, 00), - dt_utc(2017, 11, 14, 22, 43, 00), - dt_utc(2017, 11, 14, 22, 58, 00)], - "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], - "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217], - "trade_duration": [123, 34, 31, 14], - "is_open": [False, False, False, True], - "is_short": [False, False, False, 
False], - "stake_amount": [0.01, 0.01, 0.01, 0.01], - "exit_reason": [ExitType.ROI, ExitType.ROI, - ExitType.STOP_LOSS, ExitType.FORCE_EXIT] - }), - 'config': default_conf, - 'locks': [], - 'final_balance': 1000.02, - 'rejected_signals': 20, - 'timedout_entry_orders': 0, - 'timedout_exit_orders': 0, - 'canceled_trade_entries': 0, - 'canceled_entry_orders': 0, - 'replaced_entry_orders': 0, - 'backtest_start_time': dt_ts() // 1000, - 'backtest_end_time': dt_ts() // 1000, - 'run_id': '124', + results = { + "DefStrat": { + "results": pd.DataFrame( + { + "pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"], + "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780], + "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003], + "open_date": [ + dt_utc(2017, 11, 14, 19, 32, 00), + dt_utc(2017, 11, 14, 21, 36, 00), + dt_utc(2017, 11, 14, 22, 12, 00), + dt_utc(2017, 11, 14, 22, 44, 00), + ], + "close_date": [ + dt_utc(2017, 11, 14, 21, 35, 00), + dt_utc(2017, 11, 14, 22, 10, 00), + dt_utc(2017, 11, 14, 22, 43, 00), + dt_utc(2017, 11, 14, 22, 58, 00), + ], + "open_rate": [0.002543, 0.003003, 0.003089, 0.003214], + "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217], + "trade_duration": [123, 34, 31, 14], + "is_open": [False, False, False, True], + "is_short": [False, False, False, False], + "stake_amount": [0.01, 0.01, 0.01, 0.01], + "exit_reason": [ + ExitType.ROI, + ExitType.ROI, + ExitType.STOP_LOSS, + ExitType.FORCE_EXIT, + ], + } + ), + "config": default_conf, + "locks": [], + "final_balance": 1000.02, + "rejected_signals": 20, + "timedout_entry_orders": 0, + "timedout_exit_orders": 0, + "canceled_trade_entries": 0, + "canceled_entry_orders": 0, + "replaced_entry_orders": 0, + "backtest_start_time": dt_ts() // 1000, + "backtest_end_time": dt_ts() // 1000, + "run_id": "124", } } stats = generate_backtest_stats(btdata, results, min_date, max_date) assert isinstance(stats, dict) - assert 'strategy' in stats - assert 'DefStrat' in stats['strategy'] - assert 'strategy_comparison' in stats - strat_stats = stats['strategy']['DefStrat'] + assert "strategy" in stats + assert "DefStrat" in stats["strategy"] + assert "strategy_comparison" in stats + strat_stats = stats["strategy"]["DefStrat"] - assert pytest.approx(strat_stats['max_drawdown_account']) == 1.399999e-08 - assert strat_stats['drawdown_start'] == '2017-11-14 22:10:00' - assert strat_stats['drawdown_end'] == '2017-11-14 22:43:00' - assert strat_stats['drawdown_end_ts'] == 1510699380000 - assert strat_stats['drawdown_start_ts'] == 1510697400000 - assert strat_stats['pairlist'] == ['UNITTEST/BTC'] + assert pytest.approx(strat_stats["max_drawdown_account"]) == 1.399999e-08 + assert strat_stats["drawdown_start"] == "2017-11-14 22:10:00" + assert strat_stats["drawdown_end"] == "2017-11-14 22:43:00" + assert strat_stats["drawdown_end_ts"] == 1510699380000 + assert strat_stats["drawdown_start_ts"] == 1510697400000 + assert strat_stats["pairlist"] == ["UNITTEST/BTC"] # Test storing stats - filename = tmp_path / 'btresult.json' + filename = tmp_path / "btresult.json" filename_last = tmp_path / LAST_BT_RESULT_FN _backup_file(filename_last, copy_file=True) assert not filename.is_file() - store_backtest_stats(filename, stats, '2022_01_01_15_05_13') + store_backtest_stats(filename, stats, "2022_01_01_15_05_13") # get real Filename (it's btresult-.json) last_fn = get_latest_backtest_filename(filename_last.parent) @@ -199,9 +234,9 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmp_path): filename1 = tmp_path / last_fn 
assert filename1.is_file() content = filename1.read_text() - assert 'max_drawdown_account' in content - assert 'strategy' in content - assert 'pairlist' in content + assert "max_drawdown_account" in content + assert "strategy" in content + assert "pairlist" in content assert filename_last.is_file() @@ -210,149 +245,146 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmp_path): def test_store_backtest_stats(testdatadir, mocker): + dump_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_json") - dump_mock = mocker.patch('freqtrade.optimize.optimize_reports.bt_storage.file_dump_json') - - data = {'metadata': {}, 'strategy': {}, 'strategy_comparison': []} - store_backtest_stats(testdatadir, data, '2022_01_01_15_05_13') + data = {"metadata": {}, "strategy": {}, "strategy_comparison": []} + store_backtest_stats(testdatadir, data, "2022_01_01_15_05_13") assert dump_mock.call_count == 3 assert isinstance(dump_mock.call_args_list[0][0][0], Path) - assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / 'backtest-result')) + assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / "backtest-result")) dump_mock.reset_mock() - filename = testdatadir / 'testresult.json' - store_backtest_stats(filename, data, '2022_01_01_15_05_13') + filename = testdatadir / "testresult.json" + store_backtest_stats(filename, data, "2022_01_01_15_05_13") assert dump_mock.call_count == 3 assert isinstance(dump_mock.call_args_list[0][0][0], Path) # result will be testdatadir / testresult-.json - assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / 'testresult')) + assert str(dump_mock.call_args_list[0][0][0]).startswith(str(testdatadir / "testresult")) def test_store_backtest_stats_real(tmp_path): - data = {'metadata': {}, 'strategy': {}, 'strategy_comparison': []} - store_backtest_stats(tmp_path, data, '2022_01_01_15_05_13') + data = {"metadata": {}, "strategy": {}, "strategy_comparison": []} + store_backtest_stats(tmp_path, data, "2022_01_01_15_05_13") - assert (tmp_path / 'backtest-result-2022_01_01_15_05_13.json').is_file() - assert (tmp_path / 'backtest-result-2022_01_01_15_05_13.meta.json').is_file() - assert not (tmp_path / 'backtest-result-2022_01_01_15_05_13_market_change.feather').is_file() + assert (tmp_path / "backtest-result-2022_01_01_15_05_13.json").is_file() + assert (tmp_path / "backtest-result-2022_01_01_15_05_13.meta.json").is_file() + assert not (tmp_path / "backtest-result-2022_01_01_15_05_13_market_change.feather").is_file() assert (tmp_path / LAST_BT_RESULT_FN).is_file() fn = get_latest_backtest_filename(tmp_path) - assert fn == 'backtest-result-2022_01_01_15_05_13.json' + assert fn == "backtest-result-2022_01_01_15_05_13.json" - store_backtest_stats(tmp_path, data, '2024_01_01_15_05_25', market_change_data=pd.DataFrame()) - assert (tmp_path / 'backtest-result-2024_01_01_15_05_25.json').is_file() - assert (tmp_path / 'backtest-result-2024_01_01_15_05_25.meta.json').is_file() - assert (tmp_path / 'backtest-result-2024_01_01_15_05_25_market_change.feather').is_file() + store_backtest_stats(tmp_path, data, "2024_01_01_15_05_25", market_change_data=pd.DataFrame()) + assert (tmp_path / "backtest-result-2024_01_01_15_05_25.json").is_file() + assert (tmp_path / "backtest-result-2024_01_01_15_05_25.meta.json").is_file() + assert (tmp_path / "backtest-result-2024_01_01_15_05_25_market_change.feather").is_file() assert (tmp_path / LAST_BT_RESULT_FN).is_file() # Last file reference should be updated fn = 
get_latest_backtest_filename(tmp_path) - assert fn == 'backtest-result-2024_01_01_15_05_25.json' + assert fn == "backtest-result-2024_01_01_15_05_25.json" def test_store_backtest_candles(testdatadir, mocker): + dump_mock = mocker.patch("freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib") - dump_mock = mocker.patch( - 'freqtrade.optimize.optimize_reports.bt_storage.file_dump_joblib') - - candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}} + candle_dict = {"DefStrat": {"UNITTEST/BTC": pd.DataFrame()}} # mock directory exporting - store_backtest_analysis_results(testdatadir, candle_dict, {}, '2022_01_01_15_05_13') + store_backtest_analysis_results(testdatadir, candle_dict, {}, "2022_01_01_15_05_13") assert dump_mock.call_count == 2 assert isinstance(dump_mock.call_args_list[0][0][0], Path) - assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl') + assert str(dump_mock.call_args_list[0][0][0]).endswith("_signals.pkl") dump_mock.reset_mock() # mock file exporting - filename = Path(testdatadir / 'testresult') - store_backtest_analysis_results(filename, candle_dict, {}, '2022_01_01_15_05_13') + filename = Path(testdatadir / "testresult") + store_backtest_analysis_results(filename, candle_dict, {}, "2022_01_01_15_05_13") assert dump_mock.call_count == 2 assert isinstance(dump_mock.call_args_list[0][0][0], Path) # result will be testdatadir / testresult-_signals.pkl - assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl') + assert str(dump_mock.call_args_list[0][0][0]).endswith("_signals.pkl") dump_mock.reset_mock() def test_write_read_backtest_candles(tmp_path): - - candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}} + candle_dict = {"DefStrat": {"UNITTEST/BTC": pd.DataFrame()}} # test directory exporting - sample_date = '2022_01_01_15_05_13' + sample_date = "2022_01_01_15_05_13" store_backtest_analysis_results(tmp_path, candle_dict, {}, sample_date) - stored_file = tmp_path / f'backtest-result-{sample_date}_signals.pkl' + stored_file = tmp_path / f"backtest-result-{sample_date}_signals.pkl" with stored_file.open("rb") as scp: pickled_signal_candles = joblib.load(scp) assert pickled_signal_candles.keys() == candle_dict.keys() - assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys() - assert pickled_signal_candles['DefStrat']['UNITTEST/BTC'] \ - .equals(pickled_signal_candles['DefStrat']['UNITTEST/BTC']) + assert pickled_signal_candles["DefStrat"].keys() == pickled_signal_candles["DefStrat"].keys() + assert pickled_signal_candles["DefStrat"]["UNITTEST/BTC"].equals( + pickled_signal_candles["DefStrat"]["UNITTEST/BTC"] + ) _clean_test_file(stored_file) # test file exporting - filename = tmp_path / 'testresult' + filename = tmp_path / "testresult" store_backtest_analysis_results(filename, candle_dict, {}, sample_date) - stored_file = tmp_path / f'testresult-{sample_date}_signals.pkl' + stored_file = tmp_path / f"testresult-{sample_date}_signals.pkl" with stored_file.open("rb") as scp: pickled_signal_candles = joblib.load(scp) assert pickled_signal_candles.keys() == candle_dict.keys() - assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys() - assert pickled_signal_candles['DefStrat']['UNITTEST/BTC'] \ - .equals(pickled_signal_candles['DefStrat']['UNITTEST/BTC']) + assert pickled_signal_candles["DefStrat"].keys() == pickled_signal_candles["DefStrat"].keys() + assert pickled_signal_candles["DefStrat"]["UNITTEST/BTC"].equals( + 
pickled_signal_candles["DefStrat"]["UNITTEST/BTC"] + ) _clean_test_file(stored_file) def test_generate_pair_metrics(): - results = pd.DataFrame( { - 'pair': ['ETH/BTC', 'ETH/BTC'], - 'profit_ratio': [0.1, 0.2], - 'profit_abs': [0.2, 0.4], - 'trade_duration': [10, 30], - 'wins': [2, 0], - 'draws': [0, 0], - 'losses': [0, 0] + "pair": ["ETH/BTC", "ETH/BTC"], + "profit_ratio": [0.1, 0.2], + "profit_abs": [0.2, 0.4], + "trade_duration": [10, 30], + "wins": [2, 0], + "draws": [0, 0], + "losses": [0, 0], } ) - pair_results = generate_pair_metrics(['ETH/BTC'], stake_currency='BTC', - starting_balance=2, results=results) + pair_results = generate_pair_metrics( + ["ETH/BTC"], stake_currency="BTC", starting_balance=2, results=results + ) assert isinstance(pair_results, list) assert len(pair_results) == 2 - assert pair_results[-1]['key'] == 'TOTAL' + assert pair_results[-1]["key"] == "TOTAL" assert ( - pytest.approx(pair_results[-1]['profit_mean_pct']) == pair_results[-1]['profit_mean'] * 100) - assert ( - pytest.approx(pair_results[-1]['profit_sum_pct']) == pair_results[-1]['profit_sum'] * 100) + pytest.approx(pair_results[-1]["profit_mean_pct"]) == pair_results[-1]["profit_mean"] * 100 + ) + assert pytest.approx(pair_results[-1]["profit_sum_pct"]) == pair_results[-1]["profit_sum"] * 100 def test_generate_daily_stats(testdatadir): - filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_data(filename) res = generate_daily_stats(bt_data) assert isinstance(res, dict) - assert round(res['backtest_best_day'], 4) == 0.1796 - assert round(res['backtest_worst_day'], 4) == -0.1468 - assert res['winning_days'] == 19 - assert res['draw_days'] == 0 - assert res['losing_days'] == 2 + assert round(res["backtest_best_day"], 4) == 0.1796 + assert round(res["backtest_worst_day"], 4) == -0.1468 + assert res["winning_days"] == 19 + assert res["draw_days"] == 0 + assert res["losing_days"] == 2 # Select empty dataframe! - res = generate_daily_stats(bt_data.loc[bt_data['open_date'] == '2000-01-01', :]) + res = generate_daily_stats(bt_data.loc[bt_data["open_date"] == "2000-01-01", :]) assert isinstance(res, dict) - assert round(res['backtest_best_day'], 4) == 0.0 - assert res['winning_days'] == 0 - assert res['draw_days'] == 0 - assert res['losing_days'] == 0 + assert round(res["backtest_best_day"], 4) == 0.0 + assert res["winning_days"] == 0 + assert res["draw_days"] == 0 + assert res["losing_days"] == 0 def test_generate_trading_stats(testdatadir): @@ -360,22 +392,24 @@ def test_generate_trading_stats(testdatadir): bt_data = load_backtest_data(filename) res = generate_trading_stats(bt_data) assert isinstance(res, dict) - assert res['winner_holding_avg'] == timedelta(seconds=1440) - assert res['loser_holding_avg'] == timedelta(days=1, seconds=21420) - assert 'wins' in res - assert 'losses' in res - assert 'draws' in res + assert res["winner_holding_avg"] == timedelta(seconds=1440) + assert res["loser_holding_avg"] == timedelta(days=1, seconds=21420) + assert "wins" in res + assert "losses" in res + assert "draws" in res # Select empty dataframe! 
- res = generate_trading_stats(bt_data.loc[bt_data['open_date'] == '2000-01-01', :]) - assert res['wins'] == 0 - assert res['losses'] == 0 + res = generate_trading_stats(bt_data.loc[bt_data["open_date"] == "2000-01-01", :]) + assert res["wins"] == 0 + assert res["losses"] == 0 def test_calc_streak(testdatadir): - df = pd.DataFrame({ - 'profit_ratio': [0.05, -0.02, -0.03, -0.05, 0.01, 0.02, 0.03, 0.04, -0.02, -0.03], - }) + df = pd.DataFrame( + { + "profit_ratio": [0.05, -0.02, -0.03, -0.05, 0.01, 0.02, 0.03, 0.04, -0.02, -0.03], + } + ) # 4 consecutive wins, 3 consecutive losses res = calc_streak(df) assert res == (4, 3) @@ -384,12 +418,14 @@ def test_calc_streak(testdatadir): # invert situation df1 = df.copy() - df1['profit_ratio'] = df1['profit_ratio'] * -1 + df1["profit_ratio"] = df1["profit_ratio"] * -1 assert calc_streak(df1) == (3, 4) - df_empty = pd.DataFrame({ - 'profit_ratio': [], - }) + df_empty = pd.DataFrame( + { + "profit_ratio": [], + } + ) assert df_empty.empty assert calc_streak(df_empty) == (0, 0) @@ -399,131 +435,133 @@ def test_calc_streak(testdatadir): def test_text_table_exit_reason(): - results = pd.DataFrame( { - 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], - 'profit_ratio': [0.1, 0.2, -0.1], - 'profit_abs': [0.2, 0.4, -0.2], - 'trade_duration': [10, 30, 10], - 'wins': [2, 0, 0], - 'draws': [0, 0, 0], - 'losses': [0, 0, 1], - 'exit_reason': [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS] + "pair": ["ETH/BTC", "ETH/BTC", "ETH/BTC"], + "profit_ratio": [0.1, 0.2, -0.1], + "profit_abs": [0.2, 0.4, -0.2], + "trade_duration": [10, 30, 10], + "wins": [2, 0, 0], + "draws": [0, 0, 0], + "losses": [0, 0, 1], + "exit_reason": [ExitType.ROI, ExitType.ROI, ExitType.STOP_LOSS], } ) result_str = ( - '| Exit Reason | Exits | Avg Profit % | Tot Profit BTC | Tot Profit % |' - ' Avg Duration | Win Draw Loss Win% |\n' - '|---------------+---------+----------------+------------------+----------------+' - '----------------+-------------------------|\n' - '| roi | 2 | 15.00 | 0.60000000 | 2.73 |' - ' 0:20:00 | 2 0 0 100 |\n' - '| stop_loss | 1 | -10.00 | -0.20000000 | -0.91 |' - ' 0:10:00 | 0 0 1 0 |\n' - '| TOTAL | 3 | 6.67 | 0.40000000 | 1.82 |' - ' 0:17:00 | 2 0 1 66.7 |' + "| Exit Reason | Exits | Avg Profit % | Tot Profit BTC | Tot Profit % |" + " Avg Duration | Win Draw Loss Win% |\n" + "|---------------+---------+----------------+------------------+----------------+" + "----------------+-------------------------|\n" + "| roi | 2 | 15.00 | 0.60000000 | 2.73 |" + " 0:20:00 | 2 0 0 100 |\n" + "| stop_loss | 1 | -10.00 | -0.20000000 | -0.91 |" + " 0:10:00 | 0 0 1 0 |\n" + "| TOTAL | 3 | 6.67 | 0.40000000 | 1.82 |" + " 0:17:00 | 2 0 1 66.7 |" ) - exit_reason_stats = generate_tag_metrics('exit_reason', starting_balance=22, - results=results, skip_nan=False) - assert text_table_tags('exit_tag', exit_reason_stats, 'BTC') == result_str + exit_reason_stats = generate_tag_metrics( + "exit_reason", starting_balance=22, results=results, skip_nan=False + ) + assert text_table_tags("exit_tag", exit_reason_stats, "BTC") == result_str def test_generate_sell_reason_stats(): - results = pd.DataFrame( { - 'pair': ['ETH/BTC', 'ETH/BTC', 'ETH/BTC'], - 'profit_ratio': [0.1, 0.2, -0.1], - 'profit_abs': [0.2, 0.4, -0.2], - 'trade_duration': [10, 30, 10], - 'wins': [2, 0, 0], - 'draws': [0, 0, 0], - 'losses': [0, 0, 1], - 'exit_reason': [ExitType.ROI.value, ExitType.ROI.value, ExitType.STOP_LOSS.value] + "pair": ["ETH/BTC", "ETH/BTC", "ETH/BTC"], + "profit_ratio": [0.1, 0.2, -0.1], + "profit_abs": [0.2, 0.4, 
-0.2], + "trade_duration": [10, 30, 10], + "wins": [2, 0, 0], + "draws": [0, 0, 0], + "losses": [0, 0, 1], + "exit_reason": [ExitType.ROI.value, ExitType.ROI.value, ExitType.STOP_LOSS.value], } ) - exit_reason_stats = generate_tag_metrics('exit_reason', starting_balance=22, - results=results, skip_nan=False) + exit_reason_stats = generate_tag_metrics( + "exit_reason", starting_balance=22, results=results, skip_nan=False + ) roi_result = exit_reason_stats[0] - assert roi_result['key'] == 'roi' - assert roi_result['trades'] == 2 - assert pytest.approx(roi_result['profit_mean']) == 0.15 - assert roi_result['profit_mean_pct'] == round(roi_result['profit_mean'] * 100, 2) - assert pytest.approx(roi_result['profit_mean']) == 0.15 - assert roi_result['profit_mean_pct'] == round(roi_result['profit_mean'] * 100, 2) + assert roi_result["key"] == "roi" + assert roi_result["trades"] == 2 + assert pytest.approx(roi_result["profit_mean"]) == 0.15 + assert roi_result["profit_mean_pct"] == round(roi_result["profit_mean"] * 100, 2) + assert pytest.approx(roi_result["profit_mean"]) == 0.15 + assert roi_result["profit_mean_pct"] == round(roi_result["profit_mean"] * 100, 2) stop_result = exit_reason_stats[1] - assert stop_result['key'] == 'stop_loss' - assert stop_result['trades'] == 1 - assert pytest.approx(stop_result['profit_mean']) == -0.1 - assert stop_result['profit_mean_pct'] == round(stop_result['profit_mean'] * 100, 2) - assert pytest.approx(stop_result['profit_mean']) == -0.1 - assert stop_result['profit_mean_pct'] == round(stop_result['profit_mean'] * 100, 2) + assert stop_result["key"] == "stop_loss" + assert stop_result["trades"] == 1 + assert pytest.approx(stop_result["profit_mean"]) == -0.1 + assert stop_result["profit_mean_pct"] == round(stop_result["profit_mean"] * 100, 2) + assert pytest.approx(stop_result["profit_mean"]) == -0.1 + assert stop_result["profit_mean_pct"] == round(stop_result["profit_mean"] * 100, 2) def test_text_table_strategy(testdatadir): filename = testdatadir / "backtest_results/backtest-result_multistrat.json" bt_res_data = load_backtest_stats(filename) - bt_res_data_comparison = bt_res_data.pop('strategy_comparison') + bt_res_data_comparison = bt_res_data.pop("strategy_comparison") result_str = ( - '| Strategy | Entries | Avg Profit % | Tot Profit BTC |' - ' Tot Profit % | Avg Duration | Win Draw Loss Win% | Drawdown |\n' - '|----------------+-----------+----------------+------------------+' - '----------------+----------------+-------------------------+-----------------------|\n' - '| StrategyTestV2 | 179 | 0.08 | 0.02608550 |' - ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |\n' - '| TestStrategy | 179 | 0.08 | 0.02608550 |' - ' 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |' + "| Strategy | Entries | Avg Profit % | Tot Profit BTC |" + " Tot Profit % | Avg Duration | Win Draw Loss Win% | Drawdown |\n" + "|----------------+-----------+----------------+------------------+" + "----------------+----------------+-------------------------+-----------------------|\n" + "| StrategyTestV2 | 179 | 0.08 | 0.02608550 |" + " 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |\n" + "| TestStrategy | 179 | 0.08 | 0.02608550 |" + " 260.85 | 3:40:00 | 170 0 9 95.0 | 0.00308222 BTC 8.67% |" ) - strategy_results = generate_strategy_comparison(bt_stats=bt_res_data['strategy']) + strategy_results = generate_strategy_comparison(bt_stats=bt_res_data["strategy"]) assert strategy_results == bt_res_data_comparison - assert text_table_strategy(strategy_results, 'BTC') 
== result_str + assert text_table_strategy(strategy_results, "BTC") == result_str def test_generate_edge_table(): - results = {} - results['ETH/BTC'] = PairInfo(-0.01, 0.60, 2, 1, 3, 10, 60) - assert generate_edge_table(results).count('+') == 7 - assert generate_edge_table(results).count('| ETH/BTC |') == 1 - assert generate_edge_table(results).count( - '| Risk Reward Ratio | Required Risk Reward | Expectancy |') == 1 + results["ETH/BTC"] = PairInfo(-0.01, 0.60, 2, 1, 3, 10, 60) + assert generate_edge_table(results).count("+") == 7 + assert generate_edge_table(results).count("| ETH/BTC |") == 1 + assert ( + generate_edge_table(results).count( + "| Risk Reward Ratio | Required Risk Reward | Expectancy |" + ) + == 1 + ) def test_generate_periodic_breakdown_stats(testdatadir): filename = testdatadir / "backtest_results/backtest-result.json" - bt_data = load_backtest_data(filename).to_dict(orient='records') + bt_data = load_backtest_data(filename).to_dict(orient="records") - res = generate_periodic_breakdown_stats(bt_data, 'day') + res = generate_periodic_breakdown_stats(bt_data, "day") assert isinstance(res, list) assert len(res) == 21 day = res[0] - assert 'date' in day - assert 'draws' in day - assert 'loses' in day - assert 'wins' in day - assert 'profit_abs' in day + assert "date" in day + assert "draws" in day + assert "loses" in day + assert "wins" in day + assert "profit_abs" in day # Select empty dataframe! - res = generate_periodic_breakdown_stats([], 'day') + res = generate_periodic_breakdown_stats([], "day") assert res == [] def test__get_resample_from_period(): - - assert _get_resample_from_period('day') == '1d' - assert _get_resample_from_period('week') == '1W-MON' - assert _get_resample_from_period('month') == '1ME' + assert _get_resample_from_period("day") == "1d" + assert _get_resample_from_period("week") == "1W-MON" + assert _get_resample_from_period("month") == "1ME" with pytest.raises(ValueError, match=r"Period noooo is not supported."): - _get_resample_from_period('noooo') + _get_resample_from_period("noooo") for period in BACKTEST_BREAKDOWNS: assert isinstance(_get_resample_from_period(period), str) @@ -532,11 +570,11 @@ def test__get_resample_from_period(): def test_show_sorted_pairlist(testdatadir, default_conf, capsys): filename = testdatadir / "backtest_results/backtest-result.json" bt_data = load_backtest_stats(filename) - default_conf['backtest_show_pair_list'] = True + default_conf["backtest_show_pair_list"] = True show_sorted_pairlist(default_conf, bt_data) out, _err = capsys.readouterr() - assert 'Pairs for Strategy StrategyTestV3: \n[' in out - assert 'TOTAL' not in out + assert "Pairs for Strategy StrategyTestV3: \n[" in out + assert "TOTAL" not in out assert '"ETH/BTC", // ' in out diff --git a/tests/optimize/test_recursive_analysis.py b/tests/optimize/test_recursive_analysis.py index 95a96c9f5..2969b4153 100644 --- a/tests/optimize/test_recursive_analysis.py +++ b/tests/optimize/test_recursive_analysis.py @@ -10,19 +10,18 @@ from freqtrade.data.history import get_timerange from freqtrade.exceptions import OperationalException from freqtrade.optimize.analysis.recursive import RecursiveAnalysis from freqtrade.optimize.analysis.recursive_helpers import RecursiveAnalysisSubFunctions -from tests.conftest import get_args, log_has_re, patch_exchange +from tests.conftest import EXMS, get_args, log_has_re, patch_exchange @pytest.fixture def recursive_conf(default_conf_usdt, tmp_path): - default_conf_usdt['user_data_dir'] = tmp_path - default_conf_usdt['timerange'] = 
'20220101-20220501' + default_conf_usdt["user_data_dir"] = tmp_path + default_conf_usdt["timerange"] = "20220101-20220501" - default_conf_usdt['strategy_path'] = str( - Path(__file__).parent.parent / "strategy/strats") - default_conf_usdt['strategy'] = 'strategy_test_v3_recursive_issue' - default_conf_usdt['pairs'] = ['UNITTEST/USDT'] - default_conf_usdt['startup_candle'] = [100] + default_conf_usdt["strategy_path"] = str(Path(__file__).parent.parent / "strategy/strats") + default_conf_usdt["strategy"] = "strategy_test_v3_recursive_issue" + default_conf_usdt["pairs"] = ["UNITTEST/USDT"] + default_conf_usdt["startup_candle"] = [100] return default_conf_usdt @@ -30,7 +29,7 @@ def test_start_recursive_analysis(mocker): single_mock = MagicMock() text_table_mock = MagicMock() mocker.patch.multiple( - 'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions', + "freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions", initialize_single_recursive_analysis=single_mock, text_table_recursive_analysis_instances=text_table_mock, ) @@ -43,10 +42,10 @@ def test_start_recursive_analysis(mocker): "--pairs", "UNITTEST/BTC", "--timerange", - "20220101-20220201" + "20220101-20220201", ] pargs = get_args(args) - pargs['config'] = None + pargs["config"] = None start_recursive_analysis(pargs) assert single_mock.call_count == 1 @@ -62,21 +61,19 @@ def test_start_recursive_analysis(mocker): "--strategy-path", str(Path(__file__).parent.parent / "strategy/strats"), "--pairs", - "UNITTEST/BTC" + "UNITTEST/BTC", ] pargs = get_args(args) - pargs['config'] = None - with pytest.raises(OperationalException, - match=r"Please set a timerange\..*"): + pargs["config"] = None + with pytest.raises(OperationalException, match=r"Please set a timerange\..*"): start_recursive_analysis(pargs) def test_recursive_helper_no_strategy_defined(recursive_conf): conf = deepcopy(recursive_conf) - conf['pairs'] = ['UNITTEST/USDT'] - del conf['strategy'] - with pytest.raises(OperationalException, - match=r"No Strategy specified"): + conf["pairs"] = ["UNITTEST/USDT"] + del conf["strategy"] + with pytest.raises(OperationalException, match=r"No Strategy specified"): RecursiveAnalysisSubFunctions.start(conf) @@ -84,7 +81,7 @@ def test_recursive_helper_start(recursive_conf, mocker) -> None: single_mock = MagicMock() text_table_mock = MagicMock() mocker.patch.multiple( - 'freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions', + "freqtrade.optimize.analysis.recursive_helpers.RecursiveAnalysisSubFunctions", initialize_single_recursive_analysis=single_mock, text_table_recursive_analysis_instances=text_table_mock, ) @@ -98,81 +95,83 @@ def test_recursive_helper_start(recursive_conf, mocker) -> None: def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf): dict_diff = dict() - dict_diff['rsi'] = {} - dict_diff['rsi'][100] = "0.078%" + dict_diff["rsi"] = {} + dict_diff["rsi"][100] = "0.078%" strategy_obj = { - 'name': "strategy_test_v3_recursive_issue", - 'location': Path(recursive_conf['strategy_path'], f"{recursive_conf['strategy']}.py") + "name": "strategy_test_v3_recursive_issue", + "location": Path(recursive_conf["strategy_path"], f"{recursive_conf['strategy']}.py"), } instance = RecursiveAnalysis(recursive_conf, strategy_obj) instance.dict_recursive = dict_diff - _table, _headers, data = (RecursiveAnalysisSubFunctions. 
- text_table_recursive_analysis_instances([instance])) + _table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances( + [instance] + ) # check row contents for a try that has too few signals - assert data[0][0] == 'rsi' - assert data[0][1] == '0.078%' + assert data[0][0] == "rsi" + assert data[0][1] == "0.078%" assert len(data[0]) == 2 # now check when there is no issue dict_diff = dict() instance = RecursiveAnalysis(recursive_conf, strategy_obj) instance.dict_recursive = dict_diff - _table, _headers, data = (RecursiveAnalysisSubFunctions. - text_table_recursive_analysis_instances([instance])) + _table, _headers, data = RecursiveAnalysisSubFunctions.text_table_recursive_analysis_instances( + [instance] + ) assert len(data) == 0 def test_initialize_single_recursive_analysis(recursive_conf, mocker, caplog): - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - recursive_conf['pairs'] = ['UNITTEST/BTC'] + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + recursive_conf["pairs"] = ["UNITTEST/BTC"] - recursive_conf['timeframe'] = '5m' - recursive_conf['timerange'] = '20180119-20180122' - start_mock = mocker.patch('freqtrade.optimize.analysis.recursive.RecursiveAnalysis.start') + recursive_conf["timeframe"] = "5m" + recursive_conf["timerange"] = "20180119-20180122" + start_mock = mocker.patch("freqtrade.optimize.analysis.recursive.RecursiveAnalysis.start") strategy_obj = { - 'name': "strategy_test_v3_recursive_issue", - 'location': Path(recursive_conf['strategy_path'], f"{recursive_conf['strategy']}.py") + "name": "strategy_test_v3_recursive_issue", + "location": Path(recursive_conf["strategy_path"], f"{recursive_conf['strategy']}.py"), } instance = RecursiveAnalysisSubFunctions.initialize_single_recursive_analysis( - recursive_conf, strategy_obj) + recursive_conf, strategy_obj + ) assert log_has_re(r"Recursive test of .* started\.", caplog) assert start_mock.call_count == 1 - assert instance.strategy_obj['name'] == "strategy_test_v3_recursive_issue" + assert instance.strategy_obj["name"] == "strategy_test_v3_recursive_issue" -@pytest.mark.parametrize('scenario', [ - 'no_bias', 'bias1', 'bias2' -]) +@pytest.mark.parametrize("scenario", ["no_bias", "bias1", "bias2"]) def test_recursive_biased_strategy(recursive_conf, mocker, caplog, scenario) -> None: - mocker.patch('freqtrade.data.history.get_timerange', get_timerange) patch_exchange(mocker) - mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist', - PropertyMock(return_value=['UNITTEST/BTC'])) - recursive_conf['pairs'] = ['UNITTEST/BTC'] + mocker.patch(f"{EXMS}.get_fee", return_value=0.0) + mocker.patch("freqtrade.data.history.get_timerange", get_timerange) + mocker.patch( + "freqtrade.plugins.pairlistmanager.PairListManager.whitelist", + PropertyMock(return_value=["UNITTEST/BTC"]), + ) + recursive_conf["pairs"] = ["UNITTEST/BTC"] - recursive_conf['timeframe'] = '5m' - recursive_conf['timerange'] = '20180119-20180122' - recursive_conf['startup_candle'] = [100] + recursive_conf["timeframe"] = "5m" + recursive_conf["timerange"] = "20180119-20180122" + recursive_conf["startup_candle"] = [100] # Patch scenario Parameter to allow for easy selection - 
mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file', - return_value={ - 'params': { - "buy": { - "scenario": scenario - } - } - }) + mocker.patch( + "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file", + return_value={"params": {"buy": {"scenario": scenario}}}, + ) - strategy_obj = {'name': "strategy_test_v3_recursive_issue"} + strategy_obj = {"name": "strategy_test_v3_recursive_issue"} instance = RecursiveAnalysis(recursive_conf, strategy_obj) instance.start() # Assert init correct @@ -180,7 +179,7 @@ def test_recursive_biased_strategy(recursive_conf, mocker, caplog, scenario) -> if scenario == "bias2": assert log_has_re("=> found lookahead in indicator rsi", caplog) - diff_pct = abs(float(instance.dict_recursive['rsi'][100].replace("%", ""))) + diff_pct = abs(float(instance.dict_recursive["rsi"][100].replace("%", ""))) # check non-biased strategy if scenario == "no_bias": assert diff_pct < 0.01 diff --git a/tests/persistence/test_db_context.py b/tests/persistence/test_db_context.py index 690006219..164095d5d 100644 --- a/tests/persistence/test_db_context.py +++ b/tests/persistence/test_db_context.py @@ -3,12 +3,12 @@ import pytest from freqtrade.persistence import FtNoDBContext, PairLocks, Trade -@pytest.mark.parametrize('timeframe', ['', '5m', '1d']) +@pytest.mark.parametrize("timeframe", ["", "5m", "1d"]) def test_FtNoDBContext(timeframe): - PairLocks.timeframe = '' + PairLocks.timeframe = "" assert Trade.use_db is True assert PairLocks.use_db is True - assert PairLocks.timeframe == '' + assert PairLocks.timeframe == "" with FtNoDBContext(timeframe): assert Trade.use_db is False @@ -18,7 +18,7 @@ def test_FtNoDBContext(timeframe): with FtNoDBContext(): assert Trade.use_db is False assert PairLocks.use_db is False - assert PairLocks.timeframe == '' + assert PairLocks.timeframe == "" assert Trade.use_db is True assert PairLocks.use_db is True diff --git a/tests/persistence/test_key_value_store.py b/tests/persistence/test_key_value_store.py index 1dab8764a..e69ae0220 100644 --- a/tests/persistence/test_key_value_store.py +++ b/tests/persistence/test_key_value_store.py @@ -46,7 +46,7 @@ def test_key_value_store(time_machine): KeyValueStore.delete_value("test_float") with pytest.raises(ValueError, match=r"Unknown value type"): - KeyValueStore.store_value("test_float", {'some': 'dict'}) + KeyValueStore.store_value("test_float", {"some": "dict"}) @pytest.mark.usefixtures("init_persistence") diff --git a/tests/persistence/test_migrations.py b/tests/persistence/test_migrations.py index d354e8f22..677ba9014 100644 --- a/tests/persistence/test_migrations.py +++ b/tests/persistence/test_migrations.py @@ -24,9 +24,9 @@ spot, margin, futures = TradingMode.SPOT, TradingMode.MARGIN, TradingMode.FUTURE def test_init_create_session(default_conf): # Check if init create a session - init_db(default_conf['db_url']) - assert hasattr(Trade, 'session') - assert 'scoped_session' in type(Trade.session).__name__ + init_db(default_conf["db_url"]) + assert hasattr(Trade, "session") + assert "scoped_session" in type(Trade.session).__name__ def test_init_custom_db_url(default_conf, tmp_path): @@ -34,43 +34,40 @@ def test_init_custom_db_url(default_conf, tmp_path): filename = tmp_path / "freqtrade2_test.sqlite" assert not filename.is_file() - default_conf.update({'db_url': f'sqlite:///{filename}'}) + default_conf.update({"db_url": f"sqlite:///{filename}"}) - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) assert filename.is_file() r = 
Trade.session.execute(text("PRAGMA journal_mode")) - assert r.first() == ('wal',) + assert r.first() == ("wal",) def test_init_invalid_db_url(): # Update path to a value other than default, but still in-memory - with pytest.raises(OperationalException, match=r'.*no valid database URL*'): - init_db('unknown:///some.url') + with pytest.raises(OperationalException, match=r".*no valid database URL*"): + init_db("unknown:///some.url") - with pytest.raises(OperationalException, match=r'Bad db-url.*For in-memory database, pl.*'): - init_db('sqlite:///') + with pytest.raises(OperationalException, match=r"Bad db-url.*For in-memory database, pl.*"): + init_db("sqlite:///") def test_init_prod_db(default_conf, mocker): - default_conf.update({'dry_run': False}) - default_conf.update({'db_url': DEFAULT_DB_PROD_URL}) + default_conf.update({"dry_run": False}) + default_conf.update({"db_url": DEFAULT_DB_PROD_URL}) - create_engine_mock = mocker.patch('freqtrade.persistence.models.create_engine', MagicMock()) + create_engine_mock = mocker.patch("freqtrade.persistence.models.create_engine", MagicMock()) - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) assert create_engine_mock.call_count == 1 - assert create_engine_mock.mock_calls[0][1][0] == 'sqlite:///tradesv3.sqlite' + assert create_engine_mock.mock_calls[0][1][0] == "sqlite:///tradesv3.sqlite" def test_init_dryrun_db(default_conf, tmpdir): filename = f"{tmpdir}/freqtrade2_prod.sqlite" assert not Path(filename).is_file() - default_conf.update({ - 'dry_run': True, - 'db_url': f'sqlite:///{filename}' - }) + default_conf.update({"dry_run": True, "db_url": f"sqlite:///{filename}"}) - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) assert Path(filename).is_file() @@ -135,10 +132,9 @@ def test_migrate(mocker, default_conf, fee, caplog): '2019-11-28 12:44:24.000000', 0.0, 0.0, 0.0, '5m', 'buy_order', 'dry_stop_order_id222') - """.format(fee=fee.return_value, - stake=default_conf.get("stake_amount"), - amount=amount - ) + """.format( + fee=fee.return_value, stake=default_conf.get("stake_amount"), amount=amount + ) insert_orders = f""" insert into orders ( ft_trade_id, @@ -237,8 +233,8 @@ def test_migrate(mocker, default_conf, fee, caplog): {amount * 0.00258580} ) """ - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + engine = create_engine("sqlite://") + mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine) # Create table using the old format with engine.begin() as connection: @@ -254,7 +250,7 @@ def test_migrate(mocker, default_conf, fee, caplog): connection.execute(text("create table trades_bak1 as select * from trades")) # Run init to test migration - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) trades = Trade.session.scalars(select(Trade)).all() assert len(trades) == 1 @@ -276,33 +272,35 @@ def test_migrate(mocker, default_conf, fee, caplog): assert trade.initial_stop_loss == 0.0 assert trade.exit_reason is None assert trade.strategy is None - assert trade.timeframe == '5m' + assert trade.timeframe == "5m" assert log_has("trying trades_bak1", caplog) assert log_has("trying trades_bak2", caplog) - assert log_has("Running database migration for trades - backup: trades_bak2, orders_bak0", - caplog) + assert log_has( + "Running database migration for trades - backup: trades_bak2, orders_bak0", caplog + ) assert log_has("Database migration finished.", caplog) assert 
pytest.approx(trade.open_trade_value) == trade._calc_open_trade_value( - trade.amount, trade.open_rate) + trade.amount, trade.open_rate + ) assert trade.close_profit_abs is None assert trade.stake_amount == trade.max_stake_amount orders = trade.orders assert len(orders) == 4 - assert orders[0].order_id == 'dry_buy_order' - assert orders[0].ft_order_side == 'buy' + assert orders[0].order_id == "dry_buy_order" + assert orders[0].ft_order_side == "buy" # All dry-run stoploss orders will be closed - assert orders[-1].order_id == 'dry_stop_order_id222' - assert orders[-1].ft_order_side == 'stoploss' + assert orders[-1].order_id == "dry_stop_order_id222" + assert orders[-1].ft_order_side == "stoploss" assert orders[-1].ft_is_open is False - assert orders[1].order_id == 'dry_buy_order22' - assert orders[1].ft_order_side == 'buy' + assert orders[1].order_id == "dry_buy_order22" + assert orders[1].ft_order_side == "buy" assert orders[1].ft_is_open is True - assert orders[2].order_id == 'dry_stop_order_id11X' - assert orders[2].ft_order_side == 'stoploss' + assert orders[2].order_id == "dry_stop_order_id11X" + assert orders[2].ft_order_side == "stoploss" assert orders[2].ft_is_open is False orders1 = Order.session.scalars(select(Order)).all() @@ -342,12 +340,11 @@ def test_migrate_too_old(mocker, default_conf, fee, caplog): VALUES ('binance', 'ETC/BTC', 1, {fee}, {fee}, 0.00258580, {stake}, {amount}, '2019-11-28 12:44:24.000000') - """.format(fee=fee.return_value, - stake=default_conf.get("stake_amount"), - amount=amount - ) - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + """.format( + fee=fee.return_value, stake=default_conf.get("stake_amount"), amount=amount + ) + engine = create_engine("sqlite://") + mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine) # Create table using the old format with engine.begin() as connection: @@ -355,22 +352,22 @@ def test_migrate_too_old(mocker, default_conf, fee, caplog): connection.execute(text(insert_table_old)) # Run init to test migration - with pytest.raises(OperationalException, match=r'Your database seems to be very old'): - init_db(default_conf['db_url']) + with pytest.raises(OperationalException, match=r"Your database seems to be very old"): + init_db(default_conf["db_url"]) def test_migrate_get_last_sequence_ids(): engine = MagicMock() engine.begin = MagicMock() - engine.name = 'postgresql' - get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') + engine.name = "postgresql" + get_last_sequence_ids(engine, "trades_bak", "orders_bak") assert engine.begin.call_count == 2 engine.reset_mock() engine.begin.reset_mock() - engine.name = 'somethingelse' - get_last_sequence_ids(engine, 'trades_bak', 'orders_bak') + engine.name = "somethingelse" + get_last_sequence_ids(engine, "trades_bak", "orders_bak") assert engine.begin.call_count == 0 @@ -378,14 +375,14 @@ def test_migrate_get_last_sequence_ids(): def test_migrate_set_sequence_ids(): engine = MagicMock() engine.begin = MagicMock() - engine.name = 'postgresql' + engine.name = "postgresql" set_sequence_ids(engine, 22, 55, 5) assert engine.begin.call_count == 1 engine.reset_mock() engine.begin.reset_mock() - engine.name = 'somethingelse' + engine.name = "somethingelse" set_sequence_ids(engine, 22, 55, 6) assert engine.begin.call_count == 0 @@ -418,8 +415,8 @@ def test_migrate_pairlocks(mocker, default_conf, fee, caplog): id, pair, reason, lock_time, lock_end_time, active) VALUES (2, '*', 
'Lock all', '2021-07-12 18:41:03', '2021-07-12 19:00:00', 1) """ - engine = create_engine('sqlite://') - mocker.patch('freqtrade.persistence.models.create_engine', lambda *args, **kwargs: engine) + engine = create_engine("sqlite://") + mocker.patch("freqtrade.persistence.models.create_engine", lambda *args, **kwargs: engine) # Create table using the old format with engine.begin() as connection: connection.execute(text(create_table_old)) @@ -430,22 +427,28 @@ def test_migrate_pairlocks(mocker, default_conf, fee, caplog): connection.execute(text(create_index2)) connection.execute(text(create_index3)) - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) assert len(PairLock.get_all_locks().all()) == 2 - assert len(PairLock.session.scalars(select(PairLock).filter(PairLock.pair == '*')).all()) == 1 - pairlocks = PairLock.session.scalars(select(PairLock).filter(PairLock.pair == 'ETH/BTC')).all() + assert len(PairLock.session.scalars(select(PairLock).filter(PairLock.pair == "*")).all()) == 1 + pairlocks = PairLock.session.scalars(select(PairLock).filter(PairLock.pair == "ETH/BTC")).all() assert len(pairlocks) == 1 - assert pairlocks[0].pair == 'ETH/BTC' - assert pairlocks[0].side == '*' + assert pairlocks[0].pair == "ETH/BTC" + assert pairlocks[0].side == "*" -@pytest.mark.parametrize('dialect', [ - 'sqlite', 'postgresql', 'mysql', 'oracle', 'mssql', - ]) +@pytest.mark.parametrize( + "dialect", + [ + "sqlite", + "postgresql", + "mysql", + "oracle", + "mssql", + ], +) def test_create_table_compiles(dialect): - dialect_mod = import_module(f"sqlalchemy.dialects.{dialect}") for table in ModelBase.metadata.tables.values(): create_sql = str(CreateTable(table).compile(dialect=dialect_mod.dialect())) - assert 'CREATE TABLE' in create_sql + assert "CREATE TABLE" in create_sql diff --git a/tests/persistence/test_persistence.py b/tests/persistence/test_persistence.py index a9c27a9b5..0545ac861 100644 --- a/tests/persistence/test_persistence.py +++ b/tests/persistence/test_persistence.py @@ -10,20 +10,25 @@ from freqtrade.enums import TradingMode from freqtrade.exceptions import DependencyException from freqtrade.persistence import LocalTrade, Order, Trade, init_db from freqtrade.util import dt_now -from tests.conftest import (create_mock_trades, create_mock_trades_usdt, - create_mock_trades_with_leverage, log_has, log_has_re) +from tests.conftest import ( + create_mock_trades, + create_mock_trades_usdt, + create_mock_trades_with_leverage, + log_has, + log_has_re, +) spot, margin, futures = TradingMode.SPOT, TradingMode.MARGIN, TradingMode.FUTURES -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize("is_short", [False, True]) @pytest.mark.usefixtures("init_persistence") def test_enter_exit_side(fee, is_short): entry_side, exit_side = ("sell", "buy") if is_short else ("buy", "sell") trade = Trade( id=2, - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=0.001, open_rate=0.01, amount=5, @@ -31,21 +36,21 @@ def test_enter_exit_side(fee, is_short): open_date=dt_now(), fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", is_short=is_short, leverage=2.0, - trading_mode=margin + trading_mode=margin, ) assert trade.entry_side == entry_side assert trade.exit_side == exit_side - assert trade.trade_direction == 'short' if is_short else 'long' + assert trade.trade_direction == "short" if is_short else "long" @pytest.mark.usefixtures("init_persistence") def test_set_stop_loss_liquidation(fee): trade = Trade( id=2, - pair='ADA/USDT', + 
pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, @@ -53,10 +58,10 @@ def test_set_stop_loss_liquidation(fee): open_date=dt_now(), fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", is_short=False, leverage=2.0, - trading_mode=margin + trading_mode=margin, ) trade.set_liquidation_price(0.09) assert trade.liquidation_price == 0.09 @@ -168,95 +173,94 @@ def test_set_stop_loss_liquidation(fee): assert trade.stoploss_or_liquidation == 1.5 -@pytest.mark.parametrize('exchange,is_short,lev,minutes,rate,interest,trading_mode', [ - ("binance", False, 3, 10, 0.0005, round(0.0008333333333333334, 8), margin), - ("binance", True, 3, 10, 0.0005, 0.000625, margin), - ("binance", False, 3, 295, 0.0005, round(0.004166666666666667, 8), margin), - ("binance", True, 3, 295, 0.0005, round(0.0031249999999999997, 8), margin), - ("binance", False, 3, 295, 0.00025, round(0.0020833333333333333, 8), margin), - ("binance", True, 3, 295, 0.00025, round(0.0015624999999999999, 8), margin), - ("binance", False, 5, 295, 0.0005, 0.005, margin), - ("binance", True, 5, 295, 0.0005, round(0.0031249999999999997, 8), margin), - ("binance", False, 1, 295, 0.0005, 0.0, spot), - ("binance", True, 1, 295, 0.0005, 0.003125, margin), - - ("binance", False, 3, 10, 0.0005, 0.0, futures), - ("binance", True, 3, 295, 0.0005, 0.0, futures), - ("binance", False, 5, 295, 0.0005, 0.0, futures), - ("binance", True, 5, 295, 0.0005, 0.0, futures), - ("binance", False, 1, 295, 0.0005, 0.0, futures), - ("binance", True, 1, 295, 0.0005, 0.0, futures), - - ("kraken", False, 3, 10, 0.0005, 0.040, margin), - ("kraken", True, 3, 10, 0.0005, 0.030, margin), - ("kraken", False, 3, 295, 0.0005, 0.06, margin), - ("kraken", True, 3, 295, 0.0005, 0.045, margin), - ("kraken", False, 3, 295, 0.00025, 0.03, margin), - ("kraken", True, 3, 295, 0.00025, 0.0225, margin), - ("kraken", False, 5, 295, 0.0005, round(0.07200000000000001, 8), margin), - ("kraken", True, 5, 295, 0.0005, 0.045, margin), - ("kraken", False, 1, 295, 0.0005, 0.0, spot), - ("kraken", True, 1, 295, 0.0005, 0.045, margin), - -]) +@pytest.mark.parametrize( + "exchange,is_short,lev,minutes,rate,interest,trading_mode", + [ + ("binance", False, 3, 10, 0.0005, round(0.0008333333333333334, 8), margin), + ("binance", True, 3, 10, 0.0005, 0.000625, margin), + ("binance", False, 3, 295, 0.0005, round(0.004166666666666667, 8), margin), + ("binance", True, 3, 295, 0.0005, round(0.0031249999999999997, 8), margin), + ("binance", False, 3, 295, 0.00025, round(0.0020833333333333333, 8), margin), + ("binance", True, 3, 295, 0.00025, round(0.0015624999999999999, 8), margin), + ("binance", False, 5, 295, 0.0005, 0.005, margin), + ("binance", True, 5, 295, 0.0005, round(0.0031249999999999997, 8), margin), + ("binance", False, 1, 295, 0.0005, 0.0, spot), + ("binance", True, 1, 295, 0.0005, 0.003125, margin), + ("binance", False, 3, 10, 0.0005, 0.0, futures), + ("binance", True, 3, 295, 0.0005, 0.0, futures), + ("binance", False, 5, 295, 0.0005, 0.0, futures), + ("binance", True, 5, 295, 0.0005, 0.0, futures), + ("binance", False, 1, 295, 0.0005, 0.0, futures), + ("binance", True, 1, 295, 0.0005, 0.0, futures), + ("kraken", False, 3, 10, 0.0005, 0.040, margin), + ("kraken", True, 3, 10, 0.0005, 0.030, margin), + ("kraken", False, 3, 295, 0.0005, 0.06, margin), + ("kraken", True, 3, 295, 0.0005, 0.045, margin), + ("kraken", False, 3, 295, 0.00025, 0.03, margin), + ("kraken", True, 3, 295, 0.00025, 0.0225, margin), + ("kraken", False, 5, 295, 0.0005, 
round(0.07200000000000001, 8), margin), + ("kraken", True, 5, 295, 0.0005, 0.045, margin), + ("kraken", False, 1, 295, 0.0005, 0.0, spot), + ("kraken", True, 1, 295, 0.0005, 0.045, margin), + ], +) @pytest.mark.usefixtures("init_persistence") -def test_interest(fee, exchange, is_short, lev, minutes, rate, interest, - trading_mode): +def test_interest(fee, exchange, is_short, lev, minutes, rate, interest, trading_mode): """ - 10min, 5hr limit trade on Binance/Kraken at 3x,5x leverage - fee: 0.25 % quote - interest_rate: 0.05 % per 4 hrs - open_rate: 2.00 quote - close_rate: 2.20 quote - amount: = 30.0 crypto - stake_amount - 3x, -3x: 20.0 quote - 5x, -5x: 12.0 quote - borrowed - 10min - 3x: 40 quote - -3x: 30 crypto - 5x: 48 quote - -5x: 30 crypto - 1x: 0 - -1x: 30 crypto - hours: 1/6 (10 minutes) - time-periods: - 10min - kraken: (1 + 1) 4hr_periods = 2 4hr_periods - binance: 1/24 24hr_periods - 4.95hr - kraken: ceil(1 + 4.95/4) 4hr_periods = 3 4hr_periods - binance: ceil(4.95)/24 24hr_periods = 5/24 24hr_periods - interest: borrowed * interest_rate * time-periods - 10min - binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote - kraken 3x: 40 * 0.0005 * 2 = 0.040 quote - binace -3x: 30 * 0.0005 * 1/24 = 0.000625 crypto - kraken -3x: 30 * 0.0005 * 2 = 0.030 crypto - 5hr - binance 3x: 40 * 0.0005 * 5/24 = 0.004166666666666667 quote - kraken 3x: 40 * 0.0005 * 3 = 0.06 quote - binace -3x: 30 * 0.0005 * 5/24 = 0.0031249999999999997 crypto - kraken -3x: 30 * 0.0005 * 3 = 0.045 crypto - 0.00025 interest - binance 3x: 40 * 0.00025 * 5/24 = 0.0020833333333333333 quote - kraken 3x: 40 * 0.00025 * 3 = 0.03 quote - binace -3x: 30 * 0.00025 * 5/24 = 0.0015624999999999999 crypto - kraken -3x: 30 * 0.00025 * 3 = 0.0225 crypto - 5x leverage, 0.0005 interest, 5hr - binance 5x: 48 * 0.0005 * 5/24 = 0.005 quote - kraken 5x: 48 * 0.0005 * 3 = 0.07200000000000001 quote - binace -5x: 30 * 0.0005 * 5/24 = 0.0031249999999999997 crypto - kraken -5x: 30 * 0.0005 * 3 = 0.045 crypto - 1x leverage, 0.0005 interest, 5hr - binance,kraken 1x: 0.0 quote - binace -1x: 30 * 0.0005 * 5/24 = 0.003125 crypto - kraken -1x: 30 * 0.0005 * 3 = 0.045 crypto + 10min, 5hr limit trade on Binance/Kraken at 3x,5x leverage + fee: 0.25 % quote + interest_rate: 0.05 % per 4 hrs + open_rate: 2.00 quote + close_rate: 2.20 quote + amount: = 30.0 crypto + stake_amount + 3x, -3x: 20.0 quote + 5x, -5x: 12.0 quote + borrowed + 10min + 3x: 40 quote + -3x: 30 crypto + 5x: 48 quote + -5x: 30 crypto + 1x: 0 + -1x: 30 crypto + hours: 1/6 (10 minutes) + time-periods: + 10min + kraken: (1 + 1) 4hr_periods = 2 4hr_periods + binance: 1/24 24hr_periods + 4.95hr + kraken: ceil(1 + 4.95/4) 4hr_periods = 3 4hr_periods + binance: ceil(4.95)/24 24hr_periods = 5/24 24hr_periods + interest: borrowed * interest_rate * time-periods + 10min + binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote + kraken 3x: 40 * 0.0005 * 2 = 0.040 quote + binace -3x: 30 * 0.0005 * 1/24 = 0.000625 crypto + kraken -3x: 30 * 0.0005 * 2 = 0.030 crypto + 5hr + binance 3x: 40 * 0.0005 * 5/24 = 0.004166666666666667 quote + kraken 3x: 40 * 0.0005 * 3 = 0.06 quote + binace -3x: 30 * 0.0005 * 5/24 = 0.0031249999999999997 crypto + kraken -3x: 30 * 0.0005 * 3 = 0.045 crypto + 0.00025 interest + binance 3x: 40 * 0.00025 * 5/24 = 0.0020833333333333333 quote + kraken 3x: 40 * 0.00025 * 3 = 0.03 quote + binace -3x: 30 * 0.00025 * 5/24 = 0.0015624999999999999 crypto + kraken -3x: 30 * 0.00025 * 3 = 0.0225 crypto + 5x leverage, 0.0005 interest, 5hr + binance 5x: 48 * 0.0005 * 5/24 = 
0.005 quote + kraken 5x: 48 * 0.0005 * 3 = 0.07200000000000001 quote + binace -5x: 30 * 0.0005 * 5/24 = 0.0031249999999999997 crypto + kraken -5x: 30 * 0.0005 * 3 = 0.045 crypto + 1x leverage, 0.0005 interest, 5hr + binance,kraken 1x: 0.0 quote + binace -1x: 30 * 0.0005 * 5/24 = 0.003125 crypto + kraken -1x: 30 * 0.0005 * 3 = 0.045 crypto """ trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=20.0, amount=30.0, open_rate=2.0, @@ -267,85 +271,88 @@ def test_interest(fee, exchange, is_short, lev, minutes, rate, interest, leverage=lev, interest_rate=rate, is_short=is_short, - trading_mode=trading_mode + trading_mode=trading_mode, ) assert round(float(trade.calculate_interest()), 8) == interest -@pytest.mark.parametrize('is_short,lev,borrowed,trading_mode', [ - (False, 1.0, 0.0, spot), - (True, 1.0, 30.0, margin), - (False, 3.0, 40.0, margin), - (True, 3.0, 30.0, margin), -]) +@pytest.mark.parametrize( + "is_short,lev,borrowed,trading_mode", + [ + (False, 1.0, 0.0, spot), + (True, 1.0, 30.0, margin), + (False, 3.0, 40.0, margin), + (True, 3.0, 30.0, margin), + ], +) @pytest.mark.usefixtures("init_persistence") def test_borrowed(fee, is_short, lev, borrowed, trading_mode): """ - 10 minute limit trade on Binance/Kraken at 1x, 3x leverage - fee: 0.25% quote - interest_rate: 0.05% per 4 hrs - open_rate: 2.00 quote - close_rate: 2.20 quote - amount: = 30.0 crypto - stake_amount - 1x,-1x: 60.0 quote - 3x,-3x: 20.0 quote - borrowed - 1x: 0 quote - 3x: 40 quote - -1x: 30 crypto - -3x: 30 crypto - hours: 1/6 (10 minutes) - time-periods: - kraken: (1 + 1) 4hr_periods = 2 4hr_periods - binance: 1/24 24hr_periods - interest: borrowed * interest_rate * time-periods - 1x : / - binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote - kraken 3x: 40 * 0.0005 * 2 = 0.040 quote - binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto - kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto - open_value: (amount * open_rate) ± (amount * open_rate * fee) - 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote - -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.850 quote - amount_closed: - 1x, 3x : amount - -1x, -3x : amount + interest - binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto - kraken -1x,-3x: 30 + 0.03 = 30.03 crypto - close_value: - 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest - -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) - binance,kraken 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 - binance 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 - kraken 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 - binance -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.16637843750001 - kraken -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 - total_profit: - 1x, 3x : close_value - open_value - -1x,-3x: open_value - close_value - binance,kraken 1x: 65.835 - 60.15 = 5.685 - binance 3x: 65.83416667 - 60.15 = 5.684166670000003 - kraken 3x: 65.795 - 60.15 = 5.645 - binance -1x,-3x: 59.850 - 66.16637843750001 = -6.316378437500013 - kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 - total_profit_ratio: - 1x, 3x : ((close_value/open_value) - 1) * leverage - -1x,-3x: (1 - (close_value/open_value)) * leverage - binance 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 - binance 3x: ((65.83416667 / 60.15) - 1) * 3 = 0.2834995845386534 - kraken 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 - kraken 3x: ((65.795 / 60.15) - 1) * 3 = 0.2815461346633419 - binance -1x: (1-(66.1663784375 / 
59.85)) * 1 = -0.1055368159983292 - binance -3x: (1-(66.1663784375 / 59.85)) * 3 = -0.3166104479949876 - kraken -1x: (1-(66.2311650 / 59.85)) * 1 = -0.106619298245614 - kraken -3x: (1-(66.2311650 / 59.85)) * 3 = -0.319857894736842 + 10 minute limit trade on Binance/Kraken at 1x, 3x leverage + fee: 0.25% quote + interest_rate: 0.05% per 4 hrs + open_rate: 2.00 quote + close_rate: 2.20 quote + amount: = 30.0 crypto + stake_amount + 1x,-1x: 60.0 quote + 3x,-3x: 20.0 quote + borrowed + 1x: 0 quote + 3x: 40 quote + -1x: 30 crypto + -3x: 30 crypto + hours: 1/6 (10 minutes) + time-periods: + kraken: (1 + 1) 4hr_periods = 2 4hr_periods + binance: 1/24 24hr_periods + interest: borrowed * interest_rate * time-periods + 1x : / + binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote + kraken 3x: 40 * 0.0005 * 2 = 0.040 quote + binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto + kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto + open_value: (amount * open_rate) ± (amount * open_rate * fee) + 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote + -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.850 quote + amount_closed: + 1x, 3x : amount + -1x, -3x : amount + interest + binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto + kraken -1x,-3x: 30 + 0.03 = 30.03 crypto + close_value: + 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest + -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) + binance,kraken 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 + binance 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 + kraken 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 + binance -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.16637843750001 + kraken -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 + total_profit: + 1x, 3x : close_value - open_value + -1x,-3x: open_value - close_value + binance,kraken 1x: 65.835 - 60.15 = 5.685 + binance 3x: 65.83416667 - 60.15 = 5.684166670000003 + kraken 3x: 65.795 - 60.15 = 5.645 + binance -1x,-3x: 59.850 - 66.16637843750001 = -6.316378437500013 + kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 + total_profit_ratio: + 1x, 3x : ((close_value/open_value) - 1) * leverage + -1x,-3x: (1 - (close_value/open_value)) * leverage + binance 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 + binance 3x: ((65.83416667 / 60.15) - 1) * 3 = 0.2834995845386534 + kraken 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 + kraken 3x: ((65.795 / 60.15) - 1) * 3 = 0.2815461346633419 + binance -1x: (1-(66.1663784375 / 59.85)) * 1 = -0.1055368159983292 + binance -3x: (1-(66.1663784375 / 59.85)) * 3 = -0.3166104479949876 + kraken -1x: (1-(66.2311650 / 59.85)) * 1 = -0.106619298245614 + kraken -3x: (1-(66.2311650 / 59.85)) * 3 = -0.319857894736842 """ trade = Trade( id=2, - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, @@ -353,87 +360,101 @@ def test_borrowed(fee, is_short, lev, borrowed, trading_mode): open_date=dt_now(), fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", is_short=is_short, leverage=lev, - trading_mode=trading_mode + trading_mode=trading_mode, ) assert trade.borrowed == borrowed -@pytest.mark.parametrize('is_short,open_rate,close_rate,lev,profit,trading_mode', [ - (False, 2.0, 2.2, 1.0, 0.09451372, spot), - (True, 2.2, 2.0, 3.0, 0.25894253, margin), -]) +@pytest.mark.parametrize( + "is_short,open_rate,close_rate,lev,profit,trading_mode", + [ + (False, 2.0, 2.2, 1.0, 0.09451372, 
spot), + (True, 2.2, 2.0, 3.0, 0.25894253, margin), + ], +) @pytest.mark.usefixtures("init_persistence") -def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_usdt, time_machine, - is_short, open_rate, close_rate, lev, profit, trading_mode): +def test_update_limit_order( + fee, + caplog, + limit_buy_order_usdt, + limit_sell_order_usdt, + time_machine, + is_short, + open_rate, + close_rate, + lev, + profit, + trading_mode, +): """ - 10 minute limit trade on Binance/Kraken at 1x, 3x leverage - fee: 0.25% quote - interest_rate: 0.05% per 4 hrs - open_rate: 2.00 quote - close_rate: 2.20 quote - amount: = 30.0 crypto - stake_amount - 1x,-1x: 60.0 quote - 3x,-3x: 20.0 quote - borrowed - 1x: 0 quote - 3x: 40 quote - -1x: 30 crypto - -3x: 30 crypto - hours: 1/6 (10 minutes) - time-periods: - kraken: (1 + 1) 4hr_periods = 2 4hr_periods - binance: 1/24 24hr_periods - interest: borrowed * interest_rate * time-periods - 1x : / - binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote - kraken 3x: 40 * 0.0005 * 2 = 0.040 quote - binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto - kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto - open_value: (amount * open_rate) ± (amount * open_rate * fee) - 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote - -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.850 quote - amount_closed: - 1x, 3x : amount - -1x, -3x : amount + interest - binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto - kraken -1x,-3x: 30 + 0.03 = 30.03 crypto - close_value: - 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest - -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) - binance,kraken 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 - binance 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 - kraken 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 - binance -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.16637843750001 - kraken -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 - total_profit: - 1x, 3x : close_value - open_value - -1x,-3x: open_value - close_value - binance,kraken 1x: 65.835 - 60.15 = 5.685 - binance 3x: 65.83416667 - 60.15 = 5.684166670000003 - kraken 3x: 65.795 - 60.15 = 5.645 - binance -1x,-3x: 59.850 - 66.16637843750001 = -6.316378437500013 - kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 - total_profit_ratio: - 1x, 3x : ((close_value/open_value) - 1) * leverage - -1x,-3x: (1 - (close_value/open_value)) * leverage - binance 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 - binance 3x: ((65.83416667 / 60.15) - 1) * 3 = 0.2834995845386534 - kraken 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 - kraken 3x: ((65.795 / 60.15) - 1) * 3 = 0.2815461346633419 - binance -1x: (1-(66.1663784375 / 59.85)) * 1 = -0.1055368159983292 - binance -3x: (1-(66.1663784375 / 59.85)) * 3 = -0.3166104479949876 - kraken -1x: (1-(66.2311650 / 59.85)) * 1 = -0.106619298245614 - kraken -3x: (1-(66.2311650 / 59.85)) * 3 = -0.319857894736842 - open_rate: 2.2, close_rate: 2.0, -3x, binance, short - open_value: 30 * 2.2 - 30 * 2.2 * 0.0025 = 65.835 quote - amount_closed: 30 + 0.000625 = 30.000625 crypto - close_value: (30.000625 * 2.0) + (30.000625 * 2.0 * 0.0025) = 60.151253125 - total_profit: 65.835 - 60.151253125 = 5.683746874999997 - total_profit_ratio: (1-(60.151253125/65.835)) * 3 = 0.2589996297562085 + 10 minute limit trade on Binance/Kraken at 1x, 3x leverage + fee: 0.25% quote + interest_rate: 0.05% per 4 hrs + open_rate: 2.00 quote + 
close_rate: 2.20 quote + amount: = 30.0 crypto + stake_amount + 1x,-1x: 60.0 quote + 3x,-3x: 20.0 quote + borrowed + 1x: 0 quote + 3x: 40 quote + -1x: 30 crypto + -3x: 30 crypto + hours: 1/6 (10 minutes) + time-periods: + kraken: (1 + 1) 4hr_periods = 2 4hr_periods + binance: 1/24 24hr_periods + interest: borrowed * interest_rate * time-periods + 1x : / + binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote + kraken 3x: 40 * 0.0005 * 2 = 0.040 quote + binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto + kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto + open_value: (amount * open_rate) ± (amount * open_rate * fee) + 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote + -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.850 quote + amount_closed: + 1x, 3x : amount + -1x, -3x : amount + interest + binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto + kraken -1x,-3x: 30 + 0.03 = 30.03 crypto + close_value: + 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest + -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) + binance,kraken 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 + binance 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 + kraken 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 + binance -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.16637843750001 + kraken -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 + total_profit: + 1x, 3x : close_value - open_value + -1x,-3x: open_value - close_value + binance,kraken 1x: 65.835 - 60.15 = 5.685 + binance 3x: 65.83416667 - 60.15 = 5.684166670000003 + kraken 3x: 65.795 - 60.15 = 5.645 + binance -1x,-3x: 59.850 - 66.16637843750001 = -6.316378437500013 + kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 + total_profit_ratio: + 1x, 3x : ((close_value/open_value) - 1) * leverage + -1x,-3x: (1 - (close_value/open_value)) * leverage + binance 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 + binance 3x: ((65.83416667 / 60.15) - 1) * 3 = 0.2834995845386534 + kraken 1x: ((65.835 / 60.15) - 1) * 1 = 0.0945137157107232 + kraken 3x: ((65.795 / 60.15) - 1) * 3 = 0.2815461346633419 + binance -1x: (1-(66.1663784375 / 59.85)) * 1 = -0.1055368159983292 + binance -3x: (1-(66.1663784375 / 59.85)) * 3 = -0.3166104479949876 + kraken -1x: (1-(66.2311650 / 59.85)) * 1 = -0.106619298245614 + kraken -3x: (1-(66.2311650 / 59.85)) * 3 = -0.319857894736842 + open_rate: 2.2, close_rate: 2.0, -3x, binance, short + open_value: 30 * 2.2 - 30 * 2.2 * 0.0025 = 65.835 quote + amount_closed: 30 + 0.000625 = 30.000625 crypto + close_value: (30.000625 * 2.0) + (30.000625 * 2.0 * 0.0025) = 60.151253125 + total_profit: 65.835 - 60.151253125 = 5.683746874999997 + total_profit_ratio: (1-(60.151253125/65.835)) * 3 = 0.2589996297562085 """ time_machine.move_to("2022-03-31 20:45:00 +00:00") @@ -444,7 +465,7 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_ trade = Trade( id=2, - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=open_rate, amount=30.0, @@ -452,32 +473,34 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_ open_date=dt_now(), fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", is_short=is_short, interest_rate=0.0005, leverage=lev, - trading_mode=trading_mode + trading_mode=trading_mode, ) assert not trade.has_open_orders assert trade.close_profit is None assert trade.close_date is None - oobj = 
Order.parse_from_ccxt_object(enter_order, 'ADA/USDT', entry_side) + oobj = Order.parse_from_ccxt_object(enter_order, "ADA/USDT", entry_side) trade.orders.append(oobj) trade.update_trade(oobj) assert not trade.has_open_orders assert trade.open_rate == open_rate assert trade.close_profit is None assert trade.close_date is None - assert log_has_re(f"LIMIT_{entry_side.upper()} has been fulfilled for " - r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, " - f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, " - r"open_since=.*\).", - caplog) + assert log_has_re( + f"LIMIT_{entry_side.upper()} has been fulfilled for " + r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, " + f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, " + r"open_since=.*\).", + caplog, + ) caplog.clear() time_machine.move_to("2022-03-31 21:45:05 +00:00") - oobj = Order.parse_from_ccxt_object(exit_order, 'ADA/USDT', exit_side) + oobj = Order.parse_from_ccxt_object(exit_order, "ADA/USDT", exit_side) trade.orders.append(oobj) trade.update_trade(oobj) @@ -485,11 +508,13 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_ assert trade.close_rate == close_rate assert pytest.approx(trade.close_profit) == profit assert trade.close_date is not None - assert log_has_re(f"LIMIT_{exit_side.upper()} has been fulfilled for " - r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, " - f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, " - r"open_since=.*\).", - caplog) + assert log_has_re( + f"LIMIT_{exit_side.upper()} has been fulfilled for " + r"Trade\(id=2, pair=ADA/USDT, amount=30.00000000, " + f"is_short={is_short}, leverage={lev}, open_rate={open_rate}0000000, " + r"open_since=.*\).", + caplog, + ) caplog.clear() @@ -497,7 +522,7 @@ def test_update_limit_order(fee, caplog, limit_buy_order_usdt, limit_sell_order_ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee, caplog): trade = Trade( id=1, - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, @@ -505,64 +530,77 @@ def test_update_market_order(market_buy_order_usdt, market_sell_order_usdt, fee, fee_open=fee.return_value, fee_close=fee.return_value, open_date=dt_now(), - exchange='binance', + exchange="binance", trading_mode=margin, leverage=1.0, ) - oobj = Order.parse_from_ccxt_object(market_buy_order_usdt, 'ADA/USDT', 'buy') + oobj = Order.parse_from_ccxt_object(market_buy_order_usdt, "ADA/USDT", "buy") trade.orders.append(oobj) trade.update_trade(oobj) assert not trade.has_open_orders assert trade.open_rate == 2.0 assert trade.close_profit is None assert trade.close_date is None - assert log_has_re(r"MARKET_BUY has been fulfilled for Trade\(id=1, " - r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, " - r"open_rate=2.00000000, open_since=.*\).", - caplog) + assert log_has_re( + r"MARKET_BUY has been fulfilled for Trade\(id=1, " + r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, " + r"open_rate=2.00000000, open_since=.*\).", + caplog, + ) caplog.clear() trade.is_open = True - oobj = Order.parse_from_ccxt_object(market_sell_order_usdt, 'ADA/USDT', 'sell') + oobj = Order.parse_from_ccxt_object(market_sell_order_usdt, "ADA/USDT", "sell") trade.orders.append(oobj) trade.update_trade(oobj) assert not trade.has_open_orders assert trade.close_rate == 2.2 assert pytest.approx(trade.close_profit) == 0.094513715710723 assert trade.close_date is not None - assert log_has_re(r"MARKET_SELL has been fulfilled for 
Trade\(id=1, " - r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, " - r"open_rate=2.00000000, open_since=.*\).", - caplog) + assert log_has_re( + r"MARKET_SELL has been fulfilled for Trade\(id=1, " + r"pair=ADA/USDT, amount=30.00000000, is_short=False, leverage=1.0, " + r"open_rate=2.00000000, open_since=.*\).", + caplog, + ) @pytest.mark.parametrize( - 'exchange,is_short,lev,open_value,close_value,profit,profit_ratio,trading_mode,funding_fees', [ + "exchange,is_short,lev,open_value,close_value,profit,profit_ratio,trading_mode,funding_fees", + [ ("binance", False, 1, 60.15, 65.835, 5.685, 0.09451371, spot, 0.0), ("binance", True, 1, 65.835, 60.151253125, 5.68374687, 0.08633321, margin, 0.0), ("binance", False, 3, 60.15, 65.83416667, 5.68416667, 0.28349958, margin, 0.0), ("binance", True, 3, 65.835, 60.151253125, 5.68374687, 0.25899963, margin, 0.0), - ("kraken", False, 1, 60.15, 65.835, 5.685, 0.09451371, spot, 0.0), ("kraken", True, 1, 65.835, 60.21015, 5.62485, 0.0854386, margin, 0.0), ("kraken", False, 3, 60.15, 65.795, 5.645, 0.28154613, margin, 0.0), ("kraken", True, 3, 65.835, 60.21015, 5.62485, 0.25631579, margin, 0.0), - - ("binance", False, 1, 60.15, 65.835, 5.685, 0.09451371, futures, 0.0), - ("binance", False, 1, 60.15, 66.835, 6.685, 0.11113881, futures, 1.0), - ("binance", True, 1, 65.835, 60.15, 5.685, 0.08635224, futures, 0.0), - ("binance", True, 1, 65.835, 61.15, 4.685, 0.07116276, futures, -1.0), - ("binance", True, 3, 65.835, 59.15, 6.685, 0.3046252, futures, 1.0), - ("binance", False, 3, 60.15, 64.835, 4.685, 0.23366583, futures, -1.0), - ]) + ("binance", False, 1, 60.15, 65.835, 5.685, 0.09451371, futures, 0.0), + ("binance", False, 1, 60.15, 66.835, 6.685, 0.11113881, futures, 1.0), + ("binance", True, 1, 65.835, 60.15, 5.685, 0.08635224, futures, 0.0), + ("binance", True, 1, 65.835, 61.15, 4.685, 0.07116276, futures, -1.0), + ("binance", True, 3, 65.835, 59.15, 6.685, 0.3046252, futures, 1.0), + ("binance", False, 3, 60.15, 64.835, 4.685, 0.23366583, futures, -1.0), + ], +) @pytest.mark.usefixtures("init_persistence") def test_calc_open_close_trade_price( - limit_order, fee, exchange, is_short, lev, - open_value, close_value, profit, profit_ratio, trading_mode, funding_fees + limit_order, + fee, + exchange, + is_short, + lev, + open_value, + close_value, + profit, + profit_ratio, + trading_mode, + funding_fees, ): trade: Trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, @@ -578,14 +616,14 @@ def test_calc_open_close_trade_price( entry_order = limit_order[trade.entry_side] exit_order = limit_order[trade.exit_side] - oobj = Order.parse_from_ccxt_object(entry_order, 'ADA/USDT', trade.entry_side) + oobj = Order.parse_from_ccxt_object(entry_order, "ADA/USDT", trade.entry_side) oobj._trade_live = trade oobj.update_from_ccxt_object(entry_order) trade.update_trade(oobj) trade.funding_fee_running = funding_fees - oobj = Order.parse_from_ccxt_object(exit_order, 'ADA/USDT', trade.exit_side) + oobj = Order.parse_from_ccxt_object(exit_order, "ADA/USDT", trade.exit_side) oobj._trade_live = trade oobj.update_from_ccxt_object(exit_order) trade.update_trade(oobj) @@ -606,7 +644,7 @@ def test_trade_close(fee, time_machine): time_machine.move_to("2022-09-01 05:00:00 +00:00", tick=False) trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, @@ -615,40 +653,44 @@ def test_trade_close(fee, time_machine): fee_close=fee.return_value, open_date=dt_now() - timedelta(minutes=10), 
interest_rate=0.0005, - exchange='binance', + exchange="binance", trading_mode=margin, leverage=1.0, ) - trade.orders.append(Order( - ft_order_side=trade.entry_side, - order_id=f'{trade.pair}-{trade.entry_side}-{trade.open_date}', - ft_is_open=False, - ft_pair=trade.pair, - amount=trade.amount, - filled=trade.amount, - remaining=0, - price=trade.open_rate, - average=trade.open_rate, - status="closed", - order_type="limit", - side=trade.entry_side, - order_filled_date=trade.open_date, - )) - trade.orders.append(Order( - ft_order_side=trade.exit_side, - order_id=f'{trade.pair}-{trade.exit_side}-{trade.open_date}', - ft_is_open=False, - ft_pair=trade.pair, - amount=trade.amount, - filled=trade.amount, - remaining=0, - price=2.2, - average=2.2, - status="closed", - order_type="limit", - side=trade.exit_side, - order_filled_date=dt_now(), - )) + trade.orders.append( + Order( + ft_order_side=trade.entry_side, + order_id=f"{trade.pair}-{trade.entry_side}-{trade.open_date}", + ft_is_open=False, + ft_pair=trade.pair, + amount=trade.amount, + filled=trade.amount, + remaining=0, + price=trade.open_rate, + average=trade.open_rate, + status="closed", + order_type="limit", + side=trade.entry_side, + order_filled_date=trade.open_date, + ) + ) + trade.orders.append( + Order( + ft_order_side=trade.exit_side, + order_id=f"{trade.pair}-{trade.exit_side}-{trade.open_date}", + ft_is_open=False, + ft_pair=trade.pair, + amount=trade.amount, + filled=trade.amount, + remaining=0, + price=2.2, + average=2.2, + status="closed", + order_type="limit", + side=trade.exit_side, + order_filled_date=dt_now(), + ) + ) assert trade.close_profit is None assert trade.close_date is None assert trade.is_open is True @@ -670,18 +712,18 @@ def test_trade_close(fee, time_machine): @pytest.mark.usefixtures("init_persistence") def test_calc_close_trade_price_exception(limit_buy_order_usdt, fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", trading_mode=margin, leverage=1.0, ) - oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, 'ADA/USDT', 'buy') + oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, "ADA/USDT", "buy") trade.update_trade(oobj) assert trade.calc_close_trade_value(trade.close_rate) == 0.0 @@ -689,22 +731,22 @@ def test_calc_close_trade_price_exception(limit_buy_order_usdt, fee): @pytest.mark.usefixtures("init_persistence") def test_update_open_order(limit_buy_order_usdt): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, open_rate=2.0, amount=30.0, fee_open=0.1, fee_close=0.1, - exchange='binance', - trading_mode=margin + exchange="binance", + trading_mode=margin, ) assert not trade.has_open_orders assert trade.close_profit is None assert trade.close_date is None - limit_buy_order_usdt['status'] = 'open' - oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, 'ADA/USDT', 'buy') + limit_buy_order_usdt["status"] = "open" + oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, "ADA/USDT", "buy") trade.update_trade(oobj) assert not trade.has_open_orders @@ -715,43 +757,40 @@ def test_update_open_order(limit_buy_order_usdt): @pytest.mark.usefixtures("init_persistence") def test_update_invalid_order(limit_buy_order_usdt): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, amount=30.0, open_rate=2.0, fee_open=0.1, fee_close=0.1, - exchange='binance', - trading_mode=margin + exchange="binance", + 
trading_mode=margin, ) - limit_buy_order_usdt['type'] = 'invalid' - oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, 'ADA/USDT', 'meep') - with pytest.raises(ValueError, match=r'Unknown order type'): + limit_buy_order_usdt["type"] = "invalid" + oobj = Order.parse_from_ccxt_object(limit_buy_order_usdt, "ADA/USDT", "meep") + with pytest.raises(ValueError, match=r"Unknown order type"): trade.update_trade(oobj) -@pytest.mark.parametrize('exchange', ['binance', 'kraken']) -@pytest.mark.parametrize('trading_mode', [spot, margin, futures]) -@pytest.mark.parametrize('lev', [1, 3]) -@pytest.mark.parametrize('is_short,fee_rate,result', [ - (False, 0.003, 60.18), - (False, 0.0025, 60.15), - (False, 0.003, 60.18), - (False, 0.0025, 60.15), - (True, 0.003, 59.82), - (True, 0.0025, 59.85), - (True, 0.003, 59.82), - (True, 0.0025, 59.85) -]) +@pytest.mark.parametrize("exchange", ["binance", "kraken"]) +@pytest.mark.parametrize("trading_mode", [spot, margin, futures]) +@pytest.mark.parametrize("lev", [1, 3]) +@pytest.mark.parametrize( + "is_short,fee_rate,result", + [ + (False, 0.003, 60.18), + (False, 0.0025, 60.15), + (False, 0.003, 60.18), + (False, 0.0025, 60.15), + (True, 0.003, 59.82), + (True, 0.0025, 59.85), + (True, 0.003, 59.82), + (True, 0.0025, 59.85), + ], +) @pytest.mark.usefixtures("init_persistence") def test_calc_open_trade_value( - limit_buy_order_usdt, - exchange, - lev, - is_short, - fee_rate, - result, - trading_mode + limit_buy_order_usdt, exchange, lev, is_short, fee_rate, result, trading_mode ): # 10 minute limit trade on Binance/Kraken at 1x, 3x leverage # fee: 0.25 %, 0.3% quote @@ -768,7 +807,7 @@ def test_calc_open_trade_value( # 1x, 3x: 30 * 2 + 30 * 2 * 0.003 = 60.18 quote # -1x,-3x: 30 * 2 - 30 * 2 * 0.003 = 59.82 quote trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, amount=30.0, open_rate=2.0, @@ -778,10 +817,11 @@ def test_calc_open_trade_value( exchange=exchange, leverage=lev, is_short=is_short, - trading_mode=trading_mode + trading_mode=trading_mode, ) oobj = Order.parse_from_ccxt_object( - limit_buy_order_usdt, 'ADA/USDT', 'sell' if is_short else 'buy') + limit_buy_order_usdt, "ADA/USDT", "sell" if is_short else "buy" + ) trade.update_trade(oobj) # Buy @ 2.0 # Get the open rate price with the standard fee rate @@ -789,38 +829,36 @@ def test_calc_open_trade_value( @pytest.mark.parametrize( - 'exchange,is_short,lev,open_rate,close_rate,fee_rate,result,trading_mode,funding_fees', [ - ('binance', False, 1, 2.0, 2.5, 0.0025, 74.8125, spot, 0), - ('binance', False, 1, 2.0, 2.5, 0.003, 74.775, spot, 0), - ('binance', False, 1, 2.0, 2.2, 0.005, 65.67, margin, 0), - ('binance', False, 3, 2.0, 2.5, 0.0025, 74.81166667, margin, 0), - ('binance', False, 3, 2.0, 2.5, 0.003, 74.77416667, margin, 0), - ('binance', True, 3, 2.2, 2.5, 0.0025, 75.18906641, margin, 0), - ('binance', True, 3, 2.2, 2.5, 0.003, 75.22656719, margin, 0), - ('binance', True, 1, 2.2, 2.5, 0.0025, 75.18906641, margin, 0), - ('binance', True, 1, 2.2, 2.5, 0.003, 75.22656719, margin, 0), - + "exchange,is_short,lev,open_rate,close_rate,fee_rate,result,trading_mode,funding_fees", + [ + ("binance", False, 1, 2.0, 2.5, 0.0025, 74.8125, spot, 0), + ("binance", False, 1, 2.0, 2.5, 0.003, 74.775, spot, 0), + ("binance", False, 1, 2.0, 2.2, 0.005, 65.67, margin, 0), + ("binance", False, 3, 2.0, 2.5, 0.0025, 74.81166667, margin, 0), + ("binance", False, 3, 2.0, 2.5, 0.003, 74.77416667, margin, 0), + ("binance", True, 3, 2.2, 2.5, 0.0025, 75.18906641, margin, 0), + ("binance", True, 3, 2.2, 
2.5, 0.003, 75.22656719, margin, 0), + ("binance", True, 1, 2.2, 2.5, 0.0025, 75.18906641, margin, 0), + ("binance", True, 1, 2.2, 2.5, 0.003, 75.22656719, margin, 0), # Kraken - ('kraken', False, 3, 2.0, 2.5, 0.0025, 74.7725, margin, 0), - ('kraken', False, 3, 2.0, 2.5, 0.003, 74.735, margin, 0), - ('kraken', True, 3, 2.2, 2.5, 0.0025, 75.2626875, margin, 0), - ('kraken', True, 3, 2.2, 2.5, 0.003, 75.300225, margin, 0), - ('kraken', True, 1, 2.2, 2.5, 0.0025, 75.2626875, margin, 0), - ('kraken', True, 1, 2.2, 2.5, 0.003, 75.300225, margin, 0), - - ('binance', False, 1, 2.0, 2.5, 0.0025, 75.8125, futures, 1), - ('binance', False, 3, 2.0, 2.5, 0.0025, 73.8125, futures, -1), - ('binance', True, 3, 2.0, 2.5, 0.0025, 74.1875, futures, 1), - ('binance', True, 1, 2.0, 2.5, 0.0025, 76.1875, futures, -1), - - ]) + ("kraken", False, 3, 2.0, 2.5, 0.0025, 74.7725, margin, 0), + ("kraken", False, 3, 2.0, 2.5, 0.003, 74.735, margin, 0), + ("kraken", True, 3, 2.2, 2.5, 0.0025, 75.2626875, margin, 0), + ("kraken", True, 3, 2.2, 2.5, 0.003, 75.300225, margin, 0), + ("kraken", True, 1, 2.2, 2.5, 0.0025, 75.2626875, margin, 0), + ("kraken", True, 1, 2.2, 2.5, 0.003, 75.300225, margin, 0), + ("binance", False, 1, 2.0, 2.5, 0.0025, 75.8125, futures, 1), + ("binance", False, 3, 2.0, 2.5, 0.0025, 73.8125, futures, -1), + ("binance", True, 3, 2.0, 2.5, 0.0025, 74.1875, futures, 1), + ("binance", True, 1, 2.0, 2.5, 0.0025, 76.1875, futures, -1), + ], +) @pytest.mark.usefixtures("init_persistence") def test_calc_close_trade_price( - open_rate, exchange, is_short, - lev, close_rate, fee_rate, result, trading_mode, funding_fees + open_rate, exchange, is_short, lev, close_rate, fee_rate, result, trading_mode, funding_fees ): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, amount=30.0, open_rate=open_rate, @@ -832,316 +870,296 @@ def test_calc_close_trade_price( is_short=is_short, leverage=lev, trading_mode=trading_mode, - funding_fees=funding_fees + funding_fees=funding_fees, ) assert round(trade.calc_close_trade_value(rate=close_rate), 8) == result @pytest.mark.parametrize( - 'exchange,is_short,lev,close_rate,fee_close,profit,profit_ratio,trading_mode,funding_fees', [ - ('binance', False, 1, 2.1, 0.0025, 2.6925, 0.044763092, spot, 0), - ('binance', False, 3, 2.1, 0.0025, 2.69166667, 0.134247714, margin, 0), - ('binance', True, 1, 2.1, 0.0025, -3.3088157, -0.055285142, margin, 0), - ('binance', True, 3, 2.1, 0.0025, -3.3088157, -0.16585542, margin, 0), - - ('binance', False, 1, 1.9, 0.0025, -3.2925, -0.054738154, margin, 0), - ('binance', False, 3, 1.9, 0.0025, -3.29333333, -0.164256026, margin, 0), - ('binance', True, 1, 1.9, 0.0025, 2.70630953, 0.0452182043, margin, 0), - ('binance', True, 3, 1.9, 0.0025, 2.70630953, 0.135654613, margin, 0), - - ('binance', False, 1, 2.2, 0.0025, 5.685, 0.09451371, margin, 0), - ('binance', False, 3, 2.2, 0.0025, 5.68416667, 0.28349958, margin, 0), - ('binance', True, 1, 2.2, 0.0025, -6.3163784, -0.10553681, margin, 0), - ('binance', True, 3, 2.2, 0.0025, -6.3163784, -0.31661044, margin, 0), - + "exchange,is_short,lev,close_rate,fee_close,profit,profit_ratio,trading_mode,funding_fees", + [ + ("binance", False, 1, 2.1, 0.0025, 2.6925, 0.044763092, spot, 0), + ("binance", False, 3, 2.1, 0.0025, 2.69166667, 0.134247714, margin, 0), + ("binance", True, 1, 2.1, 0.0025, -3.3088157, -0.055285142, margin, 0), + ("binance", True, 3, 2.1, 0.0025, -3.3088157, -0.16585542, margin, 0), + ("binance", False, 1, 1.9, 0.0025, -3.2925, -0.054738154, margin, 0), + ("binance", 
False, 3, 1.9, 0.0025, -3.29333333, -0.164256026, margin, 0), + ("binance", True, 1, 1.9, 0.0025, 2.70630953, 0.0452182043, margin, 0), + ("binance", True, 3, 1.9, 0.0025, 2.70630953, 0.135654613, margin, 0), + ("binance", False, 1, 2.2, 0.0025, 5.685, 0.09451371, margin, 0), + ("binance", False, 3, 2.2, 0.0025, 5.68416667, 0.28349958, margin, 0), + ("binance", True, 1, 2.2, 0.0025, -6.3163784, -0.10553681, margin, 0), + ("binance", True, 3, 2.2, 0.0025, -6.3163784, -0.31661044, margin, 0), # Kraken - ('kraken', False, 1, 2.1, 0.0025, 2.6925, 0.044763092, spot, 0), - ('kraken', False, 3, 2.1, 0.0025, 2.6525, 0.132294264, margin, 0), - ('kraken', True, 1, 2.1, 0.0025, -3.3706575, -0.056318421, margin, 0), - ('kraken', True, 3, 2.1, 0.0025, -3.3706575, -0.168955263, margin, 0), - - ('kraken', False, 1, 1.9, 0.0025, -3.2925, -0.054738154, margin, 0), - ('kraken', False, 3, 1.9, 0.0025, -3.3325, -0.166209476, margin, 0), - ('kraken', True, 1, 1.9, 0.0025, 2.6503575, 0.044283333, margin, 0), - ('kraken', True, 3, 1.9, 0.0025, 2.6503575, 0.132850000, margin, 0), - - ('kraken', False, 1, 2.2, 0.0025, 5.685, 0.09451371, margin, 0), - ('kraken', False, 3, 2.2, 0.0025, 5.645, 0.28154613, margin, 0), - ('kraken', True, 1, 2.2, 0.0025, -6.381165, -0.1066192, margin, 0), - ('kraken', True, 3, 2.2, 0.0025, -6.381165, -0.3198578, margin, 0), - - ('binance', False, 1, 2.1, 0.003, 2.66100000, 0.044239401, spot, 0), - ('binance', False, 1, 1.9, 0.003, -3.3209999, -0.055211970, spot, 0), - ('binance', False, 1, 2.2, 0.003, 5.6520000, 0.093965087, spot, 0), - + ("kraken", False, 1, 2.1, 0.0025, 2.6925, 0.044763092, spot, 0), + ("kraken", False, 3, 2.1, 0.0025, 2.6525, 0.132294264, margin, 0), + ("kraken", True, 1, 2.1, 0.0025, -3.3706575, -0.056318421, margin, 0), + ("kraken", True, 3, 2.1, 0.0025, -3.3706575, -0.168955263, margin, 0), + ("kraken", False, 1, 1.9, 0.0025, -3.2925, -0.054738154, margin, 0), + ("kraken", False, 3, 1.9, 0.0025, -3.3325, -0.166209476, margin, 0), + ("kraken", True, 1, 1.9, 0.0025, 2.6503575, 0.044283333, margin, 0), + ("kraken", True, 3, 1.9, 0.0025, 2.6503575, 0.132850000, margin, 0), + ("kraken", False, 1, 2.2, 0.0025, 5.685, 0.09451371, margin, 0), + ("kraken", False, 3, 2.2, 0.0025, 5.645, 0.28154613, margin, 0), + ("kraken", True, 1, 2.2, 0.0025, -6.381165, -0.1066192, margin, 0), + ("kraken", True, 3, 2.2, 0.0025, -6.381165, -0.3198578, margin, 0), + ("binance", False, 1, 2.1, 0.003, 2.66100000, 0.044239401, spot, 0), + ("binance", False, 1, 1.9, 0.003, -3.3209999, -0.055211970, spot, 0), + ("binance", False, 1, 2.2, 0.003, 5.6520000, 0.093965087, spot, 0), # FUTURES, funding_fee=1 - ('binance', False, 1, 2.1, 0.0025, 3.6925, 0.06138819, futures, 1), - ('binance', False, 3, 2.1, 0.0025, 3.6925, 0.18416458, futures, 1), - ('binance', True, 1, 2.1, 0.0025, -2.3074999, -0.03855472, futures, 1), - ('binance', True, 3, 2.1, 0.0025, -2.3074999, -0.11566416, futures, 1), - - ('binance', False, 1, 1.9, 0.0025, -2.2925, -0.03811305, futures, 1), - ('binance', False, 3, 1.9, 0.0025, -2.2925, -0.11433915, futures, 1), - ('binance', True, 1, 1.9, 0.0025, 3.7075, 0.06194653, futures, 1), - ('binance', True, 3, 1.9, 0.0025, 3.7075, 0.18583959, futures, 1), - - ('binance', False, 1, 2.2, 0.0025, 6.685, 0.11113881, futures, 1), - ('binance', False, 3, 2.2, 0.0025, 6.685, 0.33341645, futures, 1), - ('binance', True, 1, 2.2, 0.0025, -5.315, -0.08880534, futures, 1), - ('binance', True, 3, 2.2, 0.0025, -5.315, -0.26641604, futures, 1), - + ("binance", False, 1, 2.1, 0.0025, 3.6925, 0.06138819, 
futures, 1), + ("binance", False, 3, 2.1, 0.0025, 3.6925, 0.18416458, futures, 1), + ("binance", True, 1, 2.1, 0.0025, -2.3074999, -0.03855472, futures, 1), + ("binance", True, 3, 2.1, 0.0025, -2.3074999, -0.11566416, futures, 1), + ("binance", False, 1, 1.9, 0.0025, -2.2925, -0.03811305, futures, 1), + ("binance", False, 3, 1.9, 0.0025, -2.2925, -0.11433915, futures, 1), + ("binance", True, 1, 1.9, 0.0025, 3.7075, 0.06194653, futures, 1), + ("binance", True, 3, 1.9, 0.0025, 3.7075, 0.18583959, futures, 1), + ("binance", False, 1, 2.2, 0.0025, 6.685, 0.11113881, futures, 1), + ("binance", False, 3, 2.2, 0.0025, 6.685, 0.33341645, futures, 1), + ("binance", True, 1, 2.2, 0.0025, -5.315, -0.08880534, futures, 1), + ("binance", True, 3, 2.2, 0.0025, -5.315, -0.26641604, futures, 1), # FUTURES, funding_fee=-1 - ('binance', False, 1, 2.1, 0.0025, 1.6925, 0.02813798, futures, -1), - ('binance', False, 3, 2.1, 0.0025, 1.6925, 0.08441396, futures, -1), - ('binance', True, 1, 2.1, 0.0025, -4.307499, -0.07197159, futures, -1), - ('binance', True, 3, 2.1, 0.0025, -4.307499, -0.21591478, futures, -1), - - ('binance', False, 1, 1.9, 0.0025, -4.292499, -0.07136325, futures, -1), - ('binance', False, 3, 1.9, 0.0025, -4.292499, -0.21408977, futures, -1), - ('binance', True, 1, 1.9, 0.0025, 1.7075, 0.02852965, futures, -1), - ('binance', True, 3, 1.9, 0.0025, 1.7075, 0.08558897, futures, -1), - - ('binance', False, 1, 2.2, 0.0025, 4.684999, 0.07788861, futures, -1), - ('binance', False, 3, 2.2, 0.0025, 4.684999, 0.23366583, futures, -1), - ('binance', True, 1, 2.2, 0.0025, -7.315, -0.12222222, futures, -1), - ('binance', True, 3, 2.2, 0.0025, -7.315, -0.36666666, futures, -1), - + ("binance", False, 1, 2.1, 0.0025, 1.6925, 0.02813798, futures, -1), + ("binance", False, 3, 2.1, 0.0025, 1.6925, 0.08441396, futures, -1), + ("binance", True, 1, 2.1, 0.0025, -4.307499, -0.07197159, futures, -1), + ("binance", True, 3, 2.1, 0.0025, -4.307499, -0.21591478, futures, -1), + ("binance", False, 1, 1.9, 0.0025, -4.292499, -0.07136325, futures, -1), + ("binance", False, 3, 1.9, 0.0025, -4.292499, -0.21408977, futures, -1), + ("binance", True, 1, 1.9, 0.0025, 1.7075, 0.02852965, futures, -1), + ("binance", True, 3, 1.9, 0.0025, 1.7075, 0.08558897, futures, -1), + ("binance", False, 1, 2.2, 0.0025, 4.684999, 0.07788861, futures, -1), + ("binance", False, 3, 2.2, 0.0025, 4.684999, 0.23366583, futures, -1), + ("binance", True, 1, 2.2, 0.0025, -7.315, -0.12222222, futures, -1), + ("binance", True, 3, 2.2, 0.0025, -7.315, -0.36666666, futures, -1), # FUTURES, funding_fee=0 - ('binance', False, 1, 2.1, 0.0025, 2.6925, 0.04476309, futures, 0), - ('binance', False, 3, 2.1, 0.0025, 2.6925, 0.13428928, futures, 0), - ('binance', True, 1, 2.1, 0.0025, -3.3074999, -0.05526316, futures, 0), - ('binance', True, 3, 2.1, 0.0025, -3.3074999, -0.16578947, futures, 0), - - ('binance', False, 1, 1.9, 0.0025, -3.2925, -0.05473815, futures, 0), - ('binance', False, 3, 1.9, 0.0025, -3.2925, -0.16421446, futures, 0), - ('binance', True, 1, 1.9, 0.0025, 2.7075, 0.0452381, futures, 0), - ('binance', True, 3, 1.9, 0.0025, 2.7075, 0.13571429, futures, 0), - ]) + ("binance", False, 1, 2.1, 0.0025, 2.6925, 0.04476309, futures, 0), + ("binance", False, 3, 2.1, 0.0025, 2.6925, 0.13428928, futures, 0), + ("binance", True, 1, 2.1, 0.0025, -3.3074999, -0.05526316, futures, 0), + ("binance", True, 3, 2.1, 0.0025, -3.3074999, -0.16578947, futures, 0), + ("binance", False, 1, 1.9, 0.0025, -3.2925, -0.05473815, futures, 0), + ("binance", False, 3, 1.9, 
0.0025, -3.2925, -0.16421446, futures, 0), + ("binance", True, 1, 1.9, 0.0025, 2.7075, 0.0452381, futures, 0), + ("binance", True, 3, 1.9, 0.0025, 2.7075, 0.13571429, futures, 0), + ], +) @pytest.mark.usefixtures("init_persistence") def test_calc_profit( - exchange, - is_short, - lev, - close_rate, - fee_close, - profit, - profit_ratio, - trading_mode, - funding_fees + exchange, is_short, lev, close_rate, fee_close, profit, profit_ratio, trading_mode, funding_fees ): """ - 10 minute limit trade on Binance/Kraken at 1x, 3x leverage - arguments: - fee: - 0.25% quote - 0.30% quote - interest_rate: 0.05% per 4 hrs - open_rate: 2.0 quote - close_rate: - 1.9 quote - 2.1 quote - 2.2 quote - amount: = 30.0 crypto - stake_amount - 1x,-1x: 60.0 quote - 3x,-3x: 20.0 quote - hours: 1/6 (10 minutes) - funding_fees: 1 - borrowed - 1x: 0 quote - 3x: 40 quote - -1x: 30 crypto - -3x: 30 crypto - time-periods: - kraken: (1 + 1) 4hr_periods = 2 4hr_periods - binance: 1/24 24hr_periods - interest: borrowed * interest_rate * time-periods - 1x : / - binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote - kraken 3x: 40 * 0.0005 * 2 = 0.040 quote - binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto - kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto - open_value: (amount * open_rate) ± (amount * open_rate * fee) - 0.0025 fee - 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote - -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.85 quote - 0.003 fee: Is only applied to close rate in this test - amount_closed: - 1x, 3x = amount - -1x, -3x = amount + interest - binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto - kraken -1x,-3x: 30 + 0.03 = 30.03 crypto + 10 minute limit trade on Binance/Kraken at 1x, 3x leverage + arguments: + fee: + 0.25% quote + 0.30% quote + interest_rate: 0.05% per 4 hrs + open_rate: 2.0 quote + close_rate: + 1.9 quote + 2.1 quote + 2.2 quote + amount: = 30.0 crypto + stake_amount + 1x,-1x: 60.0 quote + 3x,-3x: 20.0 quote + hours: 1/6 (10 minutes) + funding_fees: 1 + borrowed + 1x: 0 quote + 3x: 40 quote + -1x: 30 crypto + -3x: 30 crypto + time-periods: + kraken: (1 + 1) 4hr_periods = 2 4hr_periods + binance: 1/24 24hr_periods + interest: borrowed * interest_rate * time-periods + 1x : / + binance 3x: 40 * 0.0005 * 1/24 = 0.0008333333333333334 quote + kraken 3x: 40 * 0.0005 * 2 = 0.040 quote + binace -1x,-3x: 30 * 0.0005 * 1/24 = 0.000625 crypto + kraken -1x,-3x: 30 * 0.0005 * 2 = 0.030 crypto + open_value: (amount * open_rate) ± (amount * open_rate * fee) + 0.0025 fee + 1x, 3x: 30 * 2 + 30 * 2 * 0.0025 = 60.15 quote + -1x,-3x: 30 * 2 - 30 * 2 * 0.0025 = 59.85 quote + 0.003 fee: Is only applied to close rate in this test + amount_closed: + 1x, 3x = amount + -1x, -3x = amount + interest + binance -1x,-3x: 30 + 0.000625 = 30.000625 crypto + kraken -1x,-3x: 30 + 0.03 = 30.03 crypto + close_value: + equations: + 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest + -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) + 2.1 quote + bin,krak 1x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) = 62.8425 + bin 3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) - 0.0008333333 = 62.8416666667 + krak 3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) - 0.040 = 62.8025 + bin -1x,-3x: (30.000625 * 2.1) + (30.000625 * 2.1 * 0.0025) = 63.15881578125 + krak -1x,-3x: (30.03 * 2.1) + (30.03 * 2.1 * 0.0025) = 63.2206575 + 1.9 quote + bin,krak 1x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) = 56.8575 + bin 3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) - 0.0008333333 = 56.85666667 + krak 3x: (30.00 * 
1.9) - (30.00 * 1.9 * 0.0025) - 0.040 = 56.8175 + bin -1x,-3x: (30.000625 * 1.9) + (30.000625 * 1.9 * 0.0025) = 57.14369046875 + krak -1x,-3x: (30.03 * 1.9) + (30.03 * 1.9 * 0.0025) = 57.1996425 + 2.2 quote + bin,krak 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 + bin 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 + krak 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 + bin -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.1663784375 + krak -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 + total_profit: + equations: + 1x, 3x : close_value - open_value + -1x,-3x: open_value - close_value + 2.1 quote + binance,kraken 1x: 62.8425 - 60.15 = 2.6925 + binance 3x: 62.84166667 - 60.15 = 2.69166667 + kraken 3x: 62.8025 - 60.15 = 2.6525 + binance -1x,-3x: 59.850 - 63.15881578125 = -3.308815781249997 + kraken -1x,-3x: 59.850 - 63.2206575 = -3.3706575 + 1.9 quote + binance,kraken 1x: 56.8575 - 60.15 = -3.2925 + binance 3x: 56.85666667 - 60.15 = -3.29333333 + kraken 3x: 56.8175 - 60.15 = -3.3325 + binance -1x,-3x: 59.850 - 57.14369046875 = 2.7063095312499996 + kraken -1x,-3x: 59.850 - 57.1996425 = 2.6503575 + 2.2 quote + binance,kraken 1x: 65.835 - 60.15 = 5.685 + binance 3x: 65.83416667 - 60.15 = 5.68416667 + kraken 3x: 65.795 - 60.15 = 5.645 + binance -1x,-3x: 59.850 - 66.1663784375 = -6.316378437499999 + kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 + total_profit_ratio: + equations: + 1x, 3x : ((close_value/open_value) - 1) * leverage + -1x,-3x: (1 - (close_value/open_value)) * leverage + 2.1 quote + binance,kraken 1x: (62.8425 / 60.15) - 1 = 0.04476309226932673 + binance 3x: ((62.84166667 / 60.15) - 1)*3 = 0.13424771421446402 + kraken 3x: ((62.8025 / 60.15) - 1)*3 = 0.13229426433915248 + binance -1x: 1 - (63.15881578125 / 59.850) = -0.05528514254385963 + binance -3x: (1 - (63.15881578125 / 59.850))*3 = -0.1658554276315789 + kraken -1x: 1 - (63.2206575 / 59.850) = -0.05631842105263152 + kraken -3x: (1 - (63.2206575 / 59.850))*3 = -0.16895526315789455 + 1.9 quote + binance,kraken 1x: (56.8575 / 60.15) - 1 = -0.05473815461346632 + binance 3x: ((56.85666667 / 60.15) - 1)*3 = -0.16425602643391513 + kraken 3x: ((56.8175 / 60.15) - 1)*3 = -0.16620947630922667 + binance -1x: 1 - (57.14369046875 / 59.850) = 0.045218204365079395 + binance -3x: (1 - (57.14369046875 / 59.850))*3 = 0.13565461309523819 + kraken -1x: 1 - (57.1996425 / 59.850) = 0.04428333333333334 + kraken -3x: (1 - (57.1996425 / 59.850))*3 = 0.13285000000000002 + 2.2 quote + binance,kraken 1x: (65.835 / 60.15) - 1 = 0.0945137157107232 + binance 3x: ((65.83416667 / 60.15) - 1)*3 = 0.2834995845386534 + kraken 3x: ((65.795 / 60.15) - 1)*3 = 0.2815461346633419 + binance -1x: 1 - (66.1663784375 / 59.850) = -0.1055368159983292 + binance -3x: (1 - (66.1663784375 / 59.850))*3 = -0.3166104479949876 + kraken -1x: 1 - (66.231165 / 59.850) = -0.106619298245614 + kraken -3x: (1 - (66.231165 / 59.850))*3 = -0.319857894736842 + fee: 0.003, 1x close_value: - equations: - 1x, 3x: (amount_closed * close_rate) - (amount_closed * close_rate * fee) - interest - -1x,-3x: (amount_closed * close_rate) + (amount_closed * close_rate * fee) - 2.1 quote - bin,krak 1x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) = 62.8425 - bin 3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) - 0.0008333333 = 62.8416666667 - krak 3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) - 0.040 = 62.8025 - bin -1x,-3x: (30.000625 * 2.1) + (30.000625 * 2.1 * 0.0025) = 63.15881578125 - krak -1x,-3x: (30.03 * 2.1) + (30.03 * 2.1 * 0.0025) = 
63.2206575 - 1.9 quote - bin,krak 1x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) = 56.8575 - bin 3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) - 0.0008333333 = 56.85666667 - krak 3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) - 0.040 = 56.8175 - bin -1x,-3x: (30.000625 * 1.9) + (30.000625 * 1.9 * 0.0025) = 57.14369046875 - krak -1x,-3x: (30.03 * 1.9) + (30.03 * 1.9 * 0.0025) = 57.1996425 - 2.2 quote - bin,krak 1x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) = 65.835 - bin 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.00083333 = 65.83416667 - krak 3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) - 0.040 = 65.795 - bin -1x,-3x: (30.000625 * 2.20) + (30.000625 * 2.20 * 0.0025) = 66.1663784375 - krak -1x,-3x: (30.03 * 2.20) + (30.03 * 2.20 * 0.0025) = 66.231165 - total_profit: - equations: - 1x, 3x : close_value - open_value - -1x,-3x: open_value - close_value - 2.1 quote - binance,kraken 1x: 62.8425 - 60.15 = 2.6925 - binance 3x: 62.84166667 - 60.15 = 2.69166667 - kraken 3x: 62.8025 - 60.15 = 2.6525 - binance -1x,-3x: 59.850 - 63.15881578125 = -3.308815781249997 - kraken -1x,-3x: 59.850 - 63.2206575 = -3.3706575 - 1.9 quote - binance,kraken 1x: 56.8575 - 60.15 = -3.2925 - binance 3x: 56.85666667 - 60.15 = -3.29333333 - kraken 3x: 56.8175 - 60.15 = -3.3325 - binance -1x,-3x: 59.850 - 57.14369046875 = 2.7063095312499996 - kraken -1x,-3x: 59.850 - 57.1996425 = 2.6503575 - 2.2 quote - binance,kraken 1x: 65.835 - 60.15 = 5.685 - binance 3x: 65.83416667 - 60.15 = 5.68416667 - kraken 3x: 65.795 - 60.15 = 5.645 - binance -1x,-3x: 59.850 - 66.1663784375 = -6.316378437499999 - kraken -1x,-3x: 59.850 - 66.231165 = -6.381165 - total_profit_ratio: - equations: - 1x, 3x : ((close_value/open_value) - 1) * leverage - -1x,-3x: (1 - (close_value/open_value)) * leverage - 2.1 quote - binance,kraken 1x: (62.8425 / 60.15) - 1 = 0.04476309226932673 - binance 3x: ((62.84166667 / 60.15) - 1)*3 = 0.13424771421446402 - kraken 3x: ((62.8025 / 60.15) - 1)*3 = 0.13229426433915248 - binance -1x: 1 - (63.15881578125 / 59.850) = -0.05528514254385963 - binance -3x: (1 - (63.15881578125 / 59.850))*3 = -0.1658554276315789 - kraken -1x: 1 - (63.2206575 / 59.850) = -0.05631842105263152 - kraken -3x: (1 - (63.2206575 / 59.850))*3 = -0.16895526315789455 - 1.9 quote - binance,kraken 1x: (56.8575 / 60.15) - 1 = -0.05473815461346632 - binance 3x: ((56.85666667 / 60.15) - 1)*3 = -0.16425602643391513 - kraken 3x: ((56.8175 / 60.15) - 1)*3 = -0.16620947630922667 - binance -1x: 1 - (57.14369046875 / 59.850) = 0.045218204365079395 - binance -3x: (1 - (57.14369046875 / 59.850))*3 = 0.13565461309523819 - kraken -1x: 1 - (57.1996425 / 59.850) = 0.04428333333333334 - kraken -3x: (1 - (57.1996425 / 59.850))*3 = 0.13285000000000002 - 2.2 quote - binance,kraken 1x: (65.835 / 60.15) - 1 = 0.0945137157107232 - binance 3x: ((65.83416667 / 60.15) - 1)*3 = 0.2834995845386534 - kraken 3x: ((65.795 / 60.15) - 1)*3 = 0.2815461346633419 - binance -1x: 1 - (66.1663784375 / 59.850) = -0.1055368159983292 - binance -3x: (1 - (66.1663784375 / 59.850))*3 = -0.3166104479949876 - kraken -1x: 1 - (66.231165 / 59.850) = -0.106619298245614 - kraken -3x: (1 - (66.231165 / 59.850))*3 = -0.319857894736842 - fee: 0.003, 1x + 2.1 quote: (30.00 * 2.1) - (30.00 * 2.1 * 0.003) = 62.811 + 1.9 quote: (30.00 * 1.9) - (30.00 * 1.9 * 0.003) = 56.829 + 2.2 quote: (30.00 * 2.2) - (30.00 * 2.2 * 0.003) = 65.802 + total_profit + fee: 0.003, 1x + 2.1 quote: 62.811 - 60.15 = 2.6610000000000014 + 1.9 quote: 56.829 - 60.15 = -3.320999999999998 + 2.2 quote: 65.802 - 60.15 = 5.652000000000008 + 
total_profit_ratio + fee: 0.003, 1x + 2.1 quote: (62.811 / 60.15) - 1 = 0.04423940149625927 + 1.9 quote: (56.829 / 60.15) - 1 = -0.05521197007481293 + 2.2 quote: (65.802 / 60.15) - 1 = 0.09396508728179565 + futures (live): + funding_fee: 1 close_value: - 2.1 quote: (30.00 * 2.1) - (30.00 * 2.1 * 0.003) = 62.811 - 1.9 quote: (30.00 * 1.9) - (30.00 * 1.9 * 0.003) = 56.829 - 2.2 quote: (30.00 * 2.2) - (30.00 * 2.2 * 0.003) = 65.802 - total_profit - fee: 0.003, 1x - 2.1 quote: 62.811 - 60.15 = 2.6610000000000014 - 1.9 quote: 56.829 - 60.15 = -3.320999999999998 - 2.2 quote: 65.802 - 60.15 = 5.652000000000008 - total_profit_ratio - fee: 0.003, 1x - 2.1 quote: (62.811 / 60.15) - 1 = 0.04423940149625927 - 1.9 quote: (56.829 / 60.15) - 1 = -0.05521197007481293 - 2.2 quote: (65.802 / 60.15) - 1 = 0.09396508728179565 - futures (live): - funding_fee: 1 - close_value: - equations: - 1x,3x: (amount * close_rate) - (amount * close_rate * fee) + funding_fees - -1x,-3x: (amount * close_rate) + (amount * close_rate * fee) - funding_fees - 2.1 quote - 1x,3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) + 1 = 63.8425 - -1x,-3x: (30.00 * 2.1) + (30.00 * 2.1 * 0.0025) - 1 = 62.1575 - 1.9 quote - 1x,3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) + 1 = 57.8575 - -1x,-3x: (30.00 * 1.9) + (30.00 * 1.9 * 0.0025) - 1 = 56.1425 - 2.2 quote: - 1x,3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) + 1 = 66.835 - -1x,-3x: (30.00 * 2.20) + (30.00 * 2.20 * 0.0025) - 1 = 65.165 - total_profit: - 2.1 quote - 1x,3x: 63.8425 - 60.15 = 3.6925 - -1x,-3x: 59.850 - 62.1575 = -2.3074999999999974 - 1.9 quote - 1x,3x: 57.8575 - 60.15 = -2.2925 - -1x,-3x: 59.850 - 56.1425 = 3.707500000000003 - 2.2 quote: - 1x,3x: 66.835 - 60.15 = 6.685 - -1x,-3x: 59.850 - 65.165 = -5.315000000000005 - total_profit_ratio: - 2.1 quote - 1x: (63.8425 / 60.15) - 1 = 0.06138819617622615 - 3x: ((63.8425 / 60.15) - 1)*3 = 0.18416458852867845 - -1x: 1 - (62.1575 / 59.850) = -0.038554720133667564 - -3x: (1 - (62.1575 / 59.850))*3 = -0.11566416040100269 - 1.9 quote - 1x: (57.8575 / 60.15) - 1 = -0.0381130507065669 - 3x: ((57.8575 / 60.15) - 1)*3 = -0.1143391521197007 - -1x: 1 - (56.1425 / 59.850) = 0.06194653299916464 - -3x: (1 - (56.1425 / 59.850))*3 = 0.18583959899749392 - 2.2 quote - 1x: (66.835 / 60.15) - 1 = 0.11113881961762262 - 3x: ((66.835 / 60.15) - 1)*3 = 0.33341645885286786 - -1x: 1 - (65.165 / 59.850) = -0.08880534670008355 - -3x: (1 - (65.165 / 59.850))*3 = -0.26641604010025066 - funding_fee: -1 - close_value: - equations: - (amount * close_rate) - (amount * close_rate * fee) + funding_fees - (amount * close_rate) - (amount * close_rate * fee) - funding_fees - 2.1 quote - 1x,3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) + (-1) = 61.8425 - -1x,-3x: (30.00 * 2.1) + (30.00 * 2.1 * 0.0025) - (-1) = 64.1575 - 1.9 quote - 1x,3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) + (-1) = 55.8575 - -1x,-3x: (30.00 * 1.9) + (30.00 * 1.9 * 0.0025) - (-1) = 58.1425 - 2.2 quote: - 1x,3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) + (-1) = 64.835 - -1x,-3x: (30.00 * 2.20) + (30.00 * 2.20 * 0.0025) - (-1) = 67.165 - total_profit: - 2.1 quote - 1x,3x: 61.8425 - 60.15 = 1.6925000000000026 - -1x,-3x: 59.850 - 64.1575 = -4.307499999999997 - 1.9 quote - 1x,3x: 55.8575 - 60.15 = -4.292499999999997 - -1x,-3x: 59.850 - 58.1425 = 1.7075000000000031 - 2.2 quote: - 1x,3x: 64.835 - 60.15 = 4.684999999999995 - -1x,-3x: 59.850 - 67.165 = -7.315000000000005 - total_profit_ratio: - 2.1 quote - 1x: (61.8425 / 60.15) - 1 = 0.028137988362427313 - 3x: ((61.8425 / 60.15) - 1)*3 = 0.08441396508728194 - -1x: 1 - 
(64.1575 / 59.850) = -0.07197159565580624 - -3x: (1 - (64.1575 / 59.850))*3 = -0.21591478696741873 - 1.9 quote - 1x: (55.8575 / 60.15) - 1 = -0.07136325852036574 - 3x: ((55.8575 / 60.15) - 1)*3 = -0.2140897755610972 - -1x: 1 - (58.1425 / 59.850) = 0.02852965747702596 - -3x: (1 - (58.1425 / 59.850))*3 = 0.08558897243107788 - 2.2 quote - 1x: (64.835 / 60.15) - 1 = 0.07788861180382378 - 3x: ((64.835 / 60.15) - 1)*3 = 0.23366583541147135 - -1x: 1 - (67.165 / 59.850) = -0.12222222222222223 - -3x: (1 - (67.165 / 59.850))*3 = -0.3666666666666667 + equations: + 1x,3x: (amount * close_rate) - (amount * close_rate * fee) + funding_fees + -1x,-3x: (amount * close_rate) + (amount * close_rate * fee) - funding_fees + 2.1 quote + 1x,3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) + 1 = 63.8425 + -1x,-3x: (30.00 * 2.1) + (30.00 * 2.1 * 0.0025) - 1 = 62.1575 + 1.9 quote + 1x,3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) + 1 = 57.8575 + -1x,-3x: (30.00 * 1.9) + (30.00 * 1.9 * 0.0025) - 1 = 56.1425 + 2.2 quote: + 1x,3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) + 1 = 66.835 + -1x,-3x: (30.00 * 2.20) + (30.00 * 2.20 * 0.0025) - 1 = 65.165 + total_profit: + 2.1 quote + 1x,3x: 63.8425 - 60.15 = 3.6925 + -1x,-3x: 59.850 - 62.1575 = -2.3074999999999974 + 1.9 quote + 1x,3x: 57.8575 - 60.15 = -2.2925 + -1x,-3x: 59.850 - 56.1425 = 3.707500000000003 + 2.2 quote: + 1x,3x: 66.835 - 60.15 = 6.685 + -1x,-3x: 59.850 - 65.165 = -5.315000000000005 + total_profit_ratio: + 2.1 quote + 1x: (63.8425 / 60.15) - 1 = 0.06138819617622615 + 3x: ((63.8425 / 60.15) - 1)*3 = 0.18416458852867845 + -1x: 1 - (62.1575 / 59.850) = -0.038554720133667564 + -3x: (1 - (62.1575 / 59.850))*3 = -0.11566416040100269 + 1.9 quote + 1x: (57.8575 / 60.15) - 1 = -0.0381130507065669 + 3x: ((57.8575 / 60.15) - 1)*3 = -0.1143391521197007 + -1x: 1 - (56.1425 / 59.850) = 0.06194653299916464 + -3x: (1 - (56.1425 / 59.850))*3 = 0.18583959899749392 + 2.2 quote + 1x: (66.835 / 60.15) - 1 = 0.11113881961762262 + 3x: ((66.835 / 60.15) - 1)*3 = 0.33341645885286786 + -1x: 1 - (65.165 / 59.850) = -0.08880534670008355 + -3x: (1 - (65.165 / 59.850))*3 = -0.26641604010025066 + funding_fee: -1 + close_value: + equations: + (amount * close_rate) - (amount * close_rate * fee) + funding_fees + (amount * close_rate) - (amount * close_rate * fee) - funding_fees + 2.1 quote + 1x,3x: (30.00 * 2.1) - (30.00 * 2.1 * 0.0025) + (-1) = 61.8425 + -1x,-3x: (30.00 * 2.1) + (30.00 * 2.1 * 0.0025) - (-1) = 64.1575 + 1.9 quote + 1x,3x: (30.00 * 1.9) - (30.00 * 1.9 * 0.0025) + (-1) = 55.8575 + -1x,-3x: (30.00 * 1.9) + (30.00 * 1.9 * 0.0025) - (-1) = 58.1425 + 2.2 quote: + 1x,3x: (30.00 * 2.20) - (30.00 * 2.20 * 0.0025) + (-1) = 64.835 + -1x,-3x: (30.00 * 2.20) + (30.00 * 2.20 * 0.0025) - (-1) = 67.165 + total_profit: + 2.1 quote + 1x,3x: 61.8425 - 60.15 = 1.6925000000000026 + -1x,-3x: 59.850 - 64.1575 = -4.307499999999997 + 1.9 quote + 1x,3x: 55.8575 - 60.15 = -4.292499999999997 + -1x,-3x: 59.850 - 58.1425 = 1.7075000000000031 + 2.2 quote: + 1x,3x: 64.835 - 60.15 = 4.684999999999995 + -1x,-3x: 59.850 - 67.165 = -7.315000000000005 + total_profit_ratio: + 2.1 quote + 1x: (61.8425 / 60.15) - 1 = 0.028137988362427313 + 3x: ((61.8425 / 60.15) - 1)*3 = 0.08441396508728194 + -1x: 1 - (64.1575 / 59.850) = -0.07197159565580624 + -3x: (1 - (64.1575 / 59.850))*3 = -0.21591478696741873 + 1.9 quote + 1x: (55.8575 / 60.15) - 1 = -0.07136325852036574 + 3x: ((55.8575 / 60.15) - 1)*3 = -0.2140897755610972 + -1x: 1 - (58.1425 / 59.850) = 0.02852965747702596 + -3x: (1 - (58.1425 / 59.850))*3 = 0.08558897243107788 + 
2.2 quote + 1x: (64.835 / 60.15) - 1 = 0.07788861180382378 + 3x: ((64.835 / 60.15) - 1)*3 = 0.23366583541147135 + -1x: 1 - (67.165 / 59.850) = -0.12222222222222223 + -3x: (1 - (67.165 / 59.850))*3 = -0.3666666666666667 """ trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=60.0, amount=30.0, open_rate=2.0, @@ -1154,7 +1172,7 @@ def test_calc_profit( fee_close=fee_close, max_stake_amount=60.0, trading_mode=trading_mode, - funding_fees=funding_fees + funding_fees=funding_fees, ) profit_res = trade.calculate_profit(close_rate) @@ -1176,20 +1194,22 @@ def test_calc_profit( assert pytest.approx(profit_res2.total_profit) == round(profit, 8) # assert pytest.approx(profit_res2.total_profit_ratio) == round(profit_ratio, 8) - assert pytest.approx(trade.calc_profit(close_rate, trade.amount, - trade.open_rate)) == round(profit, 8) - assert pytest.approx(trade.calc_profit_ratio(close_rate, trade.amount, - trade.open_rate)) == round(profit_ratio, 8) + assert pytest.approx(trade.calc_profit(close_rate, trade.amount, trade.open_rate)) == round( + profit, 8 + ) + assert pytest.approx( + trade.calc_profit_ratio(close_rate, trade.amount, trade.open_rate) + ) == round(profit_ratio, 8) def test_adjust_stop_loss(fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, amount=30, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, ) @@ -1236,12 +1256,12 @@ def test_adjust_stop_loss(fee): def test_adjust_stop_loss_short(fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=0.001, amount=5, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, is_short=True, @@ -1290,12 +1310,12 @@ def test_adjust_stop_loss_short(fee): def test_adjust_min_max_rates(fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, amount=30.0, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, ) @@ -1325,8 +1345,8 @@ def test_adjust_min_max_rates(fee): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('use_db', [True, False]) -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("use_db", [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_get_open(fee, is_short, use_db): Trade.use_db = use_db Trade.reset_trades() @@ -1339,7 +1359,7 @@ def test_get_open(fee, is_short, use_db): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('use_db', [True, False]) +@pytest.mark.parametrize("use_db", [True, False]) def test_get_open_lev(fee, use_db): Trade.use_db = use_db Trade.reset_trades() @@ -1351,8 +1371,8 @@ def test_get_open_lev(fee, use_db): Trade.use_db = True -@pytest.mark.parametrize('is_short', [True, False]) -@pytest.mark.parametrize('use_db', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) +@pytest.mark.parametrize("use_db", [True, False]) @pytest.mark.usefixtures("init_persistence") def test_get_open_orders(fee, is_short, use_db): Trade.use_db = use_db @@ -1371,10 +1391,9 @@ def test_get_open_orders(fee, is_short, use_db): @pytest.mark.usefixtures("init_persistence") def test_to_json(fee): - # Simulate dry_run entries trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=0.001, amount=123.0, amount_requested=123.0, @@ -1382,7 +1401,7 @@ def test_to_json(fee): fee_close=fee.return_value, open_date=dt_now() - timedelta(hours=2), open_rate=0.123, - 
exchange='binance', + exchange="binance", enter_tag=None, precision_mode=1, amount_precision=8.0, @@ -1393,75 +1412,75 @@ def test_to_json(fee): assert isinstance(result, dict) assert result == { - 'trade_id': None, - 'pair': 'ADA/USDT', - 'base_currency': 'ADA', - 'quote_currency': 'USDT', - 'is_open': None, - 'open_date': trade.open_date.strftime(DATETIME_PRINT_FORMAT), - 'open_timestamp': int(trade.open_date.timestamp() * 1000), - 'open_fill_date': None, - 'open_fill_timestamp': None, - 'close_date': None, - 'close_timestamp': None, - 'open_rate': 0.123, - 'open_rate_requested': None, - 'open_trade_value': 15.1668225, - 'fee_close': 0.0025, - 'fee_close_cost': None, - 'fee_close_currency': None, - 'fee_open': 0.0025, - 'fee_open_cost': None, - 'fee_open_currency': None, - 'close_rate': None, - 'close_rate_requested': None, - 'amount': 123.0, - 'amount_requested': 123.0, - 'stake_amount': 0.001, - 'max_stake_amount': None, - 'trade_duration': None, - 'trade_duration_s': None, - 'realized_profit': 0.0, - 'realized_profit_ratio': None, - 'close_profit': None, - 'close_profit_pct': None, - 'close_profit_abs': None, - 'profit_ratio': None, - 'profit_pct': None, - 'profit_abs': None, - 'exit_reason': None, - 'exit_order_status': None, - 'stop_loss_abs': None, - 'stop_loss_ratio': None, - 'stop_loss_pct': None, - 'stoploss_last_update': None, - 'stoploss_last_update_timestamp': None, - 'initial_stop_loss_abs': None, - 'initial_stop_loss_pct': None, - 'initial_stop_loss_ratio': None, - 'min_rate': None, - 'max_rate': None, - 'strategy': None, - 'enter_tag': None, - 'timeframe': None, - 'exchange': 'binance', - 'leverage': None, - 'interest_rate': None, - 'liquidation_price': None, - 'is_short': None, - 'trading_mode': None, - 'funding_fees': None, - 'amount_precision': 8.0, - 'price_precision': 7.0, - 'precision_mode': 1, - 'contract_size': 1, - 'orders': [], - 'has_open_orders': False, + "trade_id": None, + "pair": "ADA/USDT", + "base_currency": "ADA", + "quote_currency": "USDT", + "is_open": None, + "open_date": trade.open_date.strftime(DATETIME_PRINT_FORMAT), + "open_timestamp": int(trade.open_date.timestamp() * 1000), + "open_fill_date": None, + "open_fill_timestamp": None, + "close_date": None, + "close_timestamp": None, + "open_rate": 0.123, + "open_rate_requested": None, + "open_trade_value": 15.1668225, + "fee_close": 0.0025, + "fee_close_cost": None, + "fee_close_currency": None, + "fee_open": 0.0025, + "fee_open_cost": None, + "fee_open_currency": None, + "close_rate": None, + "close_rate_requested": None, + "amount": 123.0, + "amount_requested": 123.0, + "stake_amount": 0.001, + "max_stake_amount": None, + "trade_duration": None, + "trade_duration_s": None, + "realized_profit": 0.0, + "realized_profit_ratio": None, + "close_profit": None, + "close_profit_pct": None, + "close_profit_abs": None, + "profit_ratio": None, + "profit_pct": None, + "profit_abs": None, + "exit_reason": None, + "exit_order_status": None, + "stop_loss_abs": None, + "stop_loss_ratio": None, + "stop_loss_pct": None, + "stoploss_last_update": None, + "stoploss_last_update_timestamp": None, + "initial_stop_loss_abs": None, + "initial_stop_loss_pct": None, + "initial_stop_loss_ratio": None, + "min_rate": None, + "max_rate": None, + "strategy": None, + "enter_tag": None, + "timeframe": None, + "exchange": "binance", + "leverage": None, + "interest_rate": None, + "liquidation_price": None, + "is_short": None, + "trading_mode": None, + "funding_fees": None, + "amount_precision": 8.0, + "price_precision": 7.0, + 
"precision_mode": 1, + "contract_size": 1, + "orders": [], + "has_open_orders": False, } # Simulate dry_run entries trade = Trade( - pair='XRP/BTC', + pair="XRP/BTC", stake_amount=0.001, amount=100.0, amount_requested=101.0, @@ -1471,94 +1490,94 @@ def test_to_json(fee): close_date=dt_now() - timedelta(hours=1), open_rate=0.123, close_rate=0.125, - enter_tag='buys_signal_001', - exchange='binance', + enter_tag="buys_signal_001", + exchange="binance", precision_mode=2, amount_precision=7.0, price_precision=8.0, - contract_size=1 + contract_size=1, ) result = trade.to_json() assert isinstance(result, dict) assert result == { - 'trade_id': None, - 'pair': 'XRP/BTC', - 'base_currency': 'XRP', - 'quote_currency': 'BTC', - 'open_date': trade.open_date.strftime(DATETIME_PRINT_FORMAT), - 'open_timestamp': int(trade.open_date.timestamp() * 1000), - 'open_fill_date': None, - 'open_fill_timestamp': None, - 'close_date': trade.close_date.strftime(DATETIME_PRINT_FORMAT), - 'close_timestamp': int(trade.close_date.timestamp() * 1000), - 'open_rate': 0.123, - 'close_rate': 0.125, - 'amount': 100.0, - 'amount_requested': 101.0, - 'stake_amount': 0.001, - 'max_stake_amount': None, - 'trade_duration': 60, - 'trade_duration_s': 3600, - 'stop_loss_abs': None, - 'stop_loss_pct': None, - 'stop_loss_ratio': None, - 'stoploss_last_update': None, - 'stoploss_last_update_timestamp': None, - 'initial_stop_loss_abs': None, - 'initial_stop_loss_pct': None, - 'initial_stop_loss_ratio': None, - 'realized_profit': 0.0, - 'realized_profit_ratio': None, - 'close_profit': None, - 'close_profit_pct': None, - 'close_profit_abs': None, - 'profit_ratio': None, - 'profit_pct': None, - 'profit_abs': None, - 'close_rate_requested': None, - 'fee_close': 0.0025, - 'fee_close_cost': None, - 'fee_close_currency': None, - 'fee_open': 0.0025, - 'fee_open_cost': None, - 'fee_open_currency': None, - 'is_open': None, - 'max_rate': None, - 'min_rate': None, - 'open_rate_requested': None, - 'open_trade_value': 12.33075, - 'exit_reason': None, - 'exit_order_status': None, - 'strategy': None, - 'enter_tag': 'buys_signal_001', - 'timeframe': None, - 'exchange': 'binance', - 'leverage': None, - 'interest_rate': None, - 'liquidation_price': None, - 'is_short': None, - 'trading_mode': None, - 'funding_fees': None, - 'amount_precision': 7.0, - 'price_precision': 8.0, - 'precision_mode': 2, - 'contract_size': 1, - 'orders': [], - 'has_open_orders': False, + "trade_id": None, + "pair": "XRP/BTC", + "base_currency": "XRP", + "quote_currency": "BTC", + "open_date": trade.open_date.strftime(DATETIME_PRINT_FORMAT), + "open_timestamp": int(trade.open_date.timestamp() * 1000), + "open_fill_date": None, + "open_fill_timestamp": None, + "close_date": trade.close_date.strftime(DATETIME_PRINT_FORMAT), + "close_timestamp": int(trade.close_date.timestamp() * 1000), + "open_rate": 0.123, + "close_rate": 0.125, + "amount": 100.0, + "amount_requested": 101.0, + "stake_amount": 0.001, + "max_stake_amount": None, + "trade_duration": 60, + "trade_duration_s": 3600, + "stop_loss_abs": None, + "stop_loss_pct": None, + "stop_loss_ratio": None, + "stoploss_last_update": None, + "stoploss_last_update_timestamp": None, + "initial_stop_loss_abs": None, + "initial_stop_loss_pct": None, + "initial_stop_loss_ratio": None, + "realized_profit": 0.0, + "realized_profit_ratio": None, + "close_profit": None, + "close_profit_pct": None, + "close_profit_abs": None, + "profit_ratio": None, + "profit_pct": None, + "profit_abs": None, + "close_rate_requested": None, + "fee_close": 0.0025, 
+ "fee_close_cost": None, + "fee_close_currency": None, + "fee_open": 0.0025, + "fee_open_cost": None, + "fee_open_currency": None, + "is_open": None, + "max_rate": None, + "min_rate": None, + "open_rate_requested": None, + "open_trade_value": 12.33075, + "exit_reason": None, + "exit_order_status": None, + "strategy": None, + "enter_tag": "buys_signal_001", + "timeframe": None, + "exchange": "binance", + "leverage": None, + "interest_rate": None, + "liquidation_price": None, + "is_short": None, + "trading_mode": None, + "funding_fees": None, + "amount_precision": 7.0, + "price_precision": 8.0, + "precision_mode": 2, + "contract_size": 1, + "orders": [], + "has_open_orders": False, } def test_stoploss_reinitialization(default_conf, fee): - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, fee_open=fee.return_value, open_date=dt_now() - timedelta(hours=2), amount=30.0, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, ) @@ -1611,15 +1630,15 @@ def test_stoploss_reinitialization(default_conf, fee): def test_stoploss_reinitialization_leverage(default_conf, fee): - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, fee_open=fee.return_value, open_date=dt_now() - timedelta(hours=2), amount=30.0, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, leverage=5.0, @@ -1673,15 +1692,15 @@ def test_stoploss_reinitialization_leverage(default_conf, fee): def test_stoploss_reinitialization_short(default_conf, fee): - init_db(default_conf['db_url']) + init_db(default_conf["db_url"]) trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=0.001, fee_open=fee.return_value, open_date=dt_now() - timedelta(hours=2), amount=10, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, is_short=True, @@ -1734,26 +1753,26 @@ def test_stoploss_reinitialization_short(default_conf, fee): def test_update_fee(fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, fee_open=fee.return_value, open_date=dt_now() - timedelta(hours=2), amount=30.0, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, ) fee_cost = 0.15 - fee_currency = 'BTC' + fee_currency = "BTC" fee_rate = 0.0075 assert trade.fee_open_currency is None - assert not trade.fee_updated('buy') - assert not trade.fee_updated('sell') + assert not trade.fee_updated("buy") + assert not trade.fee_updated("sell") - trade.update_fee(fee_cost, fee_currency, fee_rate, 'buy') - assert trade.fee_updated('buy') - assert not trade.fee_updated('sell') + trade.update_fee(fee_cost, fee_currency, fee_rate, "buy") + assert trade.fee_updated("buy") + assert not trade.fee_updated("sell") assert trade.fee_open_currency == fee_currency assert trade.fee_open_cost == fee_cost assert trade.fee_open == fee_rate @@ -1763,9 +1782,9 @@ def test_update_fee(fee): assert trade.fee_close_cost is None fee_rate = 0.0076 - trade.update_fee(fee_cost, fee_currency, fee_rate, 'sell') - assert trade.fee_updated('buy') - assert trade.fee_updated('sell') + trade.update_fee(fee_cost, fee_currency, fee_rate, "sell") + assert trade.fee_updated("buy") + assert trade.fee_updated("sell") assert trade.fee_close == 0.0076 assert trade.fee_close_cost == fee_cost assert trade.fee_close == fee_rate @@ -1773,39 +1792,38 @@ def 
test_update_fee(fee): def test_fee_updated(fee): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=30.0, fee_open=fee.return_value, open_date=dt_now() - timedelta(hours=2), amount=30.0, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=1, max_rate=1, ) assert trade.fee_open_currency is None - assert not trade.fee_updated('buy') - assert not trade.fee_updated('sell') - assert not trade.fee_updated('asdf') + assert not trade.fee_updated("buy") + assert not trade.fee_updated("sell") + assert not trade.fee_updated("asdf") - trade.update_fee(0.15, 'BTC', 0.0075, 'buy') - assert trade.fee_updated('buy') - assert not trade.fee_updated('sell') + trade.update_fee(0.15, "BTC", 0.0075, "buy") + assert trade.fee_updated("buy") + assert not trade.fee_updated("sell") assert trade.fee_open_currency is not None assert trade.fee_close_currency is None - trade.update_fee(0.15, 'ABC', 0.0075, 'sell') - assert trade.fee_updated('buy') - assert trade.fee_updated('sell') - assert not trade.fee_updated('asfd') + trade.update_fee(0.15, "ABC", 0.0075, "sell") + assert trade.fee_updated("buy") + assert trade.fee_updated("sell") + assert not trade.fee_updated("asfd") @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [True, False]) -@pytest.mark.parametrize('use_db', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) +@pytest.mark.parametrize("use_db", [True, False]) def test_total_open_trades_stakes(fee, is_short, use_db): - Trade.use_db = use_db Trade.reset_trades() res = Trade.total_open_trades_stakes() @@ -1818,14 +1836,16 @@ def test_total_open_trades_stakes(fee, is_short, use_db): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short,result', [ - (True, -0.006739127), - (False, 0.000739127), - (None, -0.005429127), -]) -@pytest.mark.parametrize('use_db', [True, False]) +@pytest.mark.parametrize( + "is_short,result", + [ + (True, -0.006739127), + (False, 0.000739127), + (None, -0.005429127), + ], +) +@pytest.mark.parametrize("use_db", [True, False]) def test_get_total_closed_profit(fee, use_db, is_short, result): - Trade.use_db = use_db Trade.reset_trades() res = Trade.get_total_closed_profit() @@ -1838,8 +1858,8 @@ def test_get_total_closed_profit(fee, use_db, is_short, result): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [True, False]) -@pytest.mark.parametrize('use_db', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) +@pytest.mark.parametrize("use_db", [True, False]) def test_get_trades_proxy(fee, use_db, is_short): Trade.use_db = use_db Trade.reset_trades() @@ -1865,7 +1885,7 @@ def test_get_trades_proxy(fee, use_db, is_short): @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_get_trades__query(fee, is_short): query = Trade.get_trades_query([]) # without orders there should be no join issued. 
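The profit arithmetic spelled out in the test docstrings above (open_value, close_value, interest, total_profit, total_profit_ratio) can be reproduced with a few standalone helpers. The sketch below is only an illustration of that arithmetic, not freqtrade's actual API: the helper names, the hard-coded fee/interest constants and the use of math.isclose are assumptions made for this note. It checks two of the documented values (binance 1x long and binance -3x short, close_rate 2.2).

import math

FEE = 0.0025          # 0.25 % fee used throughout the docstrings
INTEREST = 0.000625   # binance short interest: 30 * 0.0005 * 1/24, paid in crypto

def open_value(amount, open_rate, fee, is_short):
    # longs pay the fee on top of the cost, shorts receive proceeds minus fee
    cost = amount * open_rate
    return cost - cost * fee if is_short else cost + cost * fee

def close_value(amount, close_rate, fee, is_short, interest=0.0):
    if is_short:
        amount_closed = amount + interest          # interest owed in crypto is bought back
        val = amount_closed * close_rate
        return val + val * fee
    return amount * close_rate - amount * close_rate * fee - interest

def profit_ratio(open_val, close_val, leverage, is_short):
    if is_short:
        return (1 - close_val / open_val) * leverage
    return (close_val / open_val - 1) * leverage

# binance 1x long, open 2.0 -> close 2.2 (no borrowing, so no interest)
ov = open_value(30.0, 2.0, FEE, is_short=False)             # 60.15
cv = close_value(30.0, 2.2, FEE, is_short=False)            # 65.835
assert math.isclose(cv - ov, 5.685)
assert math.isclose(profit_ratio(ov, cv, 1, False), 0.0945137157107232)

# binance -3x short on margin, open 2.0 -> close 2.2 (interest applies)
ov_s = open_value(30.0, 2.0, FEE, is_short=True)            # 59.85
cv_s = close_value(30.0, 2.2, FEE, is_short=True, interest=INTEREST)  # 66.1663784375
assert math.isclose(ov_s - cv_s, -6.3163784375)
assert math.isclose(profit_ratio(ov_s, cv_s, 3, True), -0.3166104479949876)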
@@ -1893,24 +1913,25 @@ def test_get_trades_backtest(): @pytest.mark.usefixtures("init_persistence") # @pytest.mark.parametrize('is_short', [True, False]) def test_get_overall_performance(fee): - create_mock_trades(fee, False) res = Trade.get_overall_performance() assert len(res) == 2 - assert 'pair' in res[0] - assert 'profit' in res[0] - assert 'count' in res[0] + assert "pair" in res[0] + assert "profit" in res[0] + assert "count" in res[0] @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short,pair,profit', [ - (True, 'ETC/BTC', -0.005), - (False, 'XRP/BTC', 0.01), - (None, 'XRP/BTC', 0.01), -]) +@pytest.mark.parametrize( + "is_short,pair,profit", + [ + (True, "ETC/BTC", -0.005), + (False, "XRP/BTC", 0.01), + (None, "XRP/BTC", 0.01), + ], +) def test_get_best_pair(fee, is_short, pair, profit): - res = Trade.get_best_pair() assert res is None @@ -1923,28 +1944,42 @@ def test_get_best_pair(fee, is_short, pair, profit): @pytest.mark.usefixtures("init_persistence") def test_get_best_pair_lev(fee): - res = Trade.get_best_pair() assert res is None create_mock_trades_with_leverage(fee) res = Trade.get_best_pair() assert len(res) == 2 - assert res[0] == 'DOGE/BTC' + assert res[0] == "DOGE/BTC" assert res[1] == 0.1713156134055116 @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_get_canceled_exit_order_count(fee, is_short): - create_mock_trades(fee, is_short=is_short) - trade = Trade.get_trades([Trade.pair == 'ETC/BTC']).first() + trade = Trade.get_trades([Trade.pair == "ETC/BTC"]).first() # No canceled order. assert trade.get_canceled_exit_order_count() == 0 + # Property returns the same result + assert trade.canceled_exit_order_count == 0 - trade.orders[-1].status = 'canceled' + trade.orders[-1].status = "canceled" assert trade.get_canceled_exit_order_count() == 1 + assert trade.canceled_exit_order_count == 1 + + +@pytest.mark.usefixtures("init_persistence") +@pytest.mark.parametrize("is_short", [True, False]) +def test_fully_canceled_entry_order_count(fee, is_short): + create_mock_trades(fee, is_short=is_short) + trade = Trade.get_trades([Trade.pair == "ETC/BTC"]).first() + # No canceled order. 
+ assert trade.fully_canceled_entry_order_count == 0 + + trade.orders[0].status = "canceled" + trade.orders[0].filled = 0 + assert trade.fully_canceled_entry_order_count == 1 @pytest.mark.usefixtures("init_persistence") @@ -1953,32 +1988,32 @@ def test_update_order_from_ccxt(caplog, time_machine): time_machine.move_to(start, tick=False) # Most basic order return (only has orderid) - o = Order.parse_from_ccxt_object({'id': '1234'}, 'ADA/USDT', 'buy', 20.01, 1234.6) + o = Order.parse_from_ccxt_object({"id": "1234"}, "ADA/USDT", "buy", 20.01, 1234.6) assert isinstance(o, Order) - assert o.ft_pair == 'ADA/USDT' - assert o.ft_order_side == 'buy' - assert o.order_id == '1234' + assert o.ft_pair == "ADA/USDT" + assert o.ft_order_side == "buy" + assert o.order_id == "1234" assert o.ft_price == 1234.6 assert o.ft_amount == 20.01 assert o.ft_is_open ccxt_order = { - 'id': '1234', - 'side': 'buy', - 'symbol': 'ADA/USDT', - 'type': 'limit', - 'price': 1234.5, - 'amount': 20.0, - 'filled': 9, - 'remaining': 11, - 'status': 'open', - 'timestamp': 1599394315123 + "id": "1234", + "side": "buy", + "symbol": "ADA/USDT", + "type": "limit", + "price": 1234.5, + "amount": 20.0, + "filled": 9, + "remaining": 11, + "status": "open", + "timestamp": 1599394315123, } - o = Order.parse_from_ccxt_object(ccxt_order, 'ADA/USDT', 'buy', 20.01, 1234.6) + o = Order.parse_from_ccxt_object(ccxt_order, "ADA/USDT", "buy", 20.01, 1234.6) assert isinstance(o, Order) - assert o.ft_pair == 'ADA/USDT' - assert o.ft_order_side == 'buy' - assert o.order_id == '1234' - assert o.order_type == 'limit' + assert o.ft_pair == "ADA/USDT" + assert o.ft_order_side == "buy" + assert o.order_id == "1234" + assert o.order_type == "limit" assert o.price == 1234.5 assert o.ft_price == 1234.6 assert o.ft_amount == 20.01 @@ -1990,11 +2025,11 @@ def test_update_order_from_ccxt(caplog, time_machine): # Order is unfilled, "filled" not set # https://github.com/freqtrade/freqtrade/issues/5404 - ccxt_order.update({'filled': None, 'remaining': 20.0, 'status': 'canceled'}) + ccxt_order.update({"filled": None, "remaining": 20.0, "status": "canceled"}) o.update_from_ccxt_object(ccxt_order) # Order has been closed - ccxt_order.update({'filled': 20.0, 'remaining': 0.0, 'status': 'closed'}) + ccxt_order.update({"filled": 20.0, "remaining": 0.0, "status": "closed"}) o.update_from_ccxt_object(ccxt_order) assert o.filled == 20.0 @@ -2004,27 +2039,27 @@ def test_update_order_from_ccxt(caplog, time_machine): # Move time time_machine.move_to(start + timedelta(hours=1), tick=False) - ccxt_order.update({'id': 'somethingelse'}) + ccxt_order.update({"id": "somethingelse"}) with pytest.raises(DependencyException, match=r"Order-id's don't match"): o.update_from_ccxt_object(ccxt_order) message = "aaaa is not a valid response object." assert not log_has(message, caplog) - Order.update_orders([o], 'aaaa') + Order.update_orders([o], "aaaa") assert log_has(message, caplog) # Call regular update - shouldn't fail. 
- Order.update_orders([o], {'id': '1234'}) + Order.update_orders([o], {"id": "1234"}) assert o.order_filled_date == start # Fill order again - shouldn't update filled date - ccxt_order.update({'id': '1234'}) + ccxt_order.update({"id": "1234"}) Order.update_orders([o], ccxt_order) assert o.order_filled_date == start @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_select_order(fee, is_short): create_mock_trades(fee, is_short) @@ -2066,85 +2101,91 @@ def test_select_order(fee, is_short): order = trades[4].select_order(trades[4].exit_side, True) assert order is not None - trades[4].orders[1].ft_order_side = 'stoploss' - order = trades[4].select_order('stoploss', None) + trades[4].orders[1].ft_order_side = "stoploss" + order = trades[4].select_order("stoploss", None) assert order is not None - assert order.ft_order_side == 'stoploss' + assert order.ft_order_side == "stoploss" def test_Trade_object_idem(): - assert issubclass(Trade, LocalTrade) trade = vars(Trade) localtrade = vars(LocalTrade) excludes = ( - 'delete', - 'session', - 'commit', - 'rollback', - 'query', - 'open_date', - 'get_best_pair', - 'get_overall_performance', - 'get_total_closed_profit', - 'total_open_trades_stakes', - 'get_closed_trades_without_assigned_fees', - 'get_open_trades_without_assigned_fees', - 'get_trades', - 'get_trades_query', - 'get_exit_reason_performance', - 'get_enter_tag_performance', - 'get_mix_tag_performance', - 'get_trading_volume', - 'validate_string_len', - 'custom_data' + "delete", + "session", + "commit", + "rollback", + "query", + "open_date", + "get_best_pair", + "get_overall_performance", + "get_total_closed_profit", + "total_open_trades_stakes", + "get_closed_trades_without_assigned_fees", + "get_open_trades_without_assigned_fees", + "get_trades", + "get_trades_query", + "get_exit_reason_performance", + "get_enter_tag_performance", + "get_mix_tag_performance", + "get_trading_volume", + "validate_string_len", + "custom_data", + ) + EXCLUDES2 = ( + "trades", + "trades_open", + "bt_trades_open_pp", + "bt_open_open_trade_count", + "total_profit", + "from_json", ) - EXCLUDES2 = ('trades', 'trades_open', 'bt_trades_open_pp', 'bt_open_open_trade_count', - 'total_profit', 'from_json',) # Parent (LocalTrade) should have the same attributes for item in trade: # Exclude private attributes and open_date (as it's not assigned a default) - if (not item.startswith('_') and item not in excludes): + if not item.startswith("_") and item not in excludes: assert item in localtrade # Fails if only a column is added without corresponding parent field for item in localtrade: - if (not item.startswith('__') - and item not in EXCLUDES2 - and type(getattr(LocalTrade, item)) not in (property, FunctionType)): + if ( + not item.startswith("__") + and item not in EXCLUDES2 + and type(getattr(LocalTrade, item)) not in (property, FunctionType) + ): assert item in trade @pytest.mark.usefixtures("init_persistence") def test_trade_truncates_string_fields(): trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=20.0, amount=30.0, open_rate=2.0, open_date=datetime.now(timezone.utc) - timedelta(minutes=20), fee_open=0.001, fee_close=0.001, - exchange='binance', + exchange="binance", leverage=1.0, - trading_mode='futures', - enter_tag='a' * CUSTOM_TAG_MAX_LENGTH * 2, - exit_reason='b' * CUSTOM_TAG_MAX_LENGTH * 2, + trading_mode="futures", + enter_tag="a" * CUSTOM_TAG_MAX_LENGTH * 2, + exit_reason="b" * 
CUSTOM_TAG_MAX_LENGTH * 2, ) Trade.session.add(trade) Trade.commit() trade1 = Trade.session.scalars(select(Trade)).first() - assert trade1.enter_tag == 'a' * CUSTOM_TAG_MAX_LENGTH - assert trade1.exit_reason == 'b' * CUSTOM_TAG_MAX_LENGTH + assert trade1.enter_tag == "a" * CUSTOM_TAG_MAX_LENGTH + assert trade1.exit_reason == "b" * CUSTOM_TAG_MAX_LENGTH def test_recalc_trade_from_orders(fee): - o1_amount = 100 o1_rate = 1 o1_cost = o1_amount * o1_rate @@ -2152,13 +2193,13 @@ def test_recalc_trade_from_orders(fee): o1_trade_val = o1_cost + o1_fee_cost trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=o1_cost, open_date=dt_now() - timedelta(hours=2), amount=o1_amount, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=o1_rate, max_rate=o1_rate, leverage=1, @@ -2178,13 +2219,13 @@ def test_recalc_trade_from_orders(fee): assert trade.open_rate == o1_rate assert trade.open_trade_value == o1_trade_val - trade.update_fee(o1_fee_cost, 'BNB', fee.return_value, 'buy') + trade.update_fee(o1_fee_cost, "BNB", fee.return_value, "buy") assert len(trade.orders) == 0 # Check with 1 order order1 = Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2217,7 +2258,7 @@ def test_recalc_trade_from_orders(fee): o2_trade_val = o2_cost + o2_fee_cost order2 = Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2251,7 +2292,7 @@ def test_recalc_trade_from_orders(fee): o3_trade_val = o3_cost + o3_fee_cost order3 = Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2280,7 +2321,7 @@ def test_recalc_trade_from_orders(fee): # Just to make sure full sell orders are ignored, let's calculate one more time. 
sell1 = Order( - ft_order_side='sell', + ft_order_side="sell", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2322,20 +2363,20 @@ def test_recalc_trade_from_orders_kucoin(): o1_cost = o1_amount * o1_rate trade = Trade( - pair='FLOKI/USDT', + pair="FLOKI/USDT", stake_amount=o1_cost, open_date=dt_now() - timedelta(hours=2), amount=o1_amount, fee_open=0.001, fee_close=0.001, - exchange='binance', + exchange="binance", open_rate=o1_rate, max_rate=o1_rate, leverage=1, ) # Check with 1 order order1 = Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2352,7 +2393,7 @@ def test_recalc_trade_from_orders_kucoin(): ) trade.orders.append(order1) order2 = Order( - ft_order_side='buy', + ft_order_side="buy", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2375,7 +2416,7 @@ def test_recalc_trade_from_orders_kucoin(): assert profit.profit_ratio == pytest.approx(0.00566035) order3 = Order( - ft_order_side='sell', + ft_order_side="sell", ft_pair=trade.pair, ft_is_open=False, status="closed", @@ -2400,9 +2441,8 @@ def test_recalc_trade_from_orders_kucoin(): assert pytest.approx(trade.close_profit) == 0.00566035 -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short): - o1_amount = 100 o1_rate = 1 o1_cost = o1_amount * o1_rate @@ -2412,19 +2452,19 @@ def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short): exit_side = "buy" if is_short else "sell" trade = Trade( - pair='ADA/USDT', + pair="ADA/USDT", stake_amount=o1_cost, open_date=dt_now() - timedelta(hours=2), amount=o1_amount, fee_open=fee.return_value, fee_close=fee.return_value, - exchange='binance', + exchange="binance", open_rate=o1_rate, max_rate=o1_rate, is_short=is_short, leverage=1.0, ) - trade.update_fee(o1_fee_cost, 'BNB', fee.return_value, entry_side) + trade.update_fee(o1_fee_cost, "BNB", fee.return_value, entry_side) # Check with 1 order order1 = Order( ft_order_side=entry_side, @@ -2598,169 +2638,170 @@ def test_select_filled_orders(fee): trades = Trade.get_trades().all() # Closed buy order, no sell order - orders = trades[0].select_filled_orders('buy') + orders = trades[0].select_filled_orders("buy") assert isinstance(orders, list) assert len(orders) == 0 - orders = trades[0].select_filled_orders('sell') + orders = trades[0].select_filled_orders("sell") assert orders is not None assert len(orders) == 0 # closed buy order, and closed sell order - orders = trades[1].select_filled_orders('buy') + orders = trades[1].select_filled_orders("buy") assert isinstance(orders, list) assert len(orders) == 1 order = orders[0] assert order.amount > 0 assert order.filled > 0 - assert order.side == 'buy' - assert order.ft_order_side == 'buy' - assert order.status == 'closed' + assert order.side == "buy" + assert order.ft_order_side == "buy" + assert order.status == "closed" - orders = trades[1].select_filled_orders('sell') + orders = trades[1].select_filled_orders("sell") assert isinstance(orders, list) assert len(orders) == 1 # Has open buy order - orders = trades[3].select_filled_orders('buy') + orders = trades[3].select_filled_orders("buy") assert isinstance(orders, list) assert len(orders) == 0 - orders = trades[3].select_filled_orders('sell') + orders = trades[3].select_filled_orders("sell") assert isinstance(orders, list) assert len(orders) == 0 # Open sell order - orders = trades[4].select_filled_orders('buy') + orders = 
trades[4].select_filled_orders("buy") assert isinstance(orders, list) assert len(orders) == 1 - orders = trades[4].select_filled_orders('sell') + orders = trades[4].select_filled_orders("sell") assert isinstance(orders, list) assert len(orders) == 0 @pytest.mark.usefixtures("init_persistence") def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open): - - order = Order.parse_from_ccxt_object(limit_buy_order_open, 'mocked', 'buy') + order = Order.parse_from_ccxt_object(limit_buy_order_open, "mocked", "buy") order.ft_trade_id = 1 order.session.add(order) Order.session.commit() - order_resp = Order.order_by_id(limit_buy_order_open['id']) + order_resp = Order.order_by_id(limit_buy_order_open["id"]) assert order_resp raw_order = order_resp.to_ccxt_object() - del raw_order['fee'] - del raw_order['datetime'] - del raw_order['info'] - assert raw_order.get('stopPrice') is None - raw_order.pop('stopPrice', None) - del limit_buy_order_open['datetime'] + del raw_order["fee"] + del raw_order["datetime"] + del raw_order["info"] + assert raw_order.get("stopPrice") is None + raw_order.pop("stopPrice", None) + del limit_buy_order_open["datetime"] assert raw_order == limit_buy_order_open - order1 = Order.parse_from_ccxt_object(limit_sell_order_usdt_open, 'mocked', 'sell') - order1.ft_order_side = 'stoploss' + order1 = Order.parse_from_ccxt_object(limit_sell_order_usdt_open, "mocked", "sell") + order1.ft_order_side = "stoploss" order1.stop_price = order1.price * 0.9 order1.ft_trade_id = 1 order1.session.add(order1) Order.session.commit() - order_resp1 = Order.order_by_id(limit_sell_order_usdt_open['id']) + order_resp1 = Order.order_by_id(limit_sell_order_usdt_open["id"]) raw_order1 = order_resp1.to_ccxt_object() - assert raw_order1.get('stopPrice') is not None + assert raw_order1.get("stopPrice") is not None @pytest.mark.usefixtures("init_persistence") -@pytest.mark.parametrize('data', [ - { +@pytest.mark.parametrize( + "data", + [ # tuple 1 - side, amount, price # tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit - 'orders': [ - (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), - (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), - (('sell', 50, 12), (150.0, 12.5, 1875.0, -25.0, -25.0, -0.01)), - (('sell', 100, 20), (50.0, 12.5, 625.0, 725.0, 750.0, 0.29)), - (('sell', 50, 5), (50.0, 12.5, 625.0, 350.0, -375.0, 0.14)), - ], - 'end_profit': 350.0, - 'end_profit_ratio': 0.14, - 'fee': 0.0, - }, - { - 'orders': [ - (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), - (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), - (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)), - (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)), - (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, -377.1875, 0.1343142)), - ], - 'end_profit': 336.625, - 'end_profit_ratio': 0.1343142, - 'fee': 0.0025, - }, - { - 'orders': [ - (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), - (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), - (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)), - (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)), - (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)), - (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 1787.25, 0.9747170)), - ], - 'end_profit': 3175.75, - 'end_profit_ratio': 0.9747170, - 'fee': 0.0025, - }, - { - # Test above without fees - 'orders': [ - (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), 
- (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), - (('sell', 100, 11), (100.0, 5.0, 500.0, 600.0, 600.0, 0.6)), - (('buy', 150, 15), (250.0, 11.0, 2750.0, 600.0, 600.0, 0.6)), - (('sell', 100, 19), (150.0, 11.0, 1650.0, 1400.0, 800.0, 0.43076923)), - (('sell', 150, 23), (150.0, 11.0, 1650.0, 3200.0, 1800.0, 0.98461538)), - ], - 'end_profit': 3200.0, - 'end_profit_ratio': 0.98461538, - 'fee': 0.0, - }, - { - 'orders': [ - (('buy', 100, 8), (100.0, 8.0, 800.0, 0.0, None, None)), - (('buy', 100, 9), (200.0, 8.5, 1700.0, 0.0, None, None)), - (('sell', 100, 10), (100.0, 8.5, 850.0, 150.0, 150.0, 0.08823529)), - (('buy', 150, 11), (250.0, 10, 2500.0, 150.0, 150.0, 0.08823529)), - (('sell', 100, 12), (150.0, 10.0, 1500.0, 350.0, 200.0, 0.1044776)), - (('sell', 150, 14), (150.0, 10.0, 1500.0, 950.0, 600.0, 0.283582)), - ], - 'end_profit': 950.0, - 'end_profit_ratio': 0.283582, - 'fee': 0.0, - }, -]) + { + "orders": [ + (("buy", 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), + (("buy", 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), + (("sell", 50, 12), (150.0, 12.5, 1875.0, -25.0, -25.0, -0.01)), + (("sell", 100, 20), (50.0, 12.5, 625.0, 725.0, 750.0, 0.29)), + (("sell", 50, 5), (50.0, 12.5, 625.0, 350.0, -375.0, 0.14)), + ], + "end_profit": 350.0, + "end_profit_ratio": 0.14, + "fee": 0.0, + }, + { + "orders": [ + (("buy", 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), + (("buy", 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), + (("sell", 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)), + (("sell", 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)), + (("sell", 50, 5), (50.0, 12.5, 625.0, 336.625, -377.1875, 0.1343142)), + ], + "end_profit": 336.625, + "end_profit_ratio": 0.1343142, + "fee": 0.0025, + }, + { + "orders": [ + (("buy", 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), + (("buy", 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), + (("sell", 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)), + (("buy", 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)), + (("sell", 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)), + (("sell", 150, 23), (150.0, 11.0, 1650.0, 3175.75, 1787.25, 0.9747170)), + ], + "end_profit": 3175.75, + "end_profit_ratio": 0.9747170, + "fee": 0.0025, + }, + { + # Test above without fees + "orders": [ + (("buy", 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), + (("buy", 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), + (("sell", 100, 11), (100.0, 5.0, 500.0, 600.0, 600.0, 0.6)), + (("buy", 150, 15), (250.0, 11.0, 2750.0, 600.0, 600.0, 0.6)), + (("sell", 100, 19), (150.0, 11.0, 1650.0, 1400.0, 800.0, 0.43076923)), + (("sell", 150, 23), (150.0, 11.0, 1650.0, 3200.0, 1800.0, 0.98461538)), + ], + "end_profit": 3200.0, + "end_profit_ratio": 0.98461538, + "fee": 0.0, + }, + { + "orders": [ + (("buy", 100, 8), (100.0, 8.0, 800.0, 0.0, None, None)), + (("buy", 100, 9), (200.0, 8.5, 1700.0, 0.0, None, None)), + (("sell", 100, 10), (100.0, 8.5, 850.0, 150.0, 150.0, 0.08823529)), + (("buy", 150, 11), (250.0, 10, 2500.0, 150.0, 150.0, 0.08823529)), + (("sell", 100, 12), (150.0, 10.0, 1500.0, 350.0, 200.0, 0.1044776)), + (("sell", 150, 14), (150.0, 10.0, 1500.0, 950.0, 600.0, 0.283582)), + ], + "end_profit": 950.0, + "end_profit_ratio": 0.283582, + "fee": 0.0, + }, + ], +) def test_recalc_trade_from_orders_dca(data) -> None: - - pair = 'ETH/USDT' + pair = "ETH/USDT" trade = Trade( id=2, pair=pair, stake_amount=1000, - open_rate=data['orders'][0][0][2], - amount=data['orders'][0][0][1], + 
open_rate=data["orders"][0][0][2], + amount=data["orders"][0][0][1], is_open=True, open_date=dt_now(), - fee_open=data['fee'], - fee_close=data['fee'], - exchange='binance', + fee_open=data["fee"], + fee_close=data["fee"], + exchange="binance", is_short=False, leverage=1.0, - trading_mode=TradingMode.SPOT + trading_mode=TradingMode.SPOT, ) Trade.session.add(trade) - for idx, (order, result) in enumerate(data['orders']): + for idx, (order, result) in enumerate(data["orders"]): amount = order[1] price = order[2] @@ -2805,8 +2846,8 @@ def test_recalc_trade_from_orders_dca(data) -> None: assert pytest.approx(trade.close_profit) == result[5] trade.close(price) - assert pytest.approx(trade.close_profit_abs) == data['end_profit'] - assert pytest.approx(trade.close_profit) == data['end_profit_ratio'] + assert pytest.approx(trade.close_profit_abs) == data["end_profit"] + assert pytest.approx(trade.close_profit) == data["end_profit_ratio"] assert not trade.is_open trade = Trade.session.scalars(select(Trade)).first() assert trade diff --git a/tests/persistence/test_trade_custom_data.py b/tests/persistence/test_trade_custom_data.py index 15241aa93..b2971883d 100644 --- a/tests/persistence/test_trade_custom_data.py +++ b/tests/persistence/test_trade_custom_data.py @@ -7,15 +7,20 @@ from freqtrade.data.history.history_utils import get_timerange from freqtrade.optimize.backtesting import Backtesting from freqtrade.persistence import Trade, disable_database_use, enable_database_use from freqtrade.persistence.custom_data import CustomDataWrapper -from tests.conftest import (EXMS, create_mock_trades_usdt, generate_test_data, - get_patched_freqtradebot, patch_exchange) +from tests.conftest import ( + EXMS, + create_mock_trades_usdt, + generate_test_data, + get_patched_freqtradebot, + patch_exchange, +) @pytest.mark.usefixtures("init_persistence") @pytest.mark.parametrize("use_db", [True, False]) def test_trade_custom_data(fee, use_db): if not use_db: - disable_database_use('5m') + disable_database_use("5m") Trade.reset_trades() CustomDataWrapper.reset_custom_data() @@ -26,120 +31,121 @@ def test_trade_custom_data(fee, use_db): trade1.id = 1 assert trade1.get_all_custom_data() == [] - trade1.set_custom_data('test_str', 'test_value') - trade1.set_custom_data('test_int', 1) - trade1.set_custom_data('test_float', 1.55) - trade1.set_custom_data('test_bool', True) - trade1.set_custom_data('test_dict', {'test': 'dict'}) + trade1.set_custom_data("test_str", "test_value") + trade1.set_custom_data("test_int", 1) + trade1.set_custom_data("test_float", 1.55) + trade1.set_custom_data("test_bool", True) + trade1.set_custom_data("test_dict", {"test": "dict"}) assert len(trade1.get_all_custom_data()) == 5 - assert trade1.get_custom_data('test_str') == 'test_value' - trade1.set_custom_data('test_str', 'test_value_updated') - assert trade1.get_custom_data('test_str') == 'test_value_updated' + assert trade1.get_custom_data("test_str") == "test_value" + trade1.set_custom_data("test_str", "test_value_updated") + assert trade1.get_custom_data("test_str") == "test_value_updated" - assert trade1.get_custom_data('test_int') == 1 - assert isinstance(trade1.get_custom_data('test_int'), int) + assert trade1.get_custom_data("test_int") == 1 + assert isinstance(trade1.get_custom_data("test_int"), int) - assert trade1.get_custom_data('test_float') == 1.55 - assert isinstance(trade1.get_custom_data('test_float'), float) + assert trade1.get_custom_data("test_float") == 1.55 + assert isinstance(trade1.get_custom_data("test_float"), float) - 
assert trade1.get_custom_data('test_bool') is True - assert isinstance(trade1.get_custom_data('test_bool'), bool) + assert trade1.get_custom_data("test_bool") is True + assert isinstance(trade1.get_custom_data("test_bool"), bool) - assert trade1.get_custom_data('test_dict') == {'test': 'dict'} - assert isinstance(trade1.get_custom_data('test_dict'), dict) + assert trade1.get_custom_data("test_dict") == {"test": "dict"} + assert isinstance(trade1.get_custom_data("test_dict"), dict) if not use_db: enable_database_use() def test_trade_custom_data_strategy_compat(mocker, default_conf_usdt, fee): - - mocker.patch(f'{EXMS}.get_rate', return_value=0.50) - mocker.patch('freqtrade.freqtradebot.FreqtradeBot.get_real_amount', return_value=None) - default_conf_usdt["minimal_roi"] = {"0": 100} + mocker.patch(f"{EXMS}.get_rate", return_value=0.50) + mocker.patch("freqtrade.freqtradebot.FreqtradeBot.get_real_amount", return_value=None) + default_conf_usdt["minimal_roi"] = {"0": 100} freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt) create_mock_trades_usdt(fee) - trade1 = Trade.get_trades_proxy(pair='ADA/USDT')[0] - trade1.set_custom_data('test_str', 'test_value') - trade1.set_custom_data('test_int', 1) + trade1 = Trade.get_trades_proxy(pair="ADA/USDT")[0] + trade1.set_custom_data("test_str", "test_value") + trade1.set_custom_data("test_int", 1) def custom_exit(pair, trade, **kwargs): - - if pair == 'ADA/USDT': - custom_val = trade.get_custom_data('test_str') - custom_val_i = trade.get_custom_data('test_int') + if pair == "ADA/USDT": + custom_val = trade.get_custom_data("test_str") + custom_val_i = trade.get_custom_data("test_int") return f"{custom_val}_{custom_val_i}" freqtrade.strategy.custom_exit = custom_exit - ff_spy = mocker.spy(freqtrade.strategy, 'custom_exit') + ff_spy = mocker.spy(freqtrade.strategy, "custom_exit") trades = Trade.get_open_trades() freqtrade.exit_positions(trades) Trade.commit() - trade_after = Trade.get_trades_proxy(pair='ADA/USDT')[0] - assert trade_after.get_custom_data('test_str') == 'test_value' - assert trade_after.get_custom_data('test_int') == 1 + trade_after = Trade.get_trades_proxy(pair="ADA/USDT")[0] + assert trade_after.get_custom_data("test_str") == "test_value" + assert trade_after.get_custom_data("test_int") == 1 # 2 open pairs eligible for exit assert ff_spy.call_count == 2 - assert trade_after.exit_reason == 'test_value_1' + assert trade_after.exit_reason == "test_value_1" def test_trade_custom_data_strategy_backtest_compat(mocker, default_conf_usdt, fee): - - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) mocker.patch(f"{EXMS}.get_min_pair_stake_amount", return_value=10) - mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float('inf')) + mocker.patch(f"{EXMS}.get_max_pair_stake_amount", return_value=float("inf")) mocker.patch(f"{EXMS}.get_max_leverage", return_value=10) mocker.patch(f"{EXMS}.get_maintenance_ratio_and_amt", return_value=(0.1, 0.1)) - mocker.patch('freqtrade.optimize.backtesting.Backtesting._run_funding_fees') + mocker.patch("freqtrade.optimize.backtesting.Backtesting._run_funding_fees") patch_exchange(mocker) - default_conf_usdt.update({ - "stake_amount": 100.0, - "max_open_trades": 2, - "dry_run_wallet": 1000.0, - "strategy": "StrategyTestV3", - "trading_mode": "futures", - "margin_mode": "isolated", - "stoploss": -2, - "minimal_roi": {"0": 100}, - }) - default_conf_usdt['pairlists'] = [{'method': 'StaticPairList', 'allow_inactive': True}] + default_conf_usdt.update( + { + "stake_amount": 
100.0, + "max_open_trades": 2, + "dry_run_wallet": 1000.0, + "strategy": "StrategyTestV3", + "trading_mode": "futures", + "margin_mode": "isolated", + "stoploss": -2, + "minimal_roi": {"0": 100}, + } + ) + default_conf_usdt["pairlists"] = [{"method": "StaticPairList", "allow_inactive": True}] backtesting = Backtesting(default_conf_usdt) - df = generate_test_data(default_conf_usdt['timeframe'], 100, '2022-01-01 00:00:00+00:00') + df = generate_test_data(default_conf_usdt["timeframe"], 100, "2022-01-01 00:00:00+00:00") - pair_exp = 'XRP/USDT:USDT' + pair_exp = "XRP/USDT:USDT" def custom_exit(pair, trade, **kwargs): - custom_val = trade.get_custom_data('test_str') - custom_val_i = trade.get_custom_data('test_int', 0) + custom_val = trade.get_custom_data("test_str") + custom_val_i = trade.get_custom_data("test_int", 0) if pair == pair_exp: - trade.set_custom_data('test_str', 'test_value') - trade.set_custom_data('test_int', custom_val_i + 1) + trade.set_custom_data("test_str", "test_value") + trade.set_custom_data("test_int", custom_val_i + 1) if custom_val_i >= 2: return f"{custom_val}_{custom_val_i}" backtesting._set_strategy(backtesting.strategylist[0]) - processed = backtesting.strategy.advise_all_indicators({ - pair_exp: df, - 'BTC/USDT:USDT': df, - }) + processed = backtesting.strategy.advise_all_indicators( + { + pair_exp: df, + "BTC/USDT:USDT": df, + } + ) def fun(dataframe, *args, **kwargs): - dataframe.loc[dataframe.index == 50, 'enter_long'] = 1 + dataframe.loc[dataframe.index == 50, "enter_long"] = 1 return dataframe backtesting.strategy.advise_entry = fun backtesting.strategy.leverage = MagicMock(return_value=1) backtesting.strategy.custom_exit = custom_exit - ff_spy = mocker.spy(backtesting.strategy, 'custom_exit') + ff_spy = mocker.spy(backtesting.strategy, "custom_exit") min_date, max_date = get_timerange(processed) @@ -148,13 +154,13 @@ def test_trade_custom_data_strategy_backtest_compat(mocker, default_conf_usdt, f start_date=min_date, end_date=max_date, ) - results = result['results'] + results = result["results"] assert not results.empty assert len(results) == 2 - assert results['pair'][0] == pair_exp - assert results['pair'][1] == 'BTC/USDT:USDT' - assert results['exit_reason'][0] == 'test_value_2' - assert results['exit_reason'][1] == 'exit_signal' + assert results["pair"][0] == pair_exp + assert results["pair"][1] == "BTC/USDT:USDT" + assert results["exit_reason"][0] == "test_value_2" + assert results["exit_reason"][1] == "exit_signal" assert ff_spy.call_count == 7 Backtesting.cleanup() diff --git a/tests/persistence/test_trade_fromjson.py b/tests/persistence/test_trade_fromjson.py index 988f7ed5b..686667f85 100644 --- a/tests/persistence/test_trade_fromjson.py +++ b/tests/persistence/test_trade_fromjson.py @@ -181,10 +181,10 @@ def test_trade_fromjson(): Trade.commit() assert trade.id == 25 - assert trade.pair == 'ETH/USDT' + assert trade.pair == "ETH/USDT" assert trade.open_date_utc == datetime(2022, 10, 18, 9, 12, 42, tzinfo=timezone.utc) assert isinstance(trade.open_date, datetime) - assert trade.exit_reason == 'no longer good' + assert trade.exit_reason == "no longer good" assert trade.realized_profit == 2.76315361 assert trade.precision_mode == 2 assert trade.amount_precision == 1.0 @@ -199,7 +199,6 @@ def test_trade_fromjson(): @pytest.mark.usefixtures("init_persistence") def test_trade_serialize_load_back(fee): - create_mock_trades_usdt(fee, None) t = Trade.get_trades([Trade.id == 1]).first() @@ -219,12 +218,22 @@ def test_trade_serialize_load_back(fee): assert 
len(trade.orders) == len(t.orders) assert trade.orders[0].funding_fee == t.orders[0].funding_fee excluded = [ - 'trade_id', 'quote_currency', 'open_timestamp', 'close_timestamp', - 'realized_profit_ratio', 'close_profit_pct', - 'trade_duration_s', 'trade_duration', - 'profit_ratio', 'profit_pct', 'profit_abs', 'stop_loss_abs', - 'initial_stop_loss_abs', 'open_fill_date', 'open_fill_timestamp', - 'orders', + "trade_id", + "quote_currency", + "open_timestamp", + "close_timestamp", + "realized_profit_ratio", + "close_profit_pct", + "trade_duration_s", + "trade_duration", + "profit_ratio", + "profit_pct", + "profit_abs", + "stop_loss_abs", + "initial_stop_loss_abs", + "open_fill_date", + "open_fill_timestamp", + "orders", ] failed = [] # Ensure all attributes written can be read. @@ -233,29 +242,33 @@ def test_trade_serialize_load_back(fee): continue tattr = getattr(trade, obj, None) if isinstance(tattr, datetime): - tattr = tattr.strftime('%Y-%m-%d %H:%M:%S') + tattr = tattr.strftime("%Y-%m-%d %H:%M:%S") if tattr != value: failed.append((obj, tattr, value)) - assert tjson.get('trade_id') == trade.id - assert tjson.get('quote_currency') == trade.stake_currency - assert tjson.get('stop_loss_abs') == trade.stop_loss - assert tjson.get('initial_stop_loss_abs') == trade.initial_stop_loss + assert tjson.get("trade_id") == trade.id + assert tjson.get("quote_currency") == trade.stake_currency + assert tjson.get("stop_loss_abs") == trade.stop_loss + assert tjson.get("initial_stop_loss_abs") == trade.initial_stop_loss excluded_o = [ - 'order_filled_timestamp', 'ft_is_entry', 'pair', 'is_open', 'order_timestamp', + "order_filled_timestamp", + "ft_is_entry", + "pair", + "is_open", + "order_timestamp", ] order_obj = trade.orders[0] - for obj, value in tjson['orders'][0].items(): + for obj, value in tjson["orders"][0].items(): if obj in excluded_o: continue tattr = getattr(order_obj, obj, None) if isinstance(tattr, datetime): - tattr = tattr.strftime('%Y-%m-%d %H:%M:%S') + tattr = tattr.strftime("%Y-%m-%d %H:%M:%S") if tattr != value: failed.append((obj, tattr, value)) - assert tjson['orders'][0]['pair'] == order_obj.ft_pair + assert tjson["orders"][0]["pair"] == order_obj.ft_pair assert not failed trade2 = LocalTrade.from_json(trade_string) diff --git a/tests/plugins/test_pairlist.py b/tests/plugins/test_pairlist.py index 1a23846b7..f6c58a1e7 100644 --- a/tests/plugins/test_pairlist.py +++ b/tests/plugins/test_pairlist.py @@ -19,56 +19,68 @@ from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist, from freqtrade.plugins.pairlistmanager import PairListManager from freqtrade.resolvers import PairListResolver from freqtrade.util.datetime_helpers import dt_now -from tests.conftest import (EXMS, create_mock_trades_usdt, generate_test_data, get_patched_exchange, - get_patched_freqtradebot, log_has, log_has_re, num_log_has) +from tests.conftest import ( + EXMS, + create_mock_trades_usdt, + generate_test_data, + get_patched_exchange, + get_patched_freqtradebot, + log_has, + log_has_re, + num_log_has, +) # Exclude RemotePairList from tests. # It has a mandatory parameter, and requires special handling, which happens in test_remotepairlist. 
-TESTABLE_PAIRLISTS = [p for p in AVAILABLE_PAIRLISTS if p not in ['RemotePairList']] +TESTABLE_PAIRLISTS = [p for p in AVAILABLE_PAIRLISTS if p not in ["RemotePairList"]] @pytest.fixture(scope="function") def whitelist_conf(default_conf): - default_conf['stake_currency'] = 'BTC' - default_conf['exchange']['pair_whitelist'] = [ - 'ETH/BTC', - 'TKN/BTC', - 'TRST/BTC', - 'SWT/BTC', - 'BCC/BTC', - 'HOT/BTC', + default_conf["stake_currency"] = "BTC" + default_conf["exchange"]["pair_whitelist"] = [ + "ETH/BTC", + "TKN/BTC", + "TRST/BTC", + "SWT/BTC", + "BCC/BTC", + "HOT/BTC", ] - default_conf['exchange']['pair_blacklist'] = [ - 'BLK/BTC' - ] - default_conf['pairlists'] = [ + default_conf["exchange"]["pair_blacklist"] = ["BLK/BTC"] + default_conf["pairlists"] = [ { "method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", }, ] - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [], + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [], + } } - }) + ) return default_conf @pytest.fixture(scope="function") def whitelist_conf_2(default_conf): - default_conf['stake_currency'] = 'BTC' - default_conf['exchange']['pair_whitelist'] = [ - 'ETH/BTC', 'TKN/BTC', 'BLK/BTC', 'LTC/BTC', - 'BTT/BTC', 'HOT/BTC', 'FUEL/BTC', 'XRP/BTC' + default_conf["stake_currency"] = "BTC" + default_conf["exchange"]["pair_whitelist"] = [ + "ETH/BTC", + "TKN/BTC", + "BLK/BTC", + "LTC/BTC", + "BTT/BTC", + "HOT/BTC", + "FUEL/BTC", + "XRP/BTC", ] - default_conf['exchange']['pair_blacklist'] = [ - 'BLK/BTC' - ] - default_conf['pairlists'] = [ + default_conf["exchange"]["pair_blacklist"] = ["BLK/BTC"] + default_conf["pairlists"] = [ # { "method": "StaticPairList"}, { "method": "VolumePairList", @@ -82,33 +94,33 @@ def whitelist_conf_2(default_conf): @pytest.fixture(scope="function") def whitelist_conf_agefilter(default_conf): - default_conf['stake_currency'] = 'BTC' - default_conf['exchange']['pair_whitelist'] = [ - 'ETH/BTC', 'TKN/BTC', 'BLK/BTC', 'LTC/BTC', - 'BTT/BTC', 'HOT/BTC', 'FUEL/BTC', 'XRP/BTC' + default_conf["stake_currency"] = "BTC" + default_conf["exchange"]["pair_whitelist"] = [ + "ETH/BTC", + "TKN/BTC", + "BLK/BTC", + "LTC/BTC", + "BTT/BTC", + "HOT/BTC", + "FUEL/BTC", + "XRP/BTC", ] - default_conf['exchange']['pair_blacklist'] = [ - 'BLK/BTC' - ] - default_conf['pairlists'] = [ + default_conf["exchange"]["pair_blacklist"] = ["BLK/BTC"] + default_conf["pairlists"] = [ { "method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", "refresh_period": -1, }, - { - "method": "AgeFilter", - "min_days_listed": 2, - "max_days_listed": 100 - } + {"method": "AgeFilter", "min_days_listed": 2, "max_days_listed": 100}, ] return default_conf @pytest.fixture(scope="function") def static_pl_conf(whitelist_conf): - whitelist_conf['pairlists'] = [ + whitelist_conf["pairlists"] = [ { "method": "StaticPairList", }, @@ -117,65 +129,70 @@ def static_pl_conf(whitelist_conf): def test_log_cached(mocker, static_pl_conf, markets, tickers): - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) freqtrade = get_patched_freqtradebot(mocker, static_pl_conf) logmock = MagicMock() # Assign starting whitelist pl = freqtrade.pairlists._pairlist_handlers[0] - pl.log_once('Hello world', logmock) + 
pl.log_once("Hello world", logmock) assert logmock.call_count == 1 - pl.log_once('Hello world', logmock) + pl.log_once("Hello world", logmock) assert logmock.call_count == 1 assert pl._log_cache.currsize == 1 - assert ('Hello world',) in pl._log_cache._Cache__data + assert ("Hello world",) in pl._log_cache._Cache__data - pl.log_once('Hello world2', logmock) + pl.log_once("Hello world2", logmock) assert logmock.call_count == 2 assert pl._log_cache.currsize == 2 def test_load_pairlist_noexist(mocker, markets, default_conf): freqtrade = get_patched_freqtradebot(mocker, default_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) plm = PairListManager(freqtrade.exchange, default_conf, MagicMock()) - with pytest.raises(OperationalException, - match=r"Impossible to load Pairlist 'NonexistingPairList'. " - r"This class does not exist or contains Python code errors."): - PairListResolver.load_pairlist('NonexistingPairList', freqtrade.exchange, plm, - default_conf, {}, 1) + with pytest.raises( + OperationalException, + match=r"Impossible to load Pairlist 'NonexistingPairList'. " + r"This class does not exist or contains Python code errors.", + ): + PairListResolver.load_pairlist( + "NonexistingPairList", freqtrade.exchange, plm, default_conf, {}, 1 + ) def test_load_pairlist_verify_multi(mocker, markets_static, default_conf): freqtrade = get_patched_freqtradebot(mocker, default_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets_static)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets_static)) plm = PairListManager(freqtrade.exchange, default_conf, MagicMock()) # Call different versions one after the other, should always consider what was passed in # and have no side-effects (therefore the same check multiple times) - assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', ], print) == ['ETH/BTC', 'XRP/BTC'] - assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', 'BUUU/BTC'], print) == ['ETH/BTC', 'XRP/BTC'] - assert plm.verify_whitelist(['XRP/BTC', 'BUUU/BTC'], print) == ['XRP/BTC'] - assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', ], print) == ['ETH/BTC', 'XRP/BTC'] - assert plm.verify_whitelist(['ETH/USDT', 'XRP/USDT', ], print) == ['ETH/USDT', ] - assert plm.verify_whitelist(['ETH/BTC', 'XRP/BTC', ], print) == ['ETH/BTC', 'XRP/BTC'] + assert plm.verify_whitelist(["ETH/BTC", "XRP/BTC"], print) == ["ETH/BTC", "XRP/BTC"] + assert plm.verify_whitelist(["ETH/BTC", "XRP/BTC", "BUUU/BTC"], print) == ["ETH/BTC", "XRP/BTC"] + assert plm.verify_whitelist(["XRP/BTC", "BUUU/BTC"], print) == ["XRP/BTC"] + assert plm.verify_whitelist(["ETH/BTC", "XRP/BTC"], print) == ["ETH/BTC", "XRP/BTC"] + assert plm.verify_whitelist(["ETH/USDT", "XRP/USDT"], print) == ["ETH/USDT"] + assert plm.verify_whitelist(["ETH/BTC", "XRP/BTC"], print) == ["ETH/BTC", "XRP/BTC"] def test_refresh_market_pair_not_in_whitelist(mocker, markets, static_pl_conf): - freqtrade = get_patched_freqtradebot(mocker, static_pl_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets)) freqtrade.pairlists.refresh_pairlist() # List ordered by BaseVolume - whitelist = ['ETH/BTC', 'TKN/BTC'] + whitelist = ["ETH/BTC", "TKN/BTC"] # Ensure all except those in whitelist are removed assert set(whitelist) == set(freqtrade.pairlists.whitelist) # Ensure config dict hasn't been changed - assert (static_pl_conf['exchange']['pair_whitelist'] == - 
freqtrade.config['exchange']['pair_whitelist']) + assert ( + static_pl_conf["exchange"]["pair_whitelist"] + == freqtrade.config["exchange"]["pair_whitelist"] + ) def test_refresh_static_pairlist(mocker, markets, static_pl_conf): @@ -187,22 +204,28 @@ def test_refresh_static_pairlist(mocker, markets, static_pl_conf): ) freqtrade.pairlists.refresh_pairlist() # List ordered by BaseVolume - whitelist = ['ETH/BTC', 'TKN/BTC'] + whitelist = ["ETH/BTC", "TKN/BTC"] # Ensure all except those in whitelist are removed assert set(whitelist) == set(freqtrade.pairlists.whitelist) - assert static_pl_conf['exchange']['pair_blacklist'] == freqtrade.pairlists.blacklist + assert static_pl_conf["exchange"]["pair_blacklist"] == freqtrade.pairlists.blacklist -@pytest.mark.parametrize('pairs,expected', [ - (['NOEXIST/BTC', r'\+WHAT/BTC'], - ['ETH/BTC', 'TKN/BTC', 'TRST/BTC', 'NOEXIST/BTC', 'SWT/BTC', 'BCC/BTC', 'HOT/BTC']), - (['NOEXIST/BTC', r'*/BTC'], # This is an invalid regex - []), -]) +@pytest.mark.parametrize( + "pairs,expected", + [ + ( + ["NOEXIST/BTC", r"\+WHAT/BTC"], + ["ETH/BTC", "TKN/BTC", "TRST/BTC", "NOEXIST/BTC", "SWT/BTC", "BCC/BTC", "HOT/BTC"], + ), + ( + ["NOEXIST/BTC", r"*/BTC"], # This is an invalid regex + [], + ), + ], +) def test_refresh_static_pairlist_noexist(mocker, markets, static_pl_conf, pairs, expected, caplog): - - static_pl_conf['pairlists'][0]['allow_inactive'] = True - static_pl_conf['exchange']['pair_whitelist'] += pairs + static_pl_conf["pairlists"][0]["allow_inactive"] = True + static_pl_conf["exchange"]["pair_whitelist"] += pairs freqtrade = get_patched_freqtradebot(mocker, static_pl_conf) mocker.patch.multiple( EXMS, @@ -213,13 +236,13 @@ def test_refresh_static_pairlist_noexist(mocker, markets, static_pl_conf, pairs, # Ensure all except those in whitelist are removed assert set(expected) == set(freqtrade.pairlists.whitelist) - assert static_pl_conf['exchange']['pair_blacklist'] == freqtrade.pairlists.blacklist + assert static_pl_conf["exchange"]["pair_blacklist"] == freqtrade.pairlists.blacklist if not expected: - assert log_has_re(r'Pair whitelist contains an invalid Wildcard: Wildcard error.*', caplog) + assert log_has_re(r"Pair whitelist contains an invalid Wildcard: Wildcard error.*", caplog) def test_invalid_blacklist(mocker, markets, static_pl_conf, caplog): - static_pl_conf['exchange']['pair_blacklist'] = ['*/BTC'] + static_pl_conf["exchange"]["pair_blacklist"] = ["*/BTC"] freqtrade = get_patched_freqtradebot(mocker, static_pl_conf) mocker.patch.multiple( EXMS, @@ -230,7 +253,7 @@ def test_invalid_blacklist(mocker, markets, static_pl_conf, caplog): whitelist = [] # Ensure all except those in whitelist are removed assert set(whitelist) == set(freqtrade.pairlists.whitelist) - assert static_pl_conf['exchange']['pair_blacklist'] == freqtrade.pairlists.blacklist + assert static_pl_conf["exchange"]["pair_blacklist"] == freqtrade.pairlists.blacklist log_has_re(r"Pair blacklist contains an invalid Wildcard.*", caplog) @@ -243,27 +266,26 @@ def test_remove_logs_for_pairs_already_in_blacklist(mocker, markets, static_pl_c markets=PropertyMock(return_value=markets), ) freqtrade.pairlists.refresh_pairlist() - whitelist = ['ETH/BTC', 'TKN/BTC'] + whitelist = ["ETH/BTC", "TKN/BTC"] caplog.clear() caplog.set_level(logging.INFO) # Ensure all except those in whitelist are removed. 
assert set(whitelist) == set(freqtrade.pairlists.whitelist) - assert static_pl_conf['exchange']['pair_blacklist'] == freqtrade.pairlists.blacklist + assert static_pl_conf["exchange"]["pair_blacklist"] == freqtrade.pairlists.blacklist # Ensure that log message wasn't generated. - assert not log_has('Pair BLK/BTC in your blacklist. Removing it from whitelist...', caplog) + assert not log_has("Pair BLK/BTC in your blacklist. Removing it from whitelist...", caplog) for _ in range(3): new_whitelist = freqtrade.pairlists.verify_blacklist( - whitelist + ['BLK/BTC'], logger.warning) + whitelist + ["BLK/BTC"], logger.warning + ) # Ensure that the pair is removed from the white list, and properly logged. assert set(whitelist) == set(new_whitelist) - assert num_log_has('Pair BLK/BTC in your blacklist. Removing it from whitelist...', - caplog) == 1 + assert num_log_has("Pair BLK/BTC in your blacklist. Removing it from whitelist...", caplog) == 1 def test_refresh_pairlist_dynamic(mocker, shitcoinmarkets, tickers, whitelist_conf): - mocker.patch.multiple( EXMS, get_tickers=tickers, @@ -276,19 +298,20 @@ def test_refresh_pairlist_dynamic(mocker, shitcoinmarkets, tickers, whitelist_co markets=PropertyMock(return_value=shitcoinmarkets), ) # argument: use the whitelist dynamically by exchange-volume - whitelist = ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC', 'HOT/BTC'] + whitelist = ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC", "HOT/BTC"] freqtrade.pairlists.refresh_pairlist() assert whitelist == freqtrade.pairlists.whitelist - whitelist_conf['pairlists'] = [{'method': 'VolumePairList'}] - with pytest.raises(OperationalException, - match=r'`number_assets` not specified. Please check your configuration ' - r'for "pairlist.config.number_assets"'): + whitelist_conf["pairlists"] = [{"method": "VolumePairList"}] + with pytest.raises( + OperationalException, + match=r"`number_assets` not specified. Please check your configuration " + r'for "pairlist.config.number_assets"', + ): PairListManager(freqtrade.exchange, whitelist_conf, MagicMock()) def test_refresh_pairlist_dynamic_2(mocker, shitcoinmarkets, tickers, whitelist_conf_2): - tickers_dict = tickers() mocker.patch.multiple( @@ -297,7 +320,7 @@ def test_refresh_pairlist_dynamic_2(mocker, shitcoinmarkets, tickers, whitelist_ ) # Remove caching of ticker data to emulate changing volume by the time of second call mocker.patch.multiple( - 'freqtrade.plugins.pairlistmanager.PairListManager', + "freqtrade.plugins.pairlistmanager.PairListManager", _get_cached_tickers=MagicMock(return_value=tickers_dict), ) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf_2) @@ -307,14 +330,14 @@ def test_refresh_pairlist_dynamic_2(mocker, shitcoinmarkets, tickers, whitelist_ markets=PropertyMock(return_value=shitcoinmarkets), ) - whitelist = ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC', 'HOT/BTC'] + whitelist = ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC", "HOT/BTC"] freqtrade.pairlists.refresh_pairlist() assert whitelist == freqtrade.pairlists.whitelist # Delay to allow 0 TTL cache to expire... 
time.sleep(1) - whitelist = ['FUEL/BTC', 'ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC'] - tickers_dict['FUEL/BTC']['quoteVolume'] = 10000.0 + whitelist = ["FUEL/BTC", "ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC"] + tickers_dict["FUEL/BTC"]["quoteVolume"] = 10000.0 freqtrade.pairlists.refresh_pairlist() assert whitelist == freqtrade.pairlists.whitelist @@ -325,235 +348,469 @@ def test_VolumePairList_refresh_empty(mocker, markets_empty, whitelist_conf): exchange_has=MagicMock(return_value=True), ) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) - mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets_empty)) + mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets_empty)) # argument: use the whitelist dynamically by exchange-volume whitelist = [] - whitelist_conf['exchange']['pair_whitelist'] = [] + whitelist_conf["exchange"]["pair_whitelist"] = [] freqtrade.pairlists.refresh_pairlist() - pairslist = whitelist_conf['exchange']['pair_whitelist'] + pairslist = whitelist_conf["exchange"]["pair_whitelist"] assert set(whitelist) == set(pairslist) -@pytest.mark.parametrize("pairlists,base_currency,whitelist_result", [ - # VolumePairList only - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC', 'HOT/BTC']), - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], - "USDT", ['ETH/USDT', 'NANO/USDT', 'ADAHALF/USDT', 'ADADOUBLE/USDT']), - # No pair for ETH, VolumePairList - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], - "ETH", []), - # No pair for ETH, StaticPairList - ([{"method": "StaticPairList"}], - "ETH", []), - # No pair for ETH, all handlers - ([{"method": "StaticPairList"}, - {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 2, "max_days_listed": None}, - {"method": "PrecisionFilter"}, - {"method": "PriceFilter", "low_price_ratio": 0.03}, - {"method": "SpreadFilter", "max_spread_ratio": 0.005}, - {"method": "ShuffleFilter"}, {"method": "PerformanceFilter"}], - "ETH", []), - # AgeFilter and VolumePairList (require 2 days only, all should pass age test) - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 2, "max_days_listed": 100}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC', 'HOT/BTC']), - # AgeFilter and VolumePairList (require 10 days, all should fail age test) - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 10, "max_days_listed": None}], - "BTC", []), - # AgeFilter and VolumePairList (all pair listed > 2, all should fail age test) - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 1, "max_days_listed": 2}], - "BTC", []), - # AgeFilter and VolumePairList LTC/BTC has 6 candles - removes all - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 4, "max_days_listed": 5}], - "BTC", []), - # AgeFilter and VolumePairList LTC/BTC has 6 candles - passes - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "AgeFilter", "min_days_listed": 4, "max_days_listed": 10}], - "BTC", ["LTC/BTC"]), - # Precisionfilter and quote volume - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": 
"PrecisionFilter"}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC']), - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "PrecisionFilter"}], - "USDT", ['ETH/USDT', 'NANO/USDT']), - # PriceFilter and VolumePairList - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "PriceFilter", "low_price_ratio": 0.03}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC', 'XRP/BTC']), - # PriceFilter and VolumePairList - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "PriceFilter", "low_price_ratio": 0.03}], - "USDT", ['ETH/USDT', 'NANO/USDT']), - # Hot is removed by precision_filter, Fuel by low_price_ratio, Ripple by min_price. - ([{"method": "VolumePairList", "number_assets": 6, "sort_key": "quoteVolume"}, - {"method": "PrecisionFilter"}, - {"method": "PriceFilter", "low_price_ratio": 0.02, "min_price": 0.01}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC']), - # Hot is removed by precision_filter, Fuel by low_price_ratio, Ethereum by max_price. - ([{"method": "VolumePairList", "number_assets": 6, "sort_key": "quoteVolume"}, - {"method": "PrecisionFilter"}, - {"method": "PriceFilter", "low_price_ratio": 0.02, "max_price": 0.05}], - "BTC", ['TKN/BTC', 'LTC/BTC', 'XRP/BTC']), - # HOT and XRP are removed because below 1250 quoteVolume - ([{"method": "VolumePairList", "number_assets": 5, - "sort_key": "quoteVolume", "min_value": 1250}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'LTC/BTC']), - # HOT, XRP and FUEL whitelisted because they are below 1300 quoteVolume. - ([{"method": "VolumePairList", "number_assets": 5, - "sort_key": "quoteVolume", "max_value": 1300}], - "BTC", ['XRP/BTC', 'HOT/BTC', 'FUEL/BTC']), - # HOT, XRP whitelisted because they are between 100 and 1300 quoteVolume. 
- ([{"method": "VolumePairList", "number_assets": 5, - "sort_key": "quoteVolume", "min_value": 100, "max_value": 1300}], - "BTC", ['XRP/BTC', 'HOT/BTC']), - # StaticPairlist only - ([{"method": "StaticPairList"}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'HOT/BTC']), - # Static Pairlist before VolumePairList - sorting changes - # SpreadFilter - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "SpreadFilter", "max_spread_ratio": 0.005}], - "USDT", ['ETH/USDT']), - # ShuffleFilter - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "ShuffleFilter", "seed": 77}], - "USDT", ['ADADOUBLE/USDT', 'ETH/USDT', 'NANO/USDT', 'ADAHALF/USDT']), - # ShuffleFilter, other seed - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "ShuffleFilter", "seed": 42}], - "USDT", ['ADAHALF/USDT', 'NANO/USDT', 'ADADOUBLE/USDT', 'ETH/USDT']), - # ShuffleFilter, no seed - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, - {"method": "ShuffleFilter"}], - "USDT", 4), # whitelist_result is integer -- check only length of randomized pairlist - # AgeFilter only - ([{"method": "AgeFilter", "min_days_listed": 2}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # PrecisionFilter after StaticPairList - ([{"method": "StaticPairList"}, - {"method": "PrecisionFilter"}], - "BTC", ['ETH/BTC', 'TKN/BTC']), - # PrecisionFilter only - ([{"method": "PrecisionFilter"}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # PriceFilter after StaticPairList - ([{"method": "StaticPairList"}, - {"method": "PriceFilter", "low_price_ratio": 0.02, "min_price": 0.000001, "max_price": 0.1}], - "BTC", ['ETH/BTC', 'TKN/BTC']), - # PriceFilter only - ([{"method": "PriceFilter", "low_price_ratio": 0.02}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # ShuffleFilter after StaticPairList - ([{"method": "StaticPairList"}, - {"method": "ShuffleFilter", "seed": 42}], - "BTC", ['TKN/BTC', 'ETH/BTC', 'HOT/BTC']), - # ShuffleFilter only - ([{"method": "ShuffleFilter", "seed": 42}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # PerformanceFilter after StaticPairList - ([{"method": "StaticPairList"}, - {"method": "PerformanceFilter"}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'HOT/BTC']), - # PerformanceFilter only - ([{"method": "PerformanceFilter"}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # SpreadFilter after StaticPairList - ([{"method": "StaticPairList"}, - {"method": "SpreadFilter", "max_spread_ratio": 0.005}], - "BTC", ['ETH/BTC', 'TKN/BTC']), - # SpreadFilter only - ([{"method": "SpreadFilter", "max_spread_ratio": 0.005}], - "BTC", 'filter_at_the_beginning'), # OperationalException expected - # Static Pairlist after VolumePairList, on a non-first position (appends pairs) - ([{"method": "VolumePairList", "number_assets": 2, "sort_key": "quoteVolume"}, - {"method": "StaticPairList"}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'TRST/BTC', 'SWT/BTC', 'BCC/BTC', 'HOT/BTC']), - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "PriceFilter", "low_price_ratio": 0.02}], - "USDT", ['ETH/USDT', 'NANO/USDT']), - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "PriceFilter", "max_value": 0.000001}], - "USDT", ['NANO/USDT']), - ([{"method": "StaticPairList"}, - {"method": "RangeStabilityFilter", "lookback_days": 10, - 
"min_rate_of_change": 0.01, "refresh_period": 1440}], - "BTC", ['ETH/BTC', 'TKN/BTC', 'HOT/BTC']), - ([{"method": "StaticPairList"}, - {"method": "RangeStabilityFilter", "lookback_days": 10, - "max_rate_of_change": 0.01, "refresh_period": 1440}], - "BTC", []), # All removed because of max_rate_of_change being 0.017 - ([{"method": "StaticPairList"}, - {"method": "RangeStabilityFilter", "lookback_days": 10, - "min_rate_of_change": 0.018, "max_rate_of_change": 0.02, "refresh_period": 1440}], - "BTC", []), # All removed - limits are above the highest change_rate - ([{"method": "StaticPairList"}, - {"method": "VolatilityFilter", "lookback_days": 3, - "min_volatility": 0.002, "max_volatility": 0.004, "refresh_period": 1440}], - "BTC", ['ETH/BTC', 'TKN/BTC']), - # VolumePairList with no offset = unchanged pairlist - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "OffsetFilter", "offset": 0, "number_assets": 0}], - "USDT", ['ETH/USDT', 'NANO/USDT', 'ADAHALF/USDT', 'ADADOUBLE/USDT']), - # VolumePairList with offset = 2 - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "OffsetFilter", "offset": 2}], - "USDT", ['ADAHALF/USDT', 'ADADOUBLE/USDT']), - # VolumePairList with offset and limit - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "OffsetFilter", "offset": 1, "number_assets": 2}], - "USDT", ['NANO/USDT', 'ADAHALF/USDT']), - # VolumePairList with higher offset, than total pairlist - ([{"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, - {"method": "OffsetFilter", "offset": 100}], - "USDT", []) -]) -def test_VolumePairList_whitelist_gen(mocker, whitelist_conf, shitcoinmarkets, tickers, - ohlcv_history, pairlists, base_currency, - whitelist_result, caplog) -> None: - whitelist_conf['pairlists'] = pairlists - whitelist_conf['stake_currency'] = base_currency +@pytest.mark.parametrize( + "pairlists,base_currency,whitelist_result", + [ + # VolumePairList only + ( + [{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC", "HOT/BTC"], + ), + ( + [{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], + "USDT", + ["ETH/USDT", "NANO/USDT", "ADAHALF/USDT", "ADADOUBLE/USDT"], + ), + # No pair for ETH, VolumePairList + ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}], "ETH", []), + # No pair for ETH, StaticPairList + ([{"method": "StaticPairList"}], "ETH", []), + # No pair for ETH, all handlers + ( + [ + {"method": "StaticPairList"}, + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", "min_days_listed": 2, "max_days_listed": None}, + {"method": "PrecisionFilter"}, + {"method": "PriceFilter", "low_price_ratio": 0.03}, + {"method": "SpreadFilter", "max_spread_ratio": 0.005}, + {"method": "ShuffleFilter"}, + {"method": "PerformanceFilter"}, + ], + "ETH", + [], + ), + # AgeFilter and VolumePairList (require 2 days only, all should pass age test) + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", "min_days_listed": 2, "max_days_listed": 100}, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC", "HOT/BTC"], + ), + # AgeFilter and VolumePairList (require 10 days, all should fail age test) + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", 
"min_days_listed": 10, "max_days_listed": None}, + ], + "BTC", + [], + ), + # AgeFilter and VolumePairList (all pair listed > 2, all should fail age test) + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", "min_days_listed": 1, "max_days_listed": 2}, + ], + "BTC", + [], + ), + # AgeFilter and VolumePairList LTC/BTC has 6 candles - removes all + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", "min_days_listed": 4, "max_days_listed": 5}, + ], + "BTC", + [], + ), + # AgeFilter and VolumePairList LTC/BTC has 6 candles - passes + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "AgeFilter", "min_days_listed": 4, "max_days_listed": 10}, + ], + "BTC", + ["LTC/BTC"], + ), + # Precisionfilter and quote volume + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "PrecisionFilter"}, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC"], + ), + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "PrecisionFilter"}, + ], + "USDT", + ["ETH/USDT", "NANO/USDT"], + ), + # PriceFilter and VolumePairList + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "PriceFilter", "low_price_ratio": 0.03}, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC", "XRP/BTC"], + ), + # PriceFilter and VolumePairList + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "PriceFilter", "low_price_ratio": 0.03}, + ], + "USDT", + ["ETH/USDT", "NANO/USDT"], + ), + # Hot is removed by precision_filter, Fuel by low_price_ratio, Ripple by min_price. + ( + [ + {"method": "VolumePairList", "number_assets": 6, "sort_key": "quoteVolume"}, + {"method": "PrecisionFilter"}, + {"method": "PriceFilter", "low_price_ratio": 0.02, "min_price": 0.01}, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + ), + # Hot is removed by precision_filter, Fuel by low_price_ratio, Ethereum by max_price. + ( + [ + {"method": "VolumePairList", "number_assets": 6, "sort_key": "quoteVolume"}, + {"method": "PrecisionFilter"}, + {"method": "PriceFilter", "low_price_ratio": 0.02, "max_price": 0.05}, + ], + "BTC", + ["TKN/BTC", "LTC/BTC", "XRP/BTC"], + ), + # HOT and XRP are removed because below 1250 quoteVolume + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "min_value": 1250, + } + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + ), + # HOT, XRP and FUEL whitelisted because they are below 1300 quoteVolume. + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "max_value": 1300, + } + ], + "BTC", + ["XRP/BTC", "HOT/BTC", "FUEL/BTC"], + ), + # HOT, XRP whitelisted because they are between 100 and 1300 quoteVolume. 
+ ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "min_value": 100, + "max_value": 1300, + } + ], + "BTC", + ["XRP/BTC", "HOT/BTC"], + ), + # StaticPairlist only + ([{"method": "StaticPairList"}], "BTC", ["ETH/BTC", "TKN/BTC", "HOT/BTC"]), + # Static Pairlist before VolumePairList - sorting changes + # SpreadFilter + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "SpreadFilter", "max_spread_ratio": 0.005}, + ], + "USDT", + ["ETH/USDT"], + ), + # ShuffleFilter + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "ShuffleFilter", "seed": 77}, + ], + "USDT", + ["ADADOUBLE/USDT", "ETH/USDT", "NANO/USDT", "ADAHALF/USDT"], + ), + # ShuffleFilter, other seed + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "ShuffleFilter", "seed": 42}, + ], + "USDT", + ["ADAHALF/USDT", "NANO/USDT", "ADADOUBLE/USDT", "ETH/USDT"], + ), + # ShuffleFilter, no seed + ( + [ + {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume"}, + {"method": "ShuffleFilter"}, + ], + "USDT", + 4, + ), # whitelist_result is integer -- check only length of randomized pairlist + # AgeFilter only + ( + [{"method": "AgeFilter", "min_days_listed": 2}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # PrecisionFilter after StaticPairList + ( + [{"method": "StaticPairList"}, {"method": "PrecisionFilter"}], + "BTC", + ["ETH/BTC", "TKN/BTC"], + ), + # PrecisionFilter only + ( + [{"method": "PrecisionFilter"}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # PriceFilter after StaticPairList + ( + [ + {"method": "StaticPairList"}, + { + "method": "PriceFilter", + "low_price_ratio": 0.02, + "min_price": 0.000001, + "max_price": 0.1, + }, + ], + "BTC", + ["ETH/BTC", "TKN/BTC"], + ), + # PriceFilter only + ( + [{"method": "PriceFilter", "low_price_ratio": 0.02}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # ShuffleFilter after StaticPairList + ( + [{"method": "StaticPairList"}, {"method": "ShuffleFilter", "seed": 42}], + "BTC", + ["TKN/BTC", "ETH/BTC", "HOT/BTC"], + ), + # ShuffleFilter only + ( + [{"method": "ShuffleFilter", "seed": 42}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # PerformanceFilter after StaticPairList + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + "BTC", + ["ETH/BTC", "TKN/BTC", "HOT/BTC"], + ), + # PerformanceFilter only + ( + [{"method": "PerformanceFilter"}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # SpreadFilter after StaticPairList + ( + [{"method": "StaticPairList"}, {"method": "SpreadFilter", "max_spread_ratio": 0.005}], + "BTC", + ["ETH/BTC", "TKN/BTC"], + ), + # SpreadFilter only + ( + [{"method": "SpreadFilter", "max_spread_ratio": 0.005}], + "BTC", + "filter_at_the_beginning", + ), # OperationalException expected + # Static Pairlist after VolumePairList, on a non-first position (appends pairs) + ( + [ + {"method": "VolumePairList", "number_assets": 2, "sort_key": "quoteVolume"}, + {"method": "StaticPairList"}, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "TRST/BTC", "SWT/BTC", "BCC/BTC", "HOT/BTC"], + ), + ( + [ + {"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "PriceFilter", "low_price_ratio": 0.02}, + ], + "USDT", + ["ETH/USDT", "NANO/USDT"], + ), + ( + [ + {"method": 
"VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "PriceFilter", "max_value": 0.000001}, + ], + "USDT", + ["NANO/USDT"], + ), + ( + [ + {"method": "StaticPairList"}, + { + "method": "RangeStabilityFilter", + "lookback_days": 10, + "min_rate_of_change": 0.01, + "refresh_period": 1440, + }, + ], + "BTC", + ["ETH/BTC", "TKN/BTC", "HOT/BTC"], + ), + ( + [ + {"method": "StaticPairList"}, + { + "method": "RangeStabilityFilter", + "lookback_days": 10, + "max_rate_of_change": 0.01, + "refresh_period": 1440, + }, + ], + "BTC", + [], + ), # All removed because of max_rate_of_change being 0.017 + ( + [ + {"method": "StaticPairList"}, + { + "method": "RangeStabilityFilter", + "lookback_days": 10, + "min_rate_of_change": 0.018, + "max_rate_of_change": 0.02, + "refresh_period": 1440, + }, + ], + "BTC", + [], + ), # All removed - limits are above the highest change_rate + ( + [ + {"method": "StaticPairList"}, + { + "method": "VolatilityFilter", + "lookback_days": 3, + "min_volatility": 0.002, + "max_volatility": 0.004, + "refresh_period": 1440, + }, + ], + "BTC", + ["ETH/BTC", "TKN/BTC"], + ), + # VolumePairList with no offset = unchanged pairlist + ( + [ + {"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "OffsetFilter", "offset": 0, "number_assets": 0}, + ], + "USDT", + ["ETH/USDT", "NANO/USDT", "ADAHALF/USDT", "ADADOUBLE/USDT"], + ), + # VolumePairList with offset = 2 + ( + [ + {"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "OffsetFilter", "offset": 2}, + ], + "USDT", + ["ADAHALF/USDT", "ADADOUBLE/USDT"], + ), + # VolumePairList with offset and limit + ( + [ + {"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "OffsetFilter", "offset": 1, "number_assets": 2}, + ], + "USDT", + ["NANO/USDT", "ADAHALF/USDT"], + ), + # VolumePairList with higher offset, than total pairlist + ( + [ + {"method": "VolumePairList", "number_assets": 20, "sort_key": "quoteVolume"}, + {"method": "OffsetFilter", "offset": 100}, + ], + "USDT", + [], + ), + ], +) +def test_VolumePairList_whitelist_gen( + mocker, + whitelist_conf, + shitcoinmarkets, + tickers, + ohlcv_history, + pairlists, + base_currency, + whitelist_result, + caplog, +) -> None: + whitelist_conf["runmode"] = "backtest" + whitelist_conf["pairlists"] = pairlists + whitelist_conf["stake_currency"] = base_currency ohlcv_history_high_vola = ohlcv_history.copy() - ohlcv_history_high_vola.loc[ohlcv_history_high_vola.index == 1, 'close'] = 0.00090 + ohlcv_history_high_vola.loc[ohlcv_history_high_vola.index == 1, "close"] = 0.00090 ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', CandleType.SPOT): pd.concat([ohlcv_history, ohlcv_history]), - ('XRP/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('HOT/BTC', '1d', CandleType.SPOT): ohlcv_history_high_vola, + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): pd.concat([ohlcv_history, ohlcv_history]), + ("XRP/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("HOT/BTC", "1d", CandleType.SPOT): ohlcv_history_high_vola, } - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) - mocker.patch.multiple(EXMS, - get_tickers=tickers, - 
markets=PropertyMock(return_value=shitcoinmarkets) - ) + mocker.patch.multiple( + EXMS, get_tickers=tickers, markets=PropertyMock(return_value=shitcoinmarkets) + ) mocker.patch.multiple( EXMS, refresh_latest_ohlcv=MagicMock(return_value=ohlcv_data), ) # Provide for PerformanceFilter's dependency - mocker.patch.multiple('freqtrade.persistence.Trade', - get_overall_performance=MagicMock(return_value=[]) - ) + mocker.patch.multiple( + "freqtrade.persistence.Trade", get_overall_performance=MagicMock(return_value=[]) + ) # Set whitelist_result to None if pairlist is invalid and should produce exception - if whitelist_result == 'filter_at_the_beginning': - with pytest.raises(OperationalException, - match=r"This Pairlist Handler should not be used at the first position " - r"in the list of Pairlist Handlers."): + if whitelist_result == "filter_at_the_beginning": + with pytest.raises( + OperationalException, + match=r"This Pairlist Handler should not be used at the first position " + r"in the list of Pairlist Handlers.", + ): freqtrade.pairlists.refresh_pairlist() else: freqtrade.pairlists.refresh_pairlist() @@ -568,145 +825,276 @@ def test_VolumePairList_whitelist_gen(mocker, whitelist_conf, shitcoinmarkets, t assert len(whitelist) == whitelist_result for pairlist in pairlists: - if pairlist['method'] == 'AgeFilter' and pairlist['min_days_listed'] and \ - len(ohlcv_history) < pairlist['min_days_listed']: - assert log_has_re(r'^Removed .* from whitelist, because age .* is less than ' - r'.* day.*', caplog) - if pairlist['method'] == 'AgeFilter' and pairlist['max_days_listed'] and \ - len(ohlcv_history) > pairlist['max_days_listed']: - assert log_has_re(r'^Removed .* from whitelist, because age .* is less than ' - r'.* day.* or more than .* day', caplog) - if pairlist['method'] == 'PrecisionFilter' and whitelist_result: - assert log_has_re(r'^Removed .* from whitelist, because stop price .* ' - r'would be <= stop limit.*', caplog) - if pairlist['method'] == 'PriceFilter' and whitelist_result: - assert (log_has_re(r'^Removed .* from whitelist, because 1 unit is .*%$', caplog) or - log_has_re(r'^Removed .* from whitelist, ' - r'because last price < .*%$', caplog) or - log_has_re(r'^Removed .* from whitelist, ' - r'because last price > .*%$', caplog) or - log_has_re(r'^Removed .* from whitelist, ' - r'because min value change of .*', caplog) or - log_has_re(r"^Removed .* from whitelist, because ticker\['last'\] " - r"is empty.*", caplog)) - if pairlist['method'] == 'VolumePairList': - logmsg = ("DEPRECATED: using any key other than quoteVolume for " - "VolumePairList is deprecated.") - if pairlist['sort_key'] != 'quoteVolume': + if ( + pairlist["method"] == "AgeFilter" + and pairlist["min_days_listed"] + and len(ohlcv_history) < pairlist["min_days_listed"] + ): + assert log_has_re( + r"^Removed .* from whitelist, because age .* is less than " r".* day.*", caplog + ) + if ( + pairlist["method"] == "AgeFilter" + and pairlist["max_days_listed"] + and len(ohlcv_history) > pairlist["max_days_listed"] + ): + assert log_has_re( + r"^Removed .* from whitelist, because age .* is less than " + r".* day.* or more than .* day", + caplog, + ) + if pairlist["method"] == "PrecisionFilter" and whitelist_result: + assert log_has_re( + r"^Removed .* from whitelist, because stop price .* " + r"would be <= stop limit.*", + caplog, + ) + if pairlist["method"] == "PriceFilter" and whitelist_result: + assert ( + log_has_re(r"^Removed .* from whitelist, because 1 unit is .*%$", caplog) + or log_has_re( + r"^Removed .* from 
whitelist, " r"because last price < .*%$", caplog + ) + or log_has_re( + r"^Removed .* from whitelist, " r"because last price > .*%$", caplog + ) + or log_has_re( + r"^Removed .* from whitelist, " r"because min value change of .*", caplog + ) + or log_has_re( + r"^Removed .* from whitelist, because ticker\['last'\] " r"is empty.*", + caplog, + ) + ) + if pairlist["method"] == "VolumePairList": + logmsg = ( + "DEPRECATED: using any key other than quoteVolume for " + "VolumePairList is deprecated." + ) + if pairlist["sort_key"] != "quoteVolume": assert log_has(logmsg, caplog) else: assert not log_has(logmsg, caplog) - if pairlist["method"] == 'VolatilityFilter': - assert log_has_re(r'^Removed .* from whitelist, because volatility.*$', caplog) + if pairlist["method"] == "VolatilityFilter": + assert log_has_re(r"^Removed .* from whitelist, because volatility.*$", caplog) -@pytest.mark.parametrize("pairlists,base_currency,exchange,volumefilter_result", [ - # default refresh of 1800 to small for daily candle lookback - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_days": 1}], - "BTC", "binance", "default_refresh_too_short"), # OperationalException expected - # ambiguous configuration with lookback days and period - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_days": 1, "lookback_period": 1}], - "BTC", "binance", "lookback_days_and_period"), # OperationalException expected - # negative lookback period - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1d", "lookback_period": -1}], - "BTC", "binance", "lookback_period_negative"), # OperationalException expected - # lookback range exceedes exchange limit - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1m", "lookback_period": 2000, "refresh_period": 3600}], - "BTC", "binance", "lookback_exceeds_exchange_request_size"), # OperationalException expected - # expecting pairs as given - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1d", "lookback_period": 1, "refresh_period": 86400}], - "BTC", "binance", ['LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC', 'HOT/BTC']), - # expecting pairs as input, because 1h candles are not available - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1h", "lookback_period": 2, "refresh_period": 3600}], - "BTC", "binance", ['ETH/BTC', 'LTC/BTC', 'NEO/BTC', 'TKN/BTC', 'XRP/BTC']), - # TKN/BTC is removed because it doesn't have enough candles - ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1d", "lookback_period": 6, "refresh_period": 86400}], - "BTC", "binance", ['LTC/BTC', 'XRP/BTC', 'ETH/BTC', 'HOT/BTC', 'NEO/BTC']), - # VolumePairlist in range mode as filter. 
- # TKN/BTC is removed because it doesn't have enough candles - ([{"method": "VolumePairList", "number_assets": 5}, - {"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - "lookback_timeframe": "1d", "lookback_period": 2, "refresh_period": 86400}], - "BTC", "binance", ['LTC/BTC', 'XRP/BTC', 'ETH/BTC', 'TKN/BTC', 'HOT/BTC']), - # ftx data is already in Quote currency, therefore won't require conversion - # ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", - # "lookback_timeframe": "1d", "lookback_period": 1, "refresh_period": 86400}], - # "BTC", "ftx", ['HOT/BTC', 'LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC']), -]) +@pytest.mark.parametrize( + "pairlists,base_currency,exchange,volumefilter_result", + [ + # default refresh of 1800 too small for daily candle lookback + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_days": 1, + } + ], + "BTC", + "binance", + "default_refresh_too_short", + ), # OperationalException expected + # ambiguous configuration with lookback days and period + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_days": 1, + "lookback_period": 1, + } + ], + "BTC", + "binance", + "lookback_days_and_period", + ), # OperationalException expected + # negative lookback period + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1d", + "lookback_period": -1, + } + ], + "BTC", + "binance", + "lookback_period_negative", + ), # OperationalException expected + # lookback range exceeds exchange limit + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1m", + "lookback_period": 2000, + "refresh_period": 3600, + } + ], + "BTC", + "binance", + "lookback_exceeds_exchange_request_size", + ), # OperationalException expected + # expecting pairs as given + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1d", + "lookback_period": 1, + "refresh_period": 86400, + } + ], + "BTC", + "binance", + ["LTC/BTC", "ETH/BTC", "TKN/BTC", "XRP/BTC", "HOT/BTC"], + ), + # expecting pairs as input, because 1h candles are not available + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1h", + "lookback_period": 2, + "refresh_period": 3600, + } + ], + "BTC", + "binance", + ["ETH/BTC", "LTC/BTC", "NEO/BTC", "TKN/BTC", "XRP/BTC"], + ), + # TKN/BTC is removed because it doesn't have enough candles + ( + [ + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1d", + "lookback_period": 6, + "refresh_period": 86400, + } + ], + "BTC", + "binance", + ["LTC/BTC", "XRP/BTC", "ETH/BTC", "HOT/BTC", "NEO/BTC"], + ), + # VolumePairlist in range mode as filter.
+ # TKN/BTC is removed because it doesn't have enough candles + ( + [ + {"method": "VolumePairList", "number_assets": 5}, + { + "method": "VolumePairList", + "number_assets": 5, + "sort_key": "quoteVolume", + "lookback_timeframe": "1d", + "lookback_period": 2, + "refresh_period": 86400, + }, + ], + "BTC", + "binance", + ["LTC/BTC", "XRP/BTC", "ETH/BTC", "TKN/BTC", "HOT/BTC"], + ), + # ftx data is already in Quote currency, therefore won't require conversion + # ([{"method": "VolumePairList", "number_assets": 5, "sort_key": "quoteVolume", + # "lookback_timeframe": "1d", "lookback_period": 1, "refresh_period": 86400}], + # "BTC", "ftx", ['HOT/BTC', 'LTC/BTC', 'ETH/BTC', 'TKN/BTC', 'XRP/BTC']), + ], +) def test_VolumePairList_range( - mocker, whitelist_conf, shitcoinmarkets, tickers, ohlcv_history, - pairlists, base_currency, exchange, volumefilter_result, time_machine) -> None: - whitelist_conf['pairlists'] = pairlists - whitelist_conf['stake_currency'] = base_currency - whitelist_conf['exchange']['name'] = exchange + mocker, + whitelist_conf, + shitcoinmarkets, + tickers, + ohlcv_history, + pairlists, + base_currency, + exchange, + volumefilter_result, + time_machine, +) -> None: + whitelist_conf["pairlists"] = pairlists + whitelist_conf["stake_currency"] = base_currency + whitelist_conf["exchange"]["name"] = exchange # Ensure we have 6 candles ohlcv_history_long = pd.concat([ohlcv_history, ohlcv_history]) ohlcv_history_high_vola = ohlcv_history_long.copy() - ohlcv_history_high_vola.loc[ohlcv_history_high_vola.index == 1, 'close'] = 0.00090 + ohlcv_history_high_vola.loc[ohlcv_history_high_vola.index == 1, "close"] = 0.00090 # create candles for medium overall volume with last candle high volume ohlcv_history_medium_volume = ohlcv_history_long.copy() - ohlcv_history_medium_volume.loc[ohlcv_history_medium_volume.index == 2, 'volume'] = 5 + ohlcv_history_medium_volume.loc[ohlcv_history_medium_volume.index == 2, "volume"] = 5 # create candles for high volume with all candles high volume, but very low price. 
ohlcv_history_high_volume = ohlcv_history_long.copy() - ohlcv_history_high_volume['volume'] = 10 - ohlcv_history_high_volume['low'] = ohlcv_history_high_volume.loc[:, 'low'] * 0.01 - ohlcv_history_high_volume['high'] = ohlcv_history_high_volume.loc[:, 'high'] * 0.01 - ohlcv_history_high_volume['close'] = ohlcv_history_high_volume.loc[:, 'close'] * 0.01 + ohlcv_history_high_volume["volume"] = 10 + ohlcv_history_high_volume["low"] = ohlcv_history_high_volume.loc[:, "low"] * 0.01 + ohlcv_history_high_volume["high"] = ohlcv_history_high_volume.loc[:, "high"] * 0.01 + ohlcv_history_high_volume["close"] = ohlcv_history_high_volume.loc[:, "close"] * 0.01 ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history_long, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', CandleType.SPOT): ohlcv_history_medium_volume, - ('XRP/BTC', '1d', CandleType.SPOT): ohlcv_history_high_vola, - ('HOT/BTC', '1d', CandleType.SPOT): ohlcv_history_high_volume, + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history_long, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): ohlcv_history_medium_volume, + ("XRP/BTC", "1d", CandleType.SPOT): ohlcv_history_high_vola, + ("HOT/BTC", "1d", CandleType.SPOT): ohlcv_history_high_volume, } - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) - if volumefilter_result == 'default_refresh_too_short': - with pytest.raises(OperationalException, - match=r'Refresh period of [0-9]+ seconds is smaller than one timeframe ' - r'of [0-9]+.*\. Please adjust refresh_period to at least [0-9]+ ' - r'and restart the bot\.'): + if volumefilter_result == "default_refresh_too_short": + with pytest.raises( + OperationalException, + match=r"Refresh period of [0-9]+ seconds is smaller than one timeframe " + r"of [0-9]+.*\. 
Please adjust refresh_period to at least [0-9]+ " + r"and restart the bot\.", + ): freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) return - elif volumefilter_result == 'lookback_days_and_period': - with pytest.raises(OperationalException, - match=r'Ambigous configuration: lookback_days and lookback_period both ' - r'set in pairlist config\..*'): + elif volumefilter_result == "lookback_days_and_period": + with pytest.raises( + OperationalException, + match=r"Ambiguous configuration: lookback_days and lookback_period both " + r"set in pairlist config\..*", + ): freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) - elif volumefilter_result == 'lookback_period_negative': - with pytest.raises(OperationalException, - match=r'VolumeFilter requires lookback_period to be >= 0'): + elif volumefilter_result == "lookback_period_negative": + with pytest.raises( + OperationalException, match=r"VolumeFilter requires lookback_period to be >= 0" + ): freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) - elif volumefilter_result == 'lookback_exceeds_exchange_request_size': - with pytest.raises(OperationalException, - match=r'VolumeFilter requires lookback_period to not exceed ' - r'exchange max request size \([0-9]+\)'): + elif volumefilter_result == "lookback_exceeds_exchange_request_size": + with pytest.raises( + OperationalException, + match=r"VolumeFilter requires lookback_period to not exceed " + r"exchange max request size \([0-9]+\)", + ): freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) else: freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) mocker.patch.multiple( - EXMS, - get_tickers=tickers, - markets=PropertyMock(return_value=shitcoinmarkets) + EXMS, get_tickers=tickers, markets=PropertyMock(return_value=shitcoinmarkets) ) start_dt = dt_now() time_machine.move_to(start_dt) # remove ohlcv when looback_timeframe != 1d # to enforce fallback to ticker data - if 'lookback_timeframe' in pairlists[0]: - if pairlists[0]['lookback_timeframe'] != '1d': + if "lookback_timeframe" in pairlists[0]: + if pairlists[0]["lookback_timeframe"] != "1d": ohlcv_data = {} ohclv_mock = mocker.patch(f"{EXMS}.refresh_latest_ohlcv", return_value=ohlcv_data) @@ -734,21 +1122,22 @@ def test_VolumePairList_range( def test_PrecisionFilter_error(mocker, whitelist_conf) -> None: - whitelist_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PrecisionFilter"}] - del whitelist_conf['stoploss'] + whitelist_conf["pairlists"] = [{"method": "StaticPairList"}, {"method": "PrecisionFilter"}] + del whitelist_conf["stoploss"] - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) - with pytest.raises(OperationalException, - match=r"PrecisionFilter can only work with stoploss defined\..*"): + with pytest.raises( + OperationalException, match=r"PrecisionFilter can only work with stoploss defined\..*" + ): PairListManager(MagicMock, whitelist_conf, MagicMock()) def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None: - whitelist_conf['pairlists'] = [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}] - if hasattr(Trade, 'session'): + whitelist_conf["pairlists"] = [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}] + if hasattr(Trade, "session"): del Trade.session - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) exchange = get_patched_exchange(mocker, whitelist_conf) pm 
= PairListManager(exchange, whitelist_conf, MagicMock()) pm.refresh_pairlist() @@ -758,85 +1147,108 @@ def test_PerformanceFilter_error(mocker, whitelist_conf, caplog) -> None: def test_VolatilityFilter_error(mocker, whitelist_conf) -> None: volatility_filter = {"method": "VolatilityFilter", "lookback_days": -1} - whitelist_conf['pairlists'] = [{"method": "StaticPairList"}, volatility_filter] + whitelist_conf["pairlists"] = [{"method": "StaticPairList"}, volatility_filter] - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) exchange_mock = MagicMock() exchange_mock.ohlcv_candle_limit = MagicMock(return_value=1000) - with pytest.raises(OperationalException, - match=r"VolatilityFilter requires lookback_days to be >= 1*"): + with pytest.raises( + OperationalException, match=r"VolatilityFilter requires lookback_days to be >= 1*" + ): PairListManager(exchange_mock, whitelist_conf, MagicMock()) volatility_filter = {"method": "VolatilityFilter", "lookback_days": 2000} - whitelist_conf['pairlists'] = [{"method": "StaticPairList"}, volatility_filter] - with pytest.raises(OperationalException, - match=r"VolatilityFilter requires lookback_days to not exceed exchange max"): + whitelist_conf["pairlists"] = [{"method": "StaticPairList"}, volatility_filter] + with pytest.raises( + OperationalException, + match=r"VolatilityFilter requires lookback_days to not exceed exchange max", + ): PairListManager(exchange_mock, whitelist_conf, MagicMock()) volatility_filter = {"method": "VolatilityFilter", "sort_direction": "Random"} - whitelist_conf['pairlists'] = [{"method": "StaticPairList"}, volatility_filter] - with pytest.raises(OperationalException, - match=r"VolatilityFilter requires sort_direction to be either " - r"None .*'asc'.*'desc'"): + whitelist_conf["pairlists"] = [{"method": "StaticPairList"}, volatility_filter] + with pytest.raises( + OperationalException, + match=r"VolatilityFilter requires sort_direction to be either " r"None .*'asc'.*'desc'", + ): PairListManager(exchange_mock, whitelist_conf, MagicMock()) -@pytest.mark.parametrize('pairlist,expected_pairlist', [ - ({"method": "VolatilityFilter", "sort_direction": "asc"}, - ['XRP/BTC', 'ETH/BTC', 'LTC/BTC', 'TKN/BTC']), - ({"method": "VolatilityFilter", "sort_direction": "desc"}, - ['TKN/BTC', 'LTC/BTC', 'ETH/BTC', 'XRP/BTC']), - ({"method": "VolatilityFilter", "sort_direction": "desc", 'min_volatility': 0.4}, - ['TKN/BTC', 'LTC/BTC', 'ETH/BTC']), - ({"method": "VolatilityFilter", "sort_direction": "asc", 'min_volatility': 0.4}, - ['ETH/BTC', 'LTC/BTC', 'TKN/BTC']), - ({"method": "VolatilityFilter", "sort_direction": "desc", 'max_volatility': 0.5}, - ['LTC/BTC', 'ETH/BTC', 'XRP/BTC']), - ({"method": "VolatilityFilter", "sort_direction": "asc", 'max_volatility': 0.5}, - ['XRP/BTC', 'ETH/BTC', 'LTC/BTC']), - ({"method": "RangeStabilityFilter", "sort_direction": "asc"}, - ['ETH/BTC', 'XRP/BTC', 'LTC/BTC', 'TKN/BTC']), - ({"method": "RangeStabilityFilter", "sort_direction": "desc"}, - ['TKN/BTC', 'LTC/BTC', 'XRP/BTC', 'ETH/BTC']), - ({"method": "RangeStabilityFilter", "sort_direction": "asc", 'min_rate_of_change': 0.4}, - ['XRP/BTC', 'LTC/BTC', 'TKN/BTC']), - ({"method": "RangeStabilityFilter", "sort_direction": "desc", 'min_rate_of_change': 0.4}, - ['TKN/BTC', 'LTC/BTC', 'XRP/BTC']), -]) +@pytest.mark.parametrize( + "pairlist,expected_pairlist", + [ + ( + {"method": "VolatilityFilter", "sort_direction": "asc"}, + ["XRP/BTC", "ETH/BTC", "LTC/BTC", "TKN/BTC"], + ), + 
( + {"method": "VolatilityFilter", "sort_direction": "desc"}, + ["TKN/BTC", "LTC/BTC", "ETH/BTC", "XRP/BTC"], + ), + ( + {"method": "VolatilityFilter", "sort_direction": "desc", "min_volatility": 0.4}, + ["TKN/BTC", "LTC/BTC", "ETH/BTC"], + ), + ( + {"method": "VolatilityFilter", "sort_direction": "asc", "min_volatility": 0.4}, + ["ETH/BTC", "LTC/BTC", "TKN/BTC"], + ), + ( + {"method": "VolatilityFilter", "sort_direction": "desc", "max_volatility": 0.5}, + ["LTC/BTC", "ETH/BTC", "XRP/BTC"], + ), + ( + {"method": "VolatilityFilter", "sort_direction": "asc", "max_volatility": 0.5}, + ["XRP/BTC", "ETH/BTC", "LTC/BTC"], + ), + ( + {"method": "RangeStabilityFilter", "sort_direction": "asc"}, + ["ETH/BTC", "XRP/BTC", "LTC/BTC", "TKN/BTC"], + ), + ( + {"method": "RangeStabilityFilter", "sort_direction": "desc"}, + ["TKN/BTC", "LTC/BTC", "XRP/BTC", "ETH/BTC"], + ), + ( + {"method": "RangeStabilityFilter", "sort_direction": "asc", "min_rate_of_change": 0.4}, + ["XRP/BTC", "LTC/BTC", "TKN/BTC"], + ), + ( + {"method": "RangeStabilityFilter", "sort_direction": "desc", "min_rate_of_change": 0.4}, + ["TKN/BTC", "LTC/BTC", "XRP/BTC"], + ), + ], +) def test_VolatilityFilter_RangeStabilityFilter_sort( - mocker, whitelist_conf, tickers, time_machine, pairlist, expected_pairlist) -> None: - whitelist_conf['pairlists'] = [ - {'method': 'VolumePairList', 'number_assets': 10}, - pairlist - ] + mocker, whitelist_conf, tickers, time_machine, pairlist, expected_pairlist +) -> None: + whitelist_conf["pairlists"] = [{"method": "VolumePairList", "number_assets": 10}, pairlist] - df1 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=42) - df2 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=2) - df3 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=3) - df4 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=4) - df5 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=5) - df6 = generate_test_data('1d', 10, '2022-01-05 00:00:00+00:00', random_seed=6) + df1 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=42) + df2 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=2) + df3 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=3) + df4 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=4) + df5 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=5) + df6 = generate_test_data("1d", 10, "2022-01-05 00:00:00+00:00", random_seed=6) assert not df1.equals(df2) - time_machine.move_to('2022-01-15 00:00:00+00:00') + time_machine.move_to("2022-01-15 00:00:00+00:00") ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): df1, - ('TKN/BTC', '1d', CandleType.SPOT): df2, - ('LTC/BTC', '1d', CandleType.SPOT): df3, - ('XRP/BTC', '1d', CandleType.SPOT): df4, - ('HOT/BTC', '1d', CandleType.SPOT): df5, - ('BLK/BTC', '1d', CandleType.SPOT): df6, - + ("ETH/BTC", "1d", CandleType.SPOT): df1, + ("TKN/BTC", "1d", CandleType.SPOT): df2, + ("LTC/BTC", "1d", CandleType.SPOT): df3, + ("XRP/BTC", "1d", CandleType.SPOT): df4, + ("HOT/BTC", "1d", CandleType.SPOT): df5, + ("BLK/BTC", "1d", CandleType.SPOT): df6, } ohlcv_mock = MagicMock(return_value=ohlcv_data) mocker.patch.multiple( EXMS, exchange_has=MagicMock(return_value=True), refresh_latest_ohlcv=ohlcv_mock, - get_tickers=tickers - + get_tickers=tickers, ) exchange = get_patched_exchange(mocker, whitelist_conf) @@ -855,10 +1267,11 @@ def test_VolatilityFilter_RangeStabilityFilter_sort( def 
test_ShuffleFilter_init(mocker, whitelist_conf, caplog) -> None: - whitelist_conf['pairlists'] = [ + whitelist_conf["pairlists"] = [ {"method": "StaticPairList"}, - {"method": "ShuffleFilter", "seed": 43} + {"method": "ShuffleFilter", "seed": 43}, ] + whitelist_conf["runmode"] = "backtest" exchange = get_patched_exchange(mocker, whitelist_conf) plm = PairListManager(exchange, whitelist_conf) @@ -875,7 +1288,7 @@ def test_ShuffleFilter_init(mocker, whitelist_conf, caplog) -> None: assert plm.whitelist != pl1 caplog.clear() - whitelist_conf['runmode'] = RunMode.DRY_RUN + whitelist_conf["runmode"] = RunMode.DRY_RUN plm = PairListManager(exchange, whitelist_conf) assert not log_has("Backtesting mode detected, applying seed value: 42", caplog) assert log_has("Live mode detected, not applying seed.", caplog) @@ -883,98 +1296,122 @@ def test_ShuffleFilter_init(mocker, whitelist_conf, caplog) -> None: @pytest.mark.usefixtures("init_persistence") def test_PerformanceFilter_lookback(mocker, default_conf_usdt, fee, caplog) -> None: - default_conf_usdt['exchange']['pair_whitelist'].extend(['ADA/USDT', 'XRP/USDT', 'ETC/USDT']) - default_conf_usdt['pairlists'] = [ + default_conf_usdt["exchange"]["pair_whitelist"].extend(["ADA/USDT", "XRP/USDT", "ETC/USDT"]) + default_conf_usdt["pairlists"] = [ {"method": "StaticPairList"}, - {"method": "PerformanceFilter", "minutes": 60, "min_profit": 0.01} + {"method": "PerformanceFilter", "minutes": 60, "min_profit": 0.01}, ] - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) exchange = get_patched_exchange(mocker, default_conf_usdt) pm = PairListManager(exchange, default_conf_usdt) pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'] + assert pm.whitelist == ["ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"] with time_machine.travel("2021-09-01 05:00:00 +00:00") as t: create_mock_trades_usdt(fee) pm.refresh_pairlist() - assert pm.whitelist == ['XRP/USDT', 'NEO/USDT'] - assert log_has_re(r'Removing pair .* since .* is below .*', caplog) + assert pm.whitelist == ["XRP/USDT", "NEO/USDT"] + assert log_has_re(r"Removing pair .* since .* is below .*", caplog) # Move to "outside" of lookback window, so original sorting is restored. 
t.move_to("2021-09-01 07:00:00 +00:00") pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'] + assert pm.whitelist == ["ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"] @pytest.mark.usefixtures("init_persistence") def test_PerformanceFilter_keep_mid_order(mocker, default_conf_usdt, fee, caplog) -> None: - default_conf_usdt['exchange']['pair_whitelist'].extend(['ADA/USDT', 'ETC/USDT']) - default_conf_usdt['pairlists'] = [ + default_conf_usdt["exchange"]["pair_whitelist"].extend(["ADA/USDT", "ETC/USDT"]) + default_conf_usdt["pairlists"] = [ {"method": "StaticPairList", "allow_inactive": True}, - {"method": "PerformanceFilter", "minutes": 60, } + { + "method": "PerformanceFilter", + "minutes": 60, + }, ] - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) exchange = get_patched_exchange(mocker, default_conf_usdt) pm = PairListManager(exchange, default_conf_usdt) pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'LTC/USDT', 'XRP/USDT', - 'NEO/USDT', 'TKN/USDT', 'ADA/USDT', 'ETC/USDT'] + assert pm.whitelist == [ + "ETH/USDT", + "LTC/USDT", + "XRP/USDT", + "NEO/USDT", + "TKN/USDT", + "ADA/USDT", + "ETC/USDT", + ] with time_machine.travel("2021-09-01 05:00:00 +00:00") as t: create_mock_trades_usdt(fee) pm.refresh_pairlist() - assert pm.whitelist == ['XRP/USDT', 'NEO/USDT', 'ETH/USDT', 'LTC/USDT', - 'TKN/USDT', 'ADA/USDT', 'ETC/USDT', ] + assert pm.whitelist == [ + "XRP/USDT", + "NEO/USDT", + "ETH/USDT", + "LTC/USDT", + "TKN/USDT", + "ADA/USDT", + "ETC/USDT", + ] # assert log_has_re(r'Removing pair .* since .* is below .*', caplog) # Move to "outside" of lookback window, so original sorting is restored. t.move_to("2021-09-01 07:00:00 +00:00") pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'LTC/USDT', 'XRP/USDT', - 'NEO/USDT', 'TKN/USDT', 'ADA/USDT', 'ETC/USDT'] + assert pm.whitelist == [ + "ETH/USDT", + "LTC/USDT", + "XRP/USDT", + "NEO/USDT", + "TKN/USDT", + "ADA/USDT", + "ETC/USDT", + ] def test_gen_pair_whitelist_not_supported(mocker, default_conf, tickers) -> None: - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}] + default_conf["pairlists"] = [{"method": "VolumePairList", "number_assets": 10}] - mocker.patch.multiple(EXMS, - get_tickers=tickers, - exchange_has=MagicMock(return_value=False), - ) + mocker.patch.multiple( + EXMS, + get_tickers=tickers, + exchange_has=MagicMock(return_value=False), + ) - with pytest.raises(OperationalException, - match=r'Exchange does not support dynamic whitelist.*'): + with pytest.raises( + OperationalException, match=r"Exchange does not support dynamic whitelist.*" + ): get_patched_freqtradebot(mocker, default_conf) def test_pair_whitelist_not_supported_Spread(mocker, default_conf, tickers) -> None: - default_conf['pairlists'] = [{'method': 'StaticPairList'}, {'method': 'SpreadFilter'}] + default_conf["pairlists"] = [{"method": "StaticPairList"}, {"method": "SpreadFilter"}] - mocker.patch.multiple(EXMS, - get_tickers=tickers, - exchange_has=MagicMock(return_value=False), - ) + mocker.patch.multiple( + EXMS, + get_tickers=tickers, + exchange_has=MagicMock(return_value=False), + ) - with pytest.raises(OperationalException, - match=r'Exchange does not support fetchTickers, .*'): + with pytest.raises(OperationalException, match=r"Exchange does not support fetchTickers, .*"): get_patched_freqtradebot(mocker, default_conf) - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - 
mocker.patch(f'{EXMS}.get_option', MagicMock(return_value=False)) - with pytest.raises(OperationalException, - match=r'.*requires exchange to have bid/ask data'): + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.get_option", MagicMock(return_value=False)) + with pytest.raises(OperationalException, match=r".*requires exchange to have bid/ask data"): get_patched_freqtradebot(mocker, default_conf) @pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) def test_pairlist_class(mocker, whitelist_conf, markets, pairlist): - whitelist_conf['pairlists'][0]['method'] = pairlist - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True) - ) + whitelist_conf["pairlists"][0]["method"] = pairlist + mocker.patch.multiple( + EXMS, markets=PropertyMock(return_value=markets), exchange_has=MagicMock(return_value=True) + ) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) assert freqtrade.pairlists.name_list == [pairlist] @@ -984,27 +1421,32 @@ def test_pairlist_class(mocker, whitelist_conf, markets, pairlist): @pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) -@pytest.mark.parametrize("whitelist,log_message", [ - (['ETH/BTC', 'TKN/BTC'], ""), - # TRX/ETH not in markets - (['ETH/BTC', 'TKN/BTC', 'TRX/ETH'], "is not compatible with exchange"), - # wrong stake - (['ETH/BTC', 'TKN/BTC', 'ETH/USDT'], "is not compatible with your stake currency"), - # BCH/BTC not available - (['ETH/BTC', 'TKN/BTC', 'BCH/BTC'], "is not compatible with exchange"), - # BTT/BTC is inactive - (['ETH/BTC', 'TKN/BTC', 'BTT/BTC'], "Market is not active"), - # XLTCUSDT is not a valid pair - (['ETH/BTC', 'TKN/BTC', 'XLTCUSDT'], "is not tradable with Freqtrade"), -]) -def test__whitelist_for_active_markets(mocker, whitelist_conf, markets, pairlist, whitelist, caplog, - log_message, tickers): - whitelist_conf['pairlists'][0]['method'] = pairlist - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) +@pytest.mark.parametrize( + "whitelist,log_message", + [ + (["ETH/BTC", "TKN/BTC"], ""), + # TRX/ETH not in markets + (["ETH/BTC", "TKN/BTC", "TRX/ETH"], "is not compatible with exchange"), + # wrong stake + (["ETH/BTC", "TKN/BTC", "ETH/USDT"], "is not compatible with your stake currency"), + # BCH/BTC not available + (["ETH/BTC", "TKN/BTC", "BCH/BTC"], "is not compatible with exchange"), + # BTT/BTC is inactive + (["ETH/BTC", "TKN/BTC", "BTT/BTC"], "Market is not active"), + # XLTCUSDT is not a valid pair + (["ETH/BTC", "TKN/BTC", "XLTCUSDT"], "is not tradable with Freqtrade"), + ], +) +def test__whitelist_for_active_markets( + mocker, whitelist_conf, markets, pairlist, whitelist, caplog, log_message, tickers +): + whitelist_conf["pairlists"][0]["method"] = pairlist + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) caplog.clear() @@ -1012,43 +1454,39 @@ def test__whitelist_for_active_markets(mocker, whitelist_conf, markets, pairlist pairlist_handler = freqtrade.pairlists._pairlist_handlers[0] new_whitelist = pairlist_handler._whitelist_for_active_markets(whitelist) - assert set(new_whitelist) == set(['ETH/BTC', 'TKN/BTC']) + assert set(new_whitelist) == set(["ETH/BTC", "TKN/BTC"]) assert log_message in caplog.text @pytest.mark.parametrize("pairlist", TESTABLE_PAIRLISTS) def 
test__whitelist_for_active_markets_empty(mocker, whitelist_conf, pairlist, tickers): - whitelist_conf['pairlists'][0]['method'] = pairlist + whitelist_conf["pairlists"][0]["method"] = pairlist - mocker.patch(f'{EXMS}.exchange_has', return_value=True) + mocker.patch(f"{EXMS}.exchange_has", return_value=True) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=None), - get_tickers=tickers - ) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=None), get_tickers=tickers) # Assign starting whitelist pairlist_handler = freqtrade.pairlists._pairlist_handlers[0] - with pytest.raises(OperationalException, match=r'Markets not loaded.*'): - pairlist_handler._whitelist_for_active_markets(['ETH/BTC']) + with pytest.raises(OperationalException, match=r"Markets not loaded.*"): + pairlist_handler._whitelist_for_active_markets(["ETH/BTC"]) def test_volumepairlist_invalid_sortvalue(mocker, whitelist_conf): - whitelist_conf['pairlists'][0].update({"sort_key": "asdf"}) + whitelist_conf["pairlists"][0].update({"sort_key": "asdf"}) - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - with pytest.raises(OperationalException, - match=r"key asdf not in .*"): + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + with pytest.raises(OperationalException, match=r"key asdf not in .*"): get_patched_freqtradebot(mocker, whitelist_conf) def test_volumepairlist_caching(mocker, markets, whitelist_conf, tickers): - - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) assert len(freqtrade.pairlists._pairlist_handlers[0]._pair_cache) == 0 assert tickers.call_count == 0 @@ -1061,58 +1499,70 @@ def test_volumepairlist_caching(mocker, markets, whitelist_conf, tickers): def test_agefilter_min_days_listed_too_small(mocker, default_conf, markets, tickers): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'AgeFilter', 'min_days_listed': -1}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "AgeFilter", "min_days_listed": -1}, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - with pytest.raises(OperationalException, - match=r'AgeFilter requires min_days_listed to be >= 1'): + with pytest.raises( + OperationalException, match=r"AgeFilter requires min_days_listed to be >= 1" + ): get_patched_freqtradebot(mocker, default_conf) def test_agefilter_max_days_lower_than_min_days(mocker, default_conf, markets, tickers): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'AgeFilter', 'min_days_listed': 3, - "max_days_listed": 2}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "AgeFilter", "min_days_listed": 3, "max_days_listed": 2}, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + 
mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - with pytest.raises(OperationalException, - match=r'AgeFilter max_days_listed <= min_days_listed not permitted'): + with pytest.raises( + OperationalException, match=r"AgeFilter max_days_listed <= min_days_listed not permitted" + ): get_patched_freqtradebot(mocker, default_conf) def test_agefilter_min_days_listed_too_large(mocker, default_conf, markets, tickers): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'AgeFilter', 'min_days_listed': 99999}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "AgeFilter", "min_days_listed": 99999}, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - with pytest.raises(OperationalException, - match=r'AgeFilter requires min_days_listed to not exceed ' - r'exchange max request size \([0-9]+\)'): + with pytest.raises( + OperationalException, + match=r"AgeFilter requires min_days_listed to not exceed " + r"exchange max request size \([0-9]+\)", + ): get_patched_freqtradebot(mocker, default_conf) def test_agefilter_caching(mocker, markets, whitelist_conf_agefilter, tickers, ohlcv_history): with time_machine.travel("2021-09-01 05:00:00 +00:00") as t: ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', CandleType.SPOT): ohlcv_history, + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): ohlcv_history, } mocker.patch.multiple( EXMS, @@ -1134,19 +1584,19 @@ def test_agefilter_caching(mocker, markets, whitelist_conf_agefilter, tickers, o assert freqtrade.exchange.refresh_latest_ohlcv.call_count == 2 ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('XRP/BTC', '1d', CandleType.SPOT): ohlcv_history.iloc[[0]], + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("XRP/BTC", "1d", CandleType.SPOT): ohlcv_history.iloc[[0]], } - mocker.patch(f'{EXMS}.refresh_latest_ohlcv', return_value=ohlcv_data) + mocker.patch(f"{EXMS}.refresh_latest_ohlcv", return_value=ohlcv_data) freqtrade.pairlists.refresh_pairlist() assert len(freqtrade.pairlists.whitelist) == 3 assert freqtrade.exchange.refresh_latest_ohlcv.call_count == 1 # Move to next day t.move_to("2021-09-02 01:00:00 +00:00") - mocker.patch(f'{EXMS}.refresh_latest_ohlcv', return_value=ohlcv_data) + mocker.patch(f"{EXMS}.refresh_latest_ohlcv", return_value=ohlcv_data) freqtrade.pairlists.refresh_pairlist() assert len(freqtrade.pairlists.whitelist) == 3 assert freqtrade.exchange.refresh_latest_ohlcv.call_count == 1 @@ -1155,12 +1605,12 @@ def test_agefilter_caching(mocker, markets, whitelist_conf_agefilter, tickers, o t.move_to("2021-09-03 01:00:00 +00:00") # Called once for XRP/BTC ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', 
CandleType.SPOT): ohlcv_history, - ('XRP/BTC', '1d', CandleType.SPOT): ohlcv_history, + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("XRP/BTC", "1d", CandleType.SPOT): ohlcv_history, } - mocker.patch(f'{EXMS}.refresh_latest_ohlcv', return_value=ohlcv_data) + mocker.patch(f"{EXMS}.refresh_latest_ohlcv", return_value=ohlcv_data) freqtrade.pairlists.refresh_pairlist() assert len(freqtrade.pairlists.whitelist) == 4 # Called once (only for XRP/BTC) @@ -1168,70 +1618,99 @@ def test_agefilter_caching(mocker, markets, whitelist_conf_agefilter, tickers, o def test_OffsetFilter_error(mocker, whitelist_conf) -> None: - whitelist_conf['pairlists'] = ( - [{"method": "StaticPairList"}, {"method": "OffsetFilter", "offset": -1}] - ) + whitelist_conf["pairlists"] = [ + {"method": "StaticPairList"}, + {"method": "OffsetFilter", "offset": -1}, + ] - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) - with pytest.raises(OperationalException, - match=r'OffsetFilter requires offset to be >= 0'): + with pytest.raises(OperationalException, match=r"OffsetFilter requires offset to be >= 0"): PairListManager(MagicMock, whitelist_conf) def test_rangestabilityfilter_checks(mocker, default_conf, markets, tickers): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'RangeStabilityFilter', 'lookback_days': 99999}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "RangeStabilityFilter", "lookback_days": 99999}, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - with pytest.raises(OperationalException, - match=r'RangeStabilityFilter requires lookback_days to not exceed ' - r'exchange max request size \([0-9]+\)'): + with pytest.raises( + OperationalException, + match=r"RangeStabilityFilter requires lookback_days to not exceed " + r"exchange max request size \([0-9]+\)", + ): get_patched_freqtradebot(mocker, default_conf) - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'RangeStabilityFilter', 'lookback_days': 0}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "RangeStabilityFilter", "lookback_days": 0}, + ] - with pytest.raises(OperationalException, - match='RangeStabilityFilter requires lookback_days to be >= 1'): + with pytest.raises( + OperationalException, match="RangeStabilityFilter requires lookback_days to be >= 1" + ): get_patched_freqtradebot(mocker, default_conf) - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'RangeStabilityFilter', 'sort_direction': 'something'}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "RangeStabilityFilter", "sort_direction": "something"}, + ] - with pytest.raises(OperationalException, - match='RangeStabilityFilter requires sort_direction to be either None.*'): + with pytest.raises( + OperationalException, + match="RangeStabilityFilter requires sort_direction to be either None.*", + ): get_patched_freqtradebot(mocker, default_conf) 
-@pytest.mark.parametrize('min_rate_of_change,max_rate_of_change,expected_length', [ - (0.01, 0.99, 5), - (0.05, 0.0, 0), # Setting min rate_of_change to 5% removes all pairs from the whitelist. -]) -def test_rangestabilityfilter_caching(mocker, markets, default_conf, tickers, ohlcv_history, - min_rate_of_change, max_rate_of_change, expected_length): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'RangeStabilityFilter', 'lookback_days': 2, - 'min_rate_of_change': min_rate_of_change, - "max_rate_of_change": max_rate_of_change}] +@pytest.mark.parametrize( + "min_rate_of_change,max_rate_of_change,expected_length", + [ + (0.01, 0.99, 5), + (0.05, 0.0, 0), # Setting min rate_of_change to 5% removes all pairs from the whitelist. + ], +) +def test_rangestabilityfilter_caching( + mocker, + markets, + default_conf, + tickers, + ohlcv_history, + min_rate_of_change, + max_rate_of_change, + expected_length, +): + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + { + "method": "RangeStabilityFilter", + "lookback_days": 2, + "min_rate_of_change": min_rate_of_change, + "max_rate_of_change": max_rate_of_change, + }, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) ohlcv_data = { - ('ETH/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('TKN/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('LTC/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('XRP/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('HOT/BTC', '1d', CandleType.SPOT): ohlcv_history, - ('BLK/BTC', '1d', CandleType.SPOT): ohlcv_history, + ("ETH/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("TKN/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("LTC/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("XRP/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("HOT/BTC", "1d", CandleType.SPOT): ohlcv_history, + ("BLK/BTC", "1d", CandleType.SPOT): ohlcv_history, } mocker.patch.multiple( EXMS, @@ -1252,367 +1731,447 @@ def test_rangestabilityfilter_caching(mocker, markets, default_conf, tickers, oh def test_spreadfilter_invalid_data(mocker, default_conf, markets, tickers, caplog): - default_conf['pairlists'] = [{'method': 'VolumePairList', 'number_assets': 10}, - {'method': 'SpreadFilter', 'max_spread_ratio': 0.1}] + default_conf["pairlists"] = [ + {"method": "VolumePairList", "number_assets": 10}, + {"method": "SpreadFilter", "max_spread_ratio": 0.1}, + ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) ftbot = get_patched_freqtradebot(mocker, default_conf) ftbot.pairlists.refresh_pairlist() assert len(ftbot.pairlists.whitelist) == 5 - tickers.return_value['ETH/BTC']['ask'] = 0.0 - del tickers.return_value['TKN/BTC'] - del tickers.return_value['LTC/BTC'] + tickers.return_value["ETH/BTC"]["ask"] = 0.0 + del tickers.return_value["TKN/BTC"] + del tickers.return_value["LTC/BTC"] mocker.patch.multiple(EXMS, get_tickers=tickers) ftbot.pairlists.refresh_pairlist() - assert log_has_re(r'Removed .* invalid ticker data.*', caplog) + assert log_has_re(r"Removed .* invalid ticker data.*", caplog) assert 
len(ftbot.pairlists.whitelist) == 2 -@pytest.mark.parametrize("pairlistconfig,desc_expected,exception_expected", [ - ({"method": "PriceFilter", "low_price_ratio": 0.001, "min_price": 0.00000010, - "max_price": 1.0}, - "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below " - "0.1% or below 0.00000010 or above 1.00000000.'}]", - None - ), - ({"method": "PriceFilter", "low_price_ratio": 0.001, "min_price": 0.00000010}, - "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.1% or below 0.00000010.'}]", - None - ), - ({"method": "PriceFilter", "low_price_ratio": 0.001, "max_price": 1.00010000}, - "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.1% or above 1.00010000.'}]", - None - ), - ({"method": "PriceFilter", "min_price": 0.00002000}, - "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.00002000.'}]", - None - ), - ({"method": "PriceFilter", "max_value": 0.00002000}, - "[{'PriceFilter': 'PriceFilter - Filtering pairs priced Value above 0.00002000.'}]", - None - ), - ({"method": "PriceFilter"}, - "[{'PriceFilter': 'PriceFilter - No price filters configured.'}]", - None - ), - ({"method": "PriceFilter", "low_price_ratio": -0.001}, - None, - "PriceFilter requires low_price_ratio to be >= 0" - ), # OperationalException expected - ({"method": "PriceFilter", "min_price": -0.00000010}, - None, - "PriceFilter requires min_price to be >= 0" - ), # OperationalException expected - ({"method": "PriceFilter", "max_price": -1.00010000}, - None, - "PriceFilter requires max_price to be >= 0" - ), # OperationalException expected - ({"method": "PriceFilter", "max_value": -1.00010000}, - None, - "PriceFilter requires max_value to be >= 0" - ), # OperationalException expected - ({"method": "RangeStabilityFilter", "lookback_days": 10, - "min_rate_of_change": 0.01}, - "[{'RangeStabilityFilter': 'RangeStabilityFilter - Filtering pairs with rate of change below " - "0.01 over the last days.'}]", - None - ), - ({"method": "RangeStabilityFilter", "lookback_days": 10, - "min_rate_of_change": 0.01, "max_rate_of_change": 0.99}, - "[{'RangeStabilityFilter': 'RangeStabilityFilter - Filtering pairs with rate of change below " - "0.01 and above 0.99 over the last days.'}]", - None - ), - ({"method": "OffsetFilter", "offset": 5, "number_assets": 10}, - "[{'OffsetFilter': 'OffsetFilter - Taking 10 Pairs, starting from 5.'}]", - None - ), - ({"method": "ProducerPairList"}, - "[{'ProducerPairList': 'ProducerPairList - default'}]", - None - ), - ({"method": "RemotePairList", "number_assets": 10, "pairlist_url": "https://example.com"}, - "[{'RemotePairList': 'RemotePairList - 10 pairs from RemotePairlist.'}]", - None - ), -]) -def test_pricefilter_desc(mocker, whitelist_conf, markets, pairlistconfig, - desc_expected, exception_expected): - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True) - ) - whitelist_conf['pairlists'] = [pairlistconfig] +@pytest.mark.parametrize( + "pairlistconfig,desc_expected,exception_expected", + [ + ( + { + "method": "PriceFilter", + "low_price_ratio": 0.001, + "min_price": 0.00000010, + "max_price": 1.0, + }, + "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below " + "0.1% or below 0.00000010 or above 1.00000000.'}]", + None, + ), + ( + {"method": "PriceFilter", "low_price_ratio": 0.001, "min_price": 0.00000010}, + "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.1% " + "or below 0.00000010.'}]", + None, + ), + ( + {"method": "PriceFilter", "low_price_ratio": 
0.001, "max_price": 1.00010000}, + "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.1% " + "or above 1.00010000.'}]", + None, + ), + ( + {"method": "PriceFilter", "min_price": 0.00002000}, + "[{'PriceFilter': 'PriceFilter - Filtering pairs priced below 0.00002000.'}]", + None, + ), + ( + {"method": "PriceFilter", "max_value": 0.00002000}, + "[{'PriceFilter': 'PriceFilter - Filtering pairs priced Value above 0.00002000.'}]", + None, + ), + ( + {"method": "PriceFilter"}, + "[{'PriceFilter': 'PriceFilter - No price filters configured.'}]", + None, + ), + ( + {"method": "PriceFilter", "low_price_ratio": -0.001}, + None, + "PriceFilter requires low_price_ratio to be >= 0", + ), # OperationalException expected + ( + {"method": "PriceFilter", "min_price": -0.00000010}, + None, + "PriceFilter requires min_price to be >= 0", + ), # OperationalException expected + ( + {"method": "PriceFilter", "max_price": -1.00010000}, + None, + "PriceFilter requires max_price to be >= 0", + ), # OperationalException expected + ( + {"method": "PriceFilter", "max_value": -1.00010000}, + None, + "PriceFilter requires max_value to be >= 0", + ), # OperationalException expected + ( + {"method": "RangeStabilityFilter", "lookback_days": 10, "min_rate_of_change": 0.01}, + "[{'RangeStabilityFilter': 'RangeStabilityFilter - Filtering pairs with rate " + "of change below 0.01 over the last days.'}]", + None, + ), + ( + { + "method": "RangeStabilityFilter", + "lookback_days": 10, + "min_rate_of_change": 0.01, + "max_rate_of_change": 0.99, + }, + "[{'RangeStabilityFilter': 'RangeStabilityFilter - Filtering pairs with rate " + "of change below 0.01 and above 0.99 over the last days.'}]", + None, + ), + ( + {"method": "OffsetFilter", "offset": 5, "number_assets": 10}, + "[{'OffsetFilter': 'OffsetFilter - Taking 10 Pairs, starting from 5.'}]", + None, + ), + ( + {"method": "ProducerPairList"}, + "[{'ProducerPairList': 'ProducerPairList - default'}]", + None, + ), + ( + { + "method": "RemotePairList", + "number_assets": 10, + "pairlist_url": "https://example.com", + }, + "[{'RemotePairList': 'RemotePairList - 10 pairs from RemotePairlist.'}]", + None, + ), + ], +) +def test_pricefilter_desc( + mocker, whitelist_conf, markets, pairlistconfig, desc_expected, exception_expected +): + mocker.patch.multiple( + EXMS, markets=PropertyMock(return_value=markets), exchange_has=MagicMock(return_value=True) + ) + whitelist_conf["pairlists"] = [pairlistconfig] if desc_expected is not None: freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) short_desc = str(freqtrade.pairlists.short_desc()) assert short_desc == desc_expected else: # OperationalException expected - with pytest.raises(OperationalException, - match=exception_expected): + with pytest.raises(OperationalException, match=exception_expected): freqtrade = get_patched_freqtradebot(mocker, whitelist_conf) def test_pairlistmanager_no_pairlist(mocker, whitelist_conf): - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) - whitelist_conf['pairlists'] = [] + whitelist_conf["pairlists"] = [] - with pytest.raises(OperationalException, - match=r"No Pairlist Handlers defined"): + with pytest.raises(OperationalException, match=r"No Pairlist Handlers defined"): get_patched_freqtradebot(mocker, whitelist_conf) -@pytest.mark.parametrize("pairlists,pair_allowlist,overall_performance,allowlist_result", [ - # No trades yet - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - 
['ETH/BTC', 'TKN/BTC', 'LTC/BTC'], [], ['ETH/BTC', 'TKN/BTC', 'LTC/BTC']), - # Happy path: Descending order, all values filled - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - ['ETH/BTC', 'TKN/BTC'], - [{'pair': 'TKN/BTC', 'profit_ratio': 0.05, 'count': 3}, - {'pair': 'ETH/BTC', 'profit_ratio': 0.04, 'count': 2}], - ['TKN/BTC', 'ETH/BTC']), - # Performance data outside allow list ignored - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - ['ETH/BTC', 'TKN/BTC'], - [{'pair': 'OTHER/BTC', 'profit_ratio': 0.05, 'count': 3}, - {'pair': 'ETH/BTC', 'profit_ratio': 0.04, 'count': 2}], - ['ETH/BTC', 'TKN/BTC']), - # Partial performance data missing and sorted between positive and negative profit - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - ['ETH/BTC', 'TKN/BTC', 'LTC/BTC'], - [{'pair': 'ETH/BTC', 'profit_ratio': -0.05, 'count': 100}, - {'pair': 'TKN/BTC', 'profit_ratio': 0.04, 'count': 2}], - ['TKN/BTC', 'LTC/BTC', 'ETH/BTC']), - # Tie in performance data broken by count (ascending) - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - ['ETH/BTC', 'TKN/BTC', 'LTC/BTC'], - [{'pair': 'LTC/BTC', 'profit_ratio': -0.0501, 'count': 101}, - {'pair': 'TKN/BTC', 'profit_ratio': -0.0501, 'count': 2}, - {'pair': 'ETH/BTC', 'profit_ratio': -0.0501, 'count': 100}], - ['TKN/BTC', 'ETH/BTC', 'LTC/BTC']), - # Tie in performance and count, broken by prior sorting sort - ([{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], - ['ETH/BTC', 'TKN/BTC', 'LTC/BTC'], - [{'pair': 'LTC/BTC', 'profit_ratio': -0.0501, 'count': 1}, - {'pair': 'TKN/BTC', 'profit_ratio': -0.0501, 'count': 1}, - {'pair': 'ETH/BTC', 'profit_ratio': -0.0501, 'count': 1}], - ['ETH/BTC', 'TKN/BTC', 'LTC/BTC']), -]) -def test_performance_filter(mocker, whitelist_conf, pairlists, pair_allowlist, overall_performance, - allowlist_result, tickers, markets, ohlcv_history_list): +@pytest.mark.parametrize( + "pairlists,pair_allowlist,overall_performance,allowlist_result", + [ + # No trades yet + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + [], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + ), + # Happy path: Descending order, all values filled + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC"], + [ + {"pair": "TKN/BTC", "profit_ratio": 0.05, "count": 3}, + {"pair": "ETH/BTC", "profit_ratio": 0.04, "count": 2}, + ], + ["TKN/BTC", "ETH/BTC"], + ), + # Performance data outside allow list ignored + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC"], + [ + {"pair": "OTHER/BTC", "profit_ratio": 0.05, "count": 3}, + {"pair": "ETH/BTC", "profit_ratio": 0.04, "count": 2}, + ], + ["ETH/BTC", "TKN/BTC"], + ), + # Partial performance data missing and sorted between positive and negative profit + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + [ + {"pair": "ETH/BTC", "profit_ratio": -0.05, "count": 100}, + {"pair": "TKN/BTC", "profit_ratio": 0.04, "count": 2}, + ], + ["TKN/BTC", "LTC/BTC", "ETH/BTC"], + ), + # Tie in performance data broken by count (ascending) + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + [ + {"pair": "LTC/BTC", "profit_ratio": -0.0501, "count": 101}, + {"pair": "TKN/BTC", "profit_ratio": -0.0501, "count": 2}, + {"pair": "ETH/BTC", "profit_ratio": -0.0501, "count": 100}, + ], + ["TKN/BTC", "ETH/BTC", 
"LTC/BTC"], + ), + # Tie in performance and count, broken by prior sorting sort + ( + [{"method": "StaticPairList"}, {"method": "PerformanceFilter"}], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + [ + {"pair": "LTC/BTC", "profit_ratio": -0.0501, "count": 1}, + {"pair": "TKN/BTC", "profit_ratio": -0.0501, "count": 1}, + {"pair": "ETH/BTC", "profit_ratio": -0.0501, "count": 1}, + ], + ["ETH/BTC", "TKN/BTC", "LTC/BTC"], + ), + ], +) +def test_performance_filter( + mocker, + whitelist_conf, + pairlists, + pair_allowlist, + overall_performance, + allowlist_result, + tickers, + markets, + ohlcv_history_list, +): allowlist_conf = whitelist_conf - allowlist_conf['pairlists'] = pairlists - allowlist_conf['exchange']['pair_whitelist'] = pair_allowlist + allowlist_conf["pairlists"] = pairlists + allowlist_conf["exchange"]["pair_whitelist"] = pair_allowlist - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) freqtrade = get_patched_freqtradebot(mocker, allowlist_conf) - mocker.patch.multiple(EXMS, - get_tickers=tickers, - markets=PropertyMock(return_value=markets) - ) - mocker.patch.multiple(EXMS, - get_historic_ohlcv=MagicMock(return_value=ohlcv_history_list), - ) - mocker.patch.multiple('freqtrade.persistence.Trade', - get_overall_performance=MagicMock(return_value=overall_performance), - ) + mocker.patch.multiple(EXMS, get_tickers=tickers, markets=PropertyMock(return_value=markets)) + mocker.patch.multiple( + EXMS, + get_historic_ohlcv=MagicMock(return_value=ohlcv_history_list), + ) + mocker.patch.multiple( + "freqtrade.persistence.Trade", + get_overall_performance=MagicMock(return_value=overall_performance), + ) freqtrade.pairlists.refresh_pairlist() allowlist = freqtrade.pairlists.whitelist assert allowlist == allowlist_result -@pytest.mark.parametrize('wildcardlist,pairs,expected', [ - (['BTC/USDT'], - ['BTC/USDT'], - ['BTC/USDT']), - (['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETH/USDT']), - (['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT'], ['BTC/USDT']), # Test one too many - (['.*/USDT'], - ['BTC/USDT', 'ETH/USDT'], ['BTC/USDT', 'ETH/USDT']), # Wildcard simple - (['.*C/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT'], ['BTC/USDT', 'ETC/USDT']), # Wildcard exclude one - (['.*UP/USDT', 'BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT', 'XRPDOWN/USDT'], - ['BTC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT']), # Wildcard exclude one - (['BTC/.*', 'ETH/.*'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTC/USD', 'ETH/EUR', 'BTC/GBP'], - ['BTC/USDT', 'ETH/USDT', 'BTC/USD', 'ETH/EUR', 'BTC/GBP']), # Wildcard exclude one - (['*UP/USDT', 'BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT', 'XRPDOWN/USDT'], - None), - (['BTC/USD'], - ['BTC/USD', 'BTC/USDT'], - ['BTC/USD']), -]) +@pytest.mark.parametrize( + "wildcardlist,pairs,expected", + [ + (["BTC/USDT"], ["BTC/USDT"], ["BTC/USDT"]), + (["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"]), + (["BTC/USDT", "ETH/USDT"], ["BTC/USDT"], ["BTC/USDT"]), # Test one too many + ([".*/USDT"], ["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"]), # Wildcard simple + ( + [".*C/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT"], + ), # Wildcard exclude one + ( + [".*UP/USDT", "BTC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT", "XRPDOWN/USDT"], + ["BTC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT"], + ), # 
Wildcard exclude one + ( + ["BTC/.*", "ETH/.*"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTC/USD", "ETH/EUR", "BTC/GBP"], + ["BTC/USDT", "ETH/USDT", "BTC/USD", "ETH/EUR", "BTC/GBP"], + ), # Wildcard exclude one + ( + ["*UP/USDT", "BTC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT", "XRPDOWN/USDT"], + None, + ), + (["BTC/USD"], ["BTC/USD", "BTC/USDT"], ["BTC/USD"]), + ], +) def test_expand_pairlist(wildcardlist, pairs, expected): if expected is None: - with pytest.raises(ValueError, match=r'Wildcard error in \*UP/USDT,'): + with pytest.raises(ValueError, match=r"Wildcard error in \*UP/USDT,"): expand_pairlist(wildcardlist, pairs) else: assert sorted(expand_pairlist(wildcardlist, pairs)) == sorted(expected) conf = { - 'pairs': wildcardlist, - 'freqai': { + "pairs": wildcardlist, + "freqai": { "enabled": True, "feature_parameters": { "include_corr_pairlist": [ "BTC/USDT:USDT", "XRP/BUSD", ] - } - } + }, + }, } - assert sorted(dynamic_expand_pairlist(conf, pairs)) == sorted(expected + [ - "BTC/USDT:USDT", - "XRP/BUSD", - ]) + assert sorted(dynamic_expand_pairlist(conf, pairs)) == sorted( + expected + + [ + "BTC/USDT:USDT", + "XRP/BUSD", + ] + ) -@pytest.mark.parametrize('wildcardlist,pairs,expected', [ - (['BTC/USDT'], - ['BTC/USDT'], - ['BTC/USDT']), - (['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETH/USDT']), - (['BTC/USDT', 'ETH/USDT'], - ['BTC/USDT'], ['BTC/USDT', 'ETH/USDT']), # Test one too many - (['.*/USDT'], - ['BTC/USDT', 'ETH/USDT'], ['BTC/USDT', 'ETH/USDT']), # Wildcard simple - (['.*C/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT'], ['BTC/USDT', 'ETC/USDT']), # Wildcard exclude one - (['.*UP/USDT', 'BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT', 'XRPDOWN/USDT'], - ['BTC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT']), # Wildcard exclude one - (['BTC/.*', 'ETH/.*'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTC/USD', 'ETH/EUR', 'BTC/GBP'], - ['BTC/USDT', 'ETH/USDT', 'BTC/USD', 'ETH/EUR', 'BTC/GBP']), # Wildcard exclude one - (['*UP/USDT', 'BTC/USDT', 'ETH/USDT'], - ['BTC/USDT', 'ETC/USDT', 'ETH/USDT', 'BTCUP/USDT', 'XRPUP/USDT', 'XRPDOWN/USDT'], - None), - (['HELLO/WORLD'], [], ['HELLO/WORLD']), # Invalid pair kept - (['BTC/USD'], - ['BTC/USD', 'BTC/USDT'], - ['BTC/USD']), - (['BTC/USDT:USDT'], - ['BTC/USDT:USDT', 'BTC/USDT'], - ['BTC/USDT:USDT']), - (['BB_BTC/USDT', 'CC_BTC/USDT', 'AA_ETH/USDT', 'XRP/USDT', 'ETH/USDT', 'XX_BTC/USDT'], - ['BTC/USDT', 'ETH/USDT'], - ['XRP/USDT', 'ETH/USDT']), -]) +@pytest.mark.parametrize( + "wildcardlist,pairs,expected", + [ + (["BTC/USDT"], ["BTC/USDT"], ["BTC/USDT"]), + (["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"]), + (["BTC/USDT", "ETH/USDT"], ["BTC/USDT"], ["BTC/USDT", "ETH/USDT"]), # Test one too many + ([".*/USDT"], ["BTC/USDT", "ETH/USDT"], ["BTC/USDT", "ETH/USDT"]), # Wildcard simple + ( + [".*C/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT"], + ), # Wildcard exclude one + ( + [".*UP/USDT", "BTC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT", "XRPDOWN/USDT"], + ["BTC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT"], + ), # Wildcard exclude one + ( + ["BTC/.*", "ETH/.*"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTC/USD", "ETH/EUR", "BTC/GBP"], + ["BTC/USDT", "ETH/USDT", "BTC/USD", "ETH/EUR", "BTC/GBP"], + ), # Wildcard exclude one + ( + ["*UP/USDT", "BTC/USDT", "ETH/USDT"], + ["BTC/USDT", "ETC/USDT", "ETH/USDT", "BTCUP/USDT", "XRPUP/USDT", 
"XRPDOWN/USDT"], + None, + ), + (["HELLO/WORLD"], [], ["HELLO/WORLD"]), # Invalid pair kept + (["BTC/USD"], ["BTC/USD", "BTC/USDT"], ["BTC/USD"]), + (["BTC/USDT:USDT"], ["BTC/USDT:USDT", "BTC/USDT"], ["BTC/USDT:USDT"]), + ( + ["BB_BTC/USDT", "CC_BTC/USDT", "AA_ETH/USDT", "XRP/USDT", "ETH/USDT", "XX_BTC/USDT"], + ["BTC/USDT", "ETH/USDT"], + ["XRP/USDT", "ETH/USDT"], + ), + ], +) def test_expand_pairlist_keep_invalid(wildcardlist, pairs, expected): if expected is None: - with pytest.raises(ValueError, match=r'Wildcard error in \*UP/USDT,'): + with pytest.raises(ValueError, match=r"Wildcard error in \*UP/USDT,"): expand_pairlist(wildcardlist, pairs, keep_invalid=True) else: assert sorted(expand_pairlist(wildcardlist, pairs, keep_invalid=True)) == sorted(expected) def test_ProducerPairlist_no_emc(mocker, whitelist_conf): - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) - whitelist_conf['pairlists'] = [ + whitelist_conf["pairlists"] = [ { "method": "ProducerPairList", "number_assets": 10, "producer_name": "hello_world", } ] - del whitelist_conf['external_message_consumer'] + del whitelist_conf["external_message_consumer"] - with pytest.raises(OperationalException, - match=r"ProducerPairList requires external_message_consumer to be enabled."): + with pytest.raises( + OperationalException, + match=r"ProducerPairList requires external_message_consumer to be enabled.", + ): get_patched_freqtradebot(mocker, whitelist_conf) def test_ProducerPairlist(mocker, whitelist_conf, markets): - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - ) - whitelist_conf['pairlists'] = [ + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + ) + whitelist_conf["pairlists"] = [ { "method": "ProducerPairList", "number_assets": 2, "producer_name": "hello_world", } ] - whitelist_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "hello_world", - "host": "null", - "port": 9891, - "ws_token": "dummy", - } - ] + whitelist_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "hello_world", + "host": "null", + "port": 9891, + "ws_token": "dummy", + } + ], + } } - }) + ) exchange = get_patched_exchange(mocker, whitelist_conf) dp = DataProvider(whitelist_conf, exchange, None) - pairs = ['ETH/BTC', 'LTC/BTC', 'XRP/BTC'] + pairs = ["ETH/BTC", "LTC/BTC", "XRP/BTC"] # different producer - dp._set_producer_pairs(pairs + ['MEEP/USDT'], 'default') + dp._set_producer_pairs(pairs + ["MEEP/USDT"], "default") pm = PairListManager(exchange, whitelist_conf, dp) pm.refresh_pairlist() assert pm.whitelist == [] # proper producer - dp._set_producer_pairs(pairs, 'hello_world') + dp._set_producer_pairs(pairs, "hello_world") pm.refresh_pairlist() # Pairlist reduced to 2 assert pm.whitelist == pairs[:2] assert len(pm.whitelist) == 2 - whitelist_conf['exchange']['pair_whitelist'] = ['TKN/BTC'] + whitelist_conf["exchange"]["pair_whitelist"] = ["TKN/BTC"] - whitelist_conf['pairlists'] = [ + whitelist_conf["pairlists"] = [ {"method": "StaticPairList"}, { "method": "ProducerPairList", "producer_name": "hello_world", - } + }, ] pm = PairListManager(exchange, whitelist_conf, dp) pm.refresh_pairlist() 
assert len(pm.whitelist) == 4 - assert pm.whitelist == ['TKN/BTC'] + pairs + assert pm.whitelist == ["TKN/BTC"] + pairs @pytest.mark.usefixtures("init_persistence") def test_FullTradesFilter(mocker, default_conf_usdt, fee, caplog) -> None: - default_conf_usdt['exchange']['pair_whitelist'].extend(['ADA/USDT', 'XRP/USDT', 'ETC/USDT']) - default_conf_usdt['pairlists'] = [ - {"method": "StaticPairList"}, - {"method": "FullTradesFilter"} - ] - default_conf_usdt['max_open_trades'] = -1 - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) + default_conf_usdt["exchange"]["pair_whitelist"].extend(["ADA/USDT", "XRP/USDT", "ETC/USDT"]) + default_conf_usdt["pairlists"] = [{"method": "StaticPairList"}, {"method": "FullTradesFilter"}] + default_conf_usdt["max_open_trades"] = -1 + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) exchange = get_patched_exchange(mocker, default_conf_usdt) pm = PairListManager(exchange, default_conf_usdt) pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'] + assert pm.whitelist == ["ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"] with time_machine.travel("2021-09-01 05:00:00 +00:00") as t: create_mock_trades_usdt(fee) @@ -1620,13 +2179,13 @@ def test_FullTradesFilter(mocker, default_conf_usdt, fee, caplog) -> None: # Unlimited max open trades, so no change to whitelist pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'] + assert pm.whitelist == ["ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"] # Set max_open_trades to 4, the filter should empty the whitelist - default_conf_usdt['max_open_trades'] = 4 + default_conf_usdt["max_open_trades"] = 4 pm.refresh_pairlist() assert pm.whitelist == [] - assert log_has_re(r'Whitelist with 0 pairs: \[]', caplog) + assert log_has_re(r"Whitelist with 0 pairs: \[]", caplog) list_trades = LocalTrade.get_open_trades() assert len(list_trades) == 4 @@ -1640,56 +2199,90 @@ def test_FullTradesFilter(mocker, default_conf_usdt, fee, caplog) -> None: list_trades = LocalTrade.get_open_trades() assert len(list_trades) == 3 pm.refresh_pairlist() - assert pm.whitelist == ['ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'] + assert pm.whitelist == ["ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"] # Set max_open_trades to 3, the filter should empty the whitelist - default_conf_usdt['max_open_trades'] = 3 + default_conf_usdt["max_open_trades"] = 3 pm.refresh_pairlist() assert pm.whitelist == [] - assert log_has_re(r'Whitelist with 0 pairs: \[]', caplog) + assert log_has_re(r"Whitelist with 0 pairs: \[]", caplog) -@pytest.mark.parametrize('pairlists,trade_mode,result', [ - ([ - # Get 2 pairs - {"method": "StaticPairList", "allow_inactive": True}, - {"method": "MarketCapPairList", "number_assets": 2} - ], 'spot', ['BTC/USDT', 'ETH/USDT']), - ([ - # Get 6 pairs - {"method": "StaticPairList", "allow_inactive": True}, - {"method": "MarketCapPairList", "number_assets": 6} - ], 'spot', ['BTC/USDT', 'ETH/USDT', 'XRP/USDT', 'ADA/USDT']), - ([ - # Get 3 pairs within top 6 ranks - {"method": "StaticPairList", "allow_inactive": True}, - {"method": "MarketCapPairList", "max_rank": 6, "number_assets": 3} - ], 'spot', ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']), - - ([ - # Get 4 pairs within top 8 ranks - {"method": "StaticPairList", "allow_inactive": True}, - {"method": "MarketCapPairList", "max_rank": 8, "number_assets": 4} - ], 'spot', ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']), - ([ - # MarketCapPairList as generator - {"method": "MarketCapPairList", 
"number_assets": 5} - ], 'spot', ['BTC/USDT', 'ETH/USDT', 'XRP/USDT']), - ([ - # MarketCapPairList as generator - low max_rank - {"method": "MarketCapPairList", "max_rank": 2, "number_assets": 5} - ], 'spot', ['BTC/USDT', 'ETH/USDT']), - ([ - # MarketCapPairList as generator - futures - low max_rank - {"method": "MarketCapPairList", "max_rank": 2, "number_assets": 5} - ], 'futures', ['ETH/USDT:USDT']), - ([ - # MarketCapPairList as generator - futures - low number_assets - {"method": "MarketCapPairList", "number_assets": 2} - ], 'futures', ['ETH/USDT:USDT', 'ADA/USDT:USDT']), -]) +@pytest.mark.parametrize( + "pairlists,trade_mode,result", + [ + ( + [ + # Get 2 pairs + {"method": "StaticPairList", "allow_inactive": True}, + {"method": "MarketCapPairList", "number_assets": 2}, + ], + "spot", + ["BTC/USDT", "ETH/USDT"], + ), + ( + [ + # Get 6 pairs + {"method": "StaticPairList", "allow_inactive": True}, + {"method": "MarketCapPairList", "number_assets": 6}, + ], + "spot", + ["BTC/USDT", "ETH/USDT", "XRP/USDT", "ADA/USDT"], + ), + ( + [ + # Get 3 pairs within top 6 ranks + {"method": "StaticPairList", "allow_inactive": True}, + {"method": "MarketCapPairList", "max_rank": 6, "number_assets": 3}, + ], + "spot", + ["BTC/USDT", "ETH/USDT", "XRP/USDT"], + ), + ( + [ + # Get 4 pairs within top 8 ranks + {"method": "StaticPairList", "allow_inactive": True}, + {"method": "MarketCapPairList", "max_rank": 8, "number_assets": 4}, + ], + "spot", + ["BTC/USDT", "ETH/USDT", "XRP/USDT"], + ), + ( + [ + # MarketCapPairList as generator + {"method": "MarketCapPairList", "number_assets": 5} + ], + "spot", + ["BTC/USDT", "ETH/USDT", "XRP/USDT"], + ), + ( + [ + # MarketCapPairList as generator - low max_rank + {"method": "MarketCapPairList", "max_rank": 2, "number_assets": 5} + ], + "spot", + ["BTC/USDT", "ETH/USDT"], + ), + ( + [ + # MarketCapPairList as generator - futures - low max_rank + {"method": "MarketCapPairList", "max_rank": 2, "number_assets": 5} + ], + "futures", + ["ETH/USDT:USDT"], + ), + ( + [ + # MarketCapPairList as generator - futures - low number_assets + {"method": "MarketCapPairList", "number_assets": 2} + ], + "futures", + ["ETH/USDT:USDT", "ADA/USDT:USDT"], + ), + ], +) def test_MarketCapPairList_filter( - mocker, default_conf_usdt, trade_mode, markets, pairlists, result + mocker, default_conf_usdt, trade_mode, markets, pairlists, result ): test_value = [ {"symbol": "btc"}, @@ -1704,17 +2297,20 @@ def test_MarketCapPairList_filter( {"symbol": "avax"}, ] - default_conf_usdt['trading_mode'] = trade_mode - if trade_mode == 'spot': - default_conf_usdt['exchange']['pair_whitelist'].extend(['BTC/USDT', 'ETC/USDT', 'ADA/USDT']) - default_conf_usdt['pairlists'] = pairlists - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - ) + default_conf_usdt["trading_mode"] = trade_mode + if trade_mode == "spot": + default_conf_usdt["exchange"]["pair_whitelist"].extend(["BTC/USDT", "ETC/USDT", "ADA/USDT"]) + default_conf_usdt["pairlists"] = pairlists + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + ) - mocker.patch("freqtrade.plugins.pairlist.MarketCapPairList.CoinGeckoAPI.get_coins_markets", - return_value=test_value) + mocker.patch( + "freqtrade.plugins.pairlist.MarketCapPairList.FtCoinGeckoApi.get_coins_markets", + return_value=test_value, + ) exchange = get_patched_exchange(mocker, default_conf_usdt) @@ -1738,18 +2334,21 @@ def 
test_MarketCapPairList_timing(mocker, default_conf_usdt, markets, time_machi {"symbol": "avax"}, ] - default_conf_usdt['trading_mode'] = 'spot' - default_conf_usdt['exchange']['pair_whitelist'].extend(['BTC/USDT', 'ETC/USDT', 'ADA/USDT']) - default_conf_usdt['pairlists'] = [{"method": "MarketCapPairList", "number_assets": 2}] + default_conf_usdt["trading_mode"] = "spot" + default_conf_usdt["exchange"]["pair_whitelist"].extend(["BTC/USDT", "ETC/USDT", "ADA/USDT"]) + default_conf_usdt["pairlists"] = [{"method": "MarketCapPairList", "number_assets": 2}] markets_mock = MagicMock(return_value=markets) - mocker.patch.multiple(EXMS, - get_markets=markets_mock, - exchange_has=MagicMock(return_value=True), - ) + mocker.patch.multiple( + EXMS, + get_markets=markets_mock, + exchange_has=MagicMock(return_value=True), + ) - mocker.patch("freqtrade.plugins.pairlist.MarketCapPairList.CoinGeckoAPI.get_coins_markets", - return_value=test_value) + mocker.patch( + "freqtrade.plugins.pairlist.MarketCapPairList.FtCoinGeckoApi.get_coins_markets", + return_value=test_value, + ) start_dt = dt_now() @@ -1774,17 +2373,17 @@ def test_MarketCapPairList_timing(mocker, default_conf_usdt, markets, time_machi assert markets_mock.call_count == 3 -def test_MarketCapPairList_exceptions(mocker, default_conf_usdt, markets, time_machine): - +def test_MarketCapPairList_exceptions(mocker, default_conf_usdt): exchange = get_patched_exchange(mocker, default_conf_usdt) - default_conf_usdt['pairlists'] = [{"method": "MarketCapPairList"}] + default_conf_usdt["pairlists"] = [{"method": "MarketCapPairList"}] with pytest.raises(OperationalException, match=r"`number_assets` not specified.*"): # No number_assets PairListManager(exchange, default_conf_usdt) - default_conf_usdt['pairlists'] = [{ - "method": "MarketCapPairList", 'number_assets': 20, 'max_rank': 260 - }] - with pytest.raises(OperationalException, - match="This filter only support marketcap rank up to 250."): + default_conf_usdt["pairlists"] = [ + {"method": "MarketCapPairList", "number_assets": 20, "max_rank": 260} + ] + with pytest.raises( + OperationalException, match="This filter only support marketcap rank up to 250." 
+ ): PairListManager(exchange, default_conf_usdt) diff --git a/tests/plugins/test_pairlocks.py b/tests/plugins/test_pairlocks.py index 6e209df60..0102079fe 100644 --- a/tests/plugins/test_pairlocks.py +++ b/tests/plugins/test_pairlocks.py @@ -7,10 +7,10 @@ from freqtrade.persistence.models import PairLock from freqtrade.util import dt_now -@pytest.mark.parametrize('use_db', (False, True)) +@pytest.mark.parametrize("use_db", (False, True)) @pytest.mark.usefixtures("init_persistence") def test_PairLocks(use_db): - PairLocks.timeframe = '5m' + PairLocks.timeframe = "5m" PairLocks.use_db = use_db # No lock should be present if use_db: @@ -18,28 +18,28 @@ def test_PairLocks(use_db): assert PairLocks.use_db == use_db - pair = 'ETH/BTC' + pair = "ETH/BTC" assert not PairLocks.is_pair_locked(pair) PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4)) # ETH/BTC locked for 4 minutes (on both sides) assert PairLocks.is_pair_locked(pair) - assert PairLocks.is_pair_locked(pair, side='long') - assert PairLocks.is_pair_locked(pair, side='short') + assert PairLocks.is_pair_locked(pair, side="long") + assert PairLocks.is_pair_locked(pair, side="short") - pair = 'BNB/BTC' - PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4), side='long') + pair = "BNB/BTC" + PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4), side="long") assert not PairLocks.is_pair_locked(pair) - assert PairLocks.is_pair_locked(pair, side='long') - assert not PairLocks.is_pair_locked(pair, side='short') + assert PairLocks.is_pair_locked(pair, side="long") + assert not PairLocks.is_pair_locked(pair, side="short") - pair = 'BNB/USDT' - PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4), side='short') + pair = "BNB/USDT" + PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4), side="short") assert not PairLocks.is_pair_locked(pair) - assert not PairLocks.is_pair_locked(pair, side='long') - assert PairLocks.is_pair_locked(pair, side='short') + assert not PairLocks.is_pair_locked(pair, side="long") + assert PairLocks.is_pair_locked(pair, side="short") # XRP/BTC should not be locked now - pair = 'XRP/BTC' + pair = "XRP/BTC" assert not PairLocks.is_pair_locked(pair) # Unlocking a pair that's not locked should not raise an error PairLocks.unlock_pair(pair) @@ -52,12 +52,12 @@ def test_PairLocks(use_db): assert len(locks) == 2 # Unlock original pair - pair = 'ETH/BTC' + pair = "ETH/BTC" PairLocks.unlock_pair(pair) assert not PairLocks.is_pair_locked(pair) assert not PairLocks.is_global_lock() - pair = 'BTC/USDT' + pair = "BTC/USDT" # Lock until 14:30 lock_time = datetime(2020, 5, 1, 14, 30, 0, tzinfo=timezone.utc) PairLocks.lock_pair(pair, lock_time) @@ -73,18 +73,18 @@ def test_PairLocks(use_db): locks = PairLocks.get_pair_locks(pair, lock_time + timedelta(minutes=-2)) assert len(locks) == 1 - assert 'PairLock' in str(locks[0]) + assert "PairLock" in str(locks[0]) # Unlock all PairLocks.unlock_pair(pair, lock_time + timedelta(minutes=-2)) assert not PairLocks.is_global_lock(lock_time + timedelta(minutes=-50)) # Global lock - PairLocks.lock_pair('*', lock_time) + PairLocks.lock_pair("*", lock_time) assert PairLocks.is_global_lock(lock_time + timedelta(minutes=-50)) # Global lock also locks every pair separately assert PairLocks.is_pair_locked(pair, lock_time + timedelta(minutes=-50)) - assert PairLocks.is_pair_locked('XRP/USDT', lock_time + timedelta(minutes=-50)) + assert PairLocks.is_pair_locked("XRP/USDT", lock_time + timedelta(minutes=-50)) if use_db: locks = PairLocks.get_all_locks() @@ -100,10 +100,10 @@ def 
test_PairLocks(use_db): PairLocks.use_db = True -@pytest.mark.parametrize('use_db', (False, True)) +@pytest.mark.parametrize("use_db", (False, True)) @pytest.mark.usefixtures("init_persistence") def test_PairLocks_getlongestlock(use_db): - PairLocks.timeframe = '5m' + PairLocks.timeframe = "5m" # No lock should be present PairLocks.use_db = use_db if use_db: @@ -111,7 +111,7 @@ def test_PairLocks_getlongestlock(use_db): assert PairLocks.use_db == use_db - pair = 'ETH/BTC' + pair = "ETH/BTC" assert not PairLocks.is_pair_locked(pair) PairLocks.lock_pair(pair, dt_now() + timedelta(minutes=4)) # ETH/BTC locked for 4 minutes @@ -132,10 +132,10 @@ def test_PairLocks_getlongestlock(use_db): PairLocks.use_db = True -@pytest.mark.parametrize('use_db', (False, True)) +@pytest.mark.parametrize("use_db", (False, True)) @pytest.mark.usefixtures("init_persistence") def test_PairLocks_reason(use_db): - PairLocks.timeframe = '5m' + PairLocks.timeframe = "5m" PairLocks.use_db = use_db # No lock should be present if use_db: @@ -143,15 +143,15 @@ def test_PairLocks_reason(use_db): assert PairLocks.use_db == use_db - PairLocks.lock_pair('XRP/USDT', dt_now() + timedelta(minutes=4), 'TestLock1') - PairLocks.lock_pair('ETH/USDT', dt_now() + timedelta(minutes=4), 'TestLock2') + PairLocks.lock_pair("XRP/USDT", dt_now() + timedelta(minutes=4), "TestLock1") + PairLocks.lock_pair("ETH/USDT", dt_now() + timedelta(minutes=4), "TestLock2") - assert PairLocks.is_pair_locked('XRP/USDT') - assert PairLocks.is_pair_locked('ETH/USDT') + assert PairLocks.is_pair_locked("XRP/USDT") + assert PairLocks.is_pair_locked("ETH/USDT") - PairLocks.unlock_reason('TestLock1') - assert not PairLocks.is_pair_locked('XRP/USDT') - assert PairLocks.is_pair_locked('ETH/USDT') + PairLocks.unlock_reason("TestLock1") + assert not PairLocks.is_pair_locked("XRP/USDT") + assert PairLocks.is_pair_locked("ETH/USDT") PairLocks.reset_locks() PairLocks.use_db = True diff --git a/tests/plugins/test_protections.py b/tests/plugins/test_protections.py index 0228910f2..c8a8fdf20 100644 --- a/tests/plugins/test_protections.py +++ b/tests/plugins/test_protections.py @@ -11,12 +11,16 @@ from freqtrade.plugins.protectionmanager import ProtectionManager from tests.conftest import get_patched_freqtradebot, log_has_re -def generate_mock_trade(pair: str, fee: float, is_open: bool, - exit_reason: str = ExitType.EXIT_SIGNAL, - min_ago_open: int = None, min_ago_close: int = None, - profit_rate: float = 0.9, - is_short: bool = False, - ): +def generate_mock_trade( + pair: str, + fee: float, + is_open: bool, + exit_reason: str = ExitType.EXIT_SIGNAL, + min_ago_open: int = None, + min_ago_close: int = None, + profit_rate: float = 0.9, + is_short: bool = False, +): open_rate = random.random() trade = Trade( @@ -29,32 +33,15 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool, open_rate=open_rate, is_open=is_open, amount=0.01 / open_rate, - exchange='binance', + exchange="binance", is_short=is_short, leverage=1, ) - trade.orders.append(Order( - ft_order_side=trade.entry_side, - order_id=f'{pair}-{trade.entry_side}-{trade.open_date}', - ft_is_open=False, - ft_pair=pair, - ft_amount=trade.amount, - ft_price=trade.open_rate, - amount=trade.amount, - filled=trade.amount, - remaining=0, - price=open_rate, - average=open_rate, - status="closed", - order_type="market", - side=trade.entry_side, - )) - if not is_open: - close_price = open_rate * (2 - profit_rate if is_short else profit_rate) - trade.orders.append(Order( - ft_order_side=trade.exit_side, - 
order_id=f'{pair}-{trade.exit_side}-{trade.close_date}', + trade.orders.append( + Order( + ft_order_side=trade.entry_side, + order_id=f"{pair}-{trade.entry_side}-{trade.open_date}", ft_is_open=False, ft_pair=pair, ft_amount=trade.amount, @@ -62,12 +49,33 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool, amount=trade.amount, filled=trade.amount, remaining=0, - price=close_price, - average=close_price, + price=open_rate, + average=open_rate, status="closed", order_type="market", - side=trade.exit_side, - )) + side=trade.entry_side, + ) + ) + if not is_open: + close_price = open_rate * (2 - profit_rate if is_short else profit_rate) + trade.orders.append( + Order( + ft_order_side=trade.exit_side, + order_id=f"{pair}-{trade.exit_side}-{trade.close_date}", + ft_is_open=False, + ft_pair=pair, + ft_amount=trade.amount, + ft_price=trade.open_rate, + amount=trade.amount, + filled=trade.amount, + remaining=0, + price=close_price, + average=close_price, + status="closed", + order_type="market", + side=trade.exit_side, + ) + ) trade.recalc_open_trade_value() if not is_open: @@ -80,54 +88,79 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool, def test_protectionmanager(mocker, default_conf): - default_conf['protections'] = [{'method': protection} - for protection in constants.AVAILABLE_PROTECTIONS] + default_conf["protections"] = [ + {"method": protection} for protection in constants.AVAILABLE_PROTECTIONS + ] freqtrade = get_patched_freqtradebot(mocker, default_conf) for handler in freqtrade.protections._protection_handlers: assert handler.name in constants.AVAILABLE_PROTECTIONS if not handler.has_global_stop: - assert handler.global_stop(datetime.now(timezone.utc), '*') is None + assert handler.global_stop(datetime.now(timezone.utc), "*") is None if not handler.has_local_stop: - assert handler.stop_per_pair('XRP/BTC', datetime.now(timezone.utc), '*') is None + assert handler.stop_per_pair("XRP/BTC", datetime.now(timezone.utc), "*") is None -@pytest.mark.parametrize('timeframe,expected,protconf', [ - ('1m', [20, 10], - [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}]), - ('5m', [100, 15], - [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 15}]), - ('1h', [1200, 40], - [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 40}]), - ('1d', [1440, 5], - [{"method": "StoplossGuard", "lookback_period_candles": 1, "stop_duration": 5}]), - ('1m', [20, 5], - [{"method": "StoplossGuard", "lookback_period": 20, "stop_duration_candles": 5}]), - ('5m', [15, 25], - [{"method": "StoplossGuard", "lookback_period": 15, "stop_duration_candles": 5}]), - ('1h', [50, 600], - [{"method": "StoplossGuard", "lookback_period": 50, "stop_duration_candles": 10}]), - ('1h', [60, 540], - [{"method": "StoplossGuard", "lookback_period_candles": 1, "stop_duration_candles": 9}]), -]) +@pytest.mark.parametrize( + "timeframe,expected,protconf", + [ + ( + "1m", + [20, 10], + [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], + ), + ( + "5m", + [100, 15], + [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 15}], + ), + ( + "1h", + [1200, 40], + [{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 40}], + ), + ( + "1d", + [1440, 5], + [{"method": "StoplossGuard", "lookback_period_candles": 1, "stop_duration": 5}], + ), + ( + "1m", + [20, 5], + [{"method": "StoplossGuard", "lookback_period": 20, "stop_duration_candles": 5}], + ), + ( + "5m", + [15, 
25], + [{"method": "StoplossGuard", "lookback_period": 15, "stop_duration_candles": 5}], + ), + ( + "1h", + [50, 600], + [{"method": "StoplossGuard", "lookback_period": 50, "stop_duration_candles": 10}], + ), + ( + "1h", + [60, 540], + [{"method": "StoplossGuard", "lookback_period_candles": 1, "stop_duration_candles": 9}], + ), + ], +) def test_protections_init(default_conf, timeframe, expected, protconf): - default_conf['timeframe'] = timeframe + default_conf["timeframe"] = timeframe man = ProtectionManager(default_conf, protconf) assert len(man._protection_handlers) == len(protconf) assert man._protection_handlers[0]._lookback_period == expected[0] assert man._protection_handlers[0]._stop_duration == expected[1] -@pytest.mark.parametrize('is_short', [False, True]) +@pytest.mark.parametrize("is_short", [False, True]) @pytest.mark.usefixtures("init_persistence") def test_stoploss_guard(mocker, default_conf, fee, caplog, is_short): # Active for both sides (long and short) - default_conf['protections'] = [{ - "method": "StoplossGuard", - "lookback_period": 60, - "stop_duration": 40, - "trade_limit": 3 - }] + default_conf["protections"] = [ + {"method": "StoplossGuard", "lookback_period": 60, "stop_duration": 40, "trade_limit": 3} + ] freqtrade = get_patched_freqtradebot(mocker, default_conf) message = r"Trading stopped due to .*" assert not freqtrade.protections.global_stop() @@ -135,8 +168,13 @@ def test_stoploss_guard(mocker, default_conf, fee, caplog, is_short): caplog.clear() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=200, min_ago_close=30, is_short=is_short, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=200, + min_ago_close=30, + is_short=is_short, ) assert not freqtrade.protections.global_stop() @@ -144,13 +182,23 @@ def test_stoploss_guard(mocker, default_conf, fee, caplog, is_short): caplog.clear() # This trade does not count, as it's closed too long ago generate_mock_trade( - 'BCH/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=250, min_ago_close=100, is_short=is_short, + "BCH/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=250, + min_ago_close=100, + is_short=is_short, ) generate_mock_trade( - 'ETH/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=240, min_ago_close=30, is_short=is_short, + "ETH/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=240, + min_ago_close=30, + is_short=is_short, ) # 3 Trades closed - but the 2nd has been closed too long ago. 
assert not freqtrade.protections.global_stop() @@ -158,8 +206,13 @@ def test_stoploss_guard(mocker, default_conf, fee, caplog, is_short): caplog.clear() generate_mock_trade( - 'LTC/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=180, min_ago_close=30, is_short=is_short, + "LTC/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=180, + min_ago_close=30, + is_short=is_short, ) assert freqtrade.protections.global_stop() @@ -168,36 +221,44 @@ def test_stoploss_guard(mocker, default_conf, fee, caplog, is_short): # Test 5m after lock-period - this should try and relock the pair, but end-time # should be the previous end-time - end_time = PairLocks.get_pair_longest_lock('*').lock_end_time + timedelta(minutes=5) + end_time = PairLocks.get_pair_longest_lock("*").lock_end_time + timedelta(minutes=5) freqtrade.protections.global_stop(end_time) assert not PairLocks.is_global_lock(end_time) -@pytest.mark.parametrize('only_per_pair', [False, True]) -@pytest.mark.parametrize('only_per_side', [False, True]) +@pytest.mark.parametrize("only_per_pair", [False, True]) +@pytest.mark.parametrize("only_per_side", [False, True]) @pytest.mark.usefixtures("init_persistence") def test_stoploss_guard_perpair(mocker, default_conf, fee, caplog, only_per_pair, only_per_side): - default_conf['protections'] = [{ - "method": "StoplossGuard", - "lookback_period": 60, - "trade_limit": 2, - "stop_duration": 60, - "only_per_pair": only_per_pair, - "only_per_side": only_per_side, - }] - check_side = 'long' if only_per_side else '*' + default_conf["protections"] = [ + { + "method": "StoplossGuard", + "lookback_period": 60, + "trade_limit": 2, + "stop_duration": 60, + "only_per_pair": only_per_pair, + "only_per_side": only_per_side, + } + ] + check_side = "long" if only_per_side else "*" is_short = False freqtrade = get_patched_freqtradebot(mocker, default_conf) message = r"Trading stopped due to .*" - pair = 'XRP/BTC' + pair = "XRP/BTC" assert not freqtrade.protections.stop_per_pair(pair) assert not freqtrade.protections.global_stop() assert not log_has_re(message, caplog) caplog.clear() generate_mock_trade( - pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=200, min_ago_close=30, profit_rate=0.9, is_short=is_short + pair, + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=200, + min_ago_close=30, + profit_rate=0.9, + is_short=is_short, ) assert not freqtrade.protections.stop_per_pair(pair) @@ -206,13 +267,25 @@ def test_stoploss_guard_perpair(mocker, default_conf, fee, caplog, only_per_pair caplog.clear() # This trade does not count, as it's closed too long ago generate_mock_trade( - pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=250, min_ago_close=100, profit_rate=0.9, is_short=is_short + pair, + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=250, + min_ago_close=100, + profit_rate=0.9, + is_short=is_short, ) # Trade does not count for per pair stop as it's the wrong pair. generate_mock_trade( - 'ETH/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=240, min_ago_close=30, profit_rate=0.9, is_short=is_short + "ETH/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=240, + min_ago_close=30, + profit_rate=0.9, + is_short=is_short, ) # 3 Trades closed - but the 2nd has been closed too long ago. 
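# A minimal sketch of the StoplossGuard protection entry these tests drive (the numbers
# mirror the parametrized cases above and are illustrative, not recommendations): once
# trade_limit stoploss exits occur within lookback_period minutes, trading pauses for
# stop_duration minutes; only_per_pair / only_per_side narrow the resulting lock to a
# single pair or trade direction instead of a global stop.
stoploss_guard_protections = [
    {
        "method": "StoplossGuard",
        "lookback_period": 60,
        "trade_limit": 2,
        "stop_duration": 60,
        "only_per_pair": False,
        "only_per_side": False,
    }
]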
assert not freqtrade.protections.stop_per_pair(pair) @@ -226,23 +299,35 @@ def test_stoploss_guard_perpair(mocker, default_conf, fee, caplog, only_per_pair # Trade does not count potentially, as it's in the wrong direction generate_mock_trade( - pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=150, min_ago_close=25, profit_rate=0.9, is_short=not is_short + pair, + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=150, + min_ago_close=25, + profit_rate=0.9, + is_short=not is_short, ) freqtrade.protections.stop_per_pair(pair) assert freqtrade.protections.global_stop() != only_per_pair assert PairLocks.is_pair_locked(pair, side=check_side) != (only_per_side and only_per_pair) assert PairLocks.is_global_lock(side=check_side) != only_per_pair if only_per_side: - assert not PairLocks.is_pair_locked(pair, side='*') - assert not PairLocks.is_global_lock(side='*') + assert not PairLocks.is_pair_locked(pair, side="*") + assert not PairLocks.is_global_lock(side="*") caplog.clear() # 2nd Trade that counts with correct pair generate_mock_trade( - pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=180, min_ago_close=31, profit_rate=0.9, is_short=is_short + pair, + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=180, + min_ago_close=31, + profit_rate=0.9, + is_short=is_short, ) freqtrade.protections.stop_per_pair(pair) @@ -250,174 +335,239 @@ def test_stoploss_guard_perpair(mocker, default_conf, fee, caplog, only_per_pair assert PairLocks.is_pair_locked(pair, side=check_side) assert PairLocks.is_global_lock(side=check_side) != only_per_pair if only_per_side: - assert not PairLocks.is_pair_locked(pair, side='*') - assert not PairLocks.is_global_lock(side='*') + assert not PairLocks.is_pair_locked(pair, side="*") + assert not PairLocks.is_global_lock(side="*") @pytest.mark.usefixtures("init_persistence") def test_CooldownPeriod(mocker, default_conf, fee, caplog): - default_conf['protections'] = [{ - "method": "CooldownPeriod", - "stop_duration": 60, - }] + default_conf["protections"] = [ + { + "method": "CooldownPeriod", + "stop_duration": 60, + } + ] freqtrade = get_patched_freqtradebot(mocker, default_conf) message = r"Trading stopped due to .*" assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") assert not log_has_re(message, caplog) caplog.clear() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=200, min_ago_close=30, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=200, + min_ago_close=30, ) assert not freqtrade.protections.global_stop() - assert freqtrade.protections.stop_per_pair('XRP/BTC') - assert PairLocks.is_pair_locked('XRP/BTC') + assert freqtrade.protections.stop_per_pair("XRP/BTC") + assert PairLocks.is_pair_locked("XRP/BTC") assert not PairLocks.is_global_lock() generate_mock_trade( - 'ETH/BTC', fee.return_value, False, exit_reason=ExitType.ROI.value, - min_ago_open=205, min_ago_close=35, + "ETH/BTC", + fee.return_value, + False, + exit_reason=ExitType.ROI.value, + min_ago_open=205, + min_ago_close=35, ) assert not freqtrade.protections.global_stop() - assert not PairLocks.is_pair_locked('ETH/BTC') - assert freqtrade.protections.stop_per_pair('ETH/BTC') - assert PairLocks.is_pair_locked('ETH/BTC') + assert not 
PairLocks.is_pair_locked("ETH/BTC") + assert freqtrade.protections.stop_per_pair("ETH/BTC") + assert PairLocks.is_pair_locked("ETH/BTC") assert not PairLocks.is_global_lock() -@pytest.mark.parametrize('only_per_side', [False, True]) +@pytest.mark.parametrize("only_per_side", [False, True]) @pytest.mark.usefixtures("init_persistence") def test_LowProfitPairs(mocker, default_conf, fee, caplog, only_per_side): - default_conf['protections'] = [{ - "method": "LowProfitPairs", - "lookback_period": 400, - "stop_duration": 60, - "trade_limit": 2, - "required_profit": 0.0, - "only_per_side": only_per_side, - }] + default_conf["protections"] = [ + { + "method": "LowProfitPairs", + "lookback_period": 400, + "stop_duration": 60, + "trade_limit": 2, + "required_profit": 0.0, + "only_per_side": only_per_side, + } + ] freqtrade = get_patched_freqtradebot(mocker, default_conf) message = r"Trading stopped due to .*" assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") assert not log_has_re(message, caplog) caplog.clear() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=800, min_ago_close=450, profit_rate=0.9, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=800, + min_ago_close=450, + profit_rate=0.9, ) Trade.commit() # Not locked with 1 trade assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') - assert not PairLocks.is_pair_locked('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") + assert not PairLocks.is_pair_locked("XRP/BTC") assert not PairLocks.is_global_lock() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=200, min_ago_close=120, profit_rate=0.9, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=200, + min_ago_close=120, + profit_rate=0.9, ) Trade.commit() # Not locked with 1 trade (first trade is outside of lookback_period) assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') - assert not PairLocks.is_pair_locked('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") + assert not PairLocks.is_pair_locked("XRP/BTC") assert not PairLocks.is_global_lock() # Add positive trade generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.ROI.value, - min_ago_open=20, min_ago_close=10, profit_rate=1.15, is_short=True + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.ROI.value, + min_ago_open=20, + min_ago_close=10, + profit_rate=1.15, + is_short=True, ) Trade.commit() - assert freqtrade.protections.stop_per_pair('XRP/BTC') != only_per_side - assert not PairLocks.is_pair_locked('XRP/BTC', side='*') - assert PairLocks.is_pair_locked('XRP/BTC', side='long') == only_per_side + assert freqtrade.protections.stop_per_pair("XRP/BTC") != only_per_side + assert not PairLocks.is_pair_locked("XRP/BTC", side="*") + assert PairLocks.is_pair_locked("XRP/BTC", side="long") == only_per_side generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=110, min_ago_close=21, profit_rate=0.8, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=110, + min_ago_close=21, + profit_rate=0.8, ) Trade.commit() # Locks due to 2nd 
trade assert freqtrade.protections.global_stop() != only_per_side - assert freqtrade.protections.stop_per_pair('XRP/BTC') != only_per_side - assert PairLocks.is_pair_locked('XRP/BTC', side='long') - assert PairLocks.is_pair_locked('XRP/BTC', side='*') != only_per_side + assert freqtrade.protections.stop_per_pair("XRP/BTC") != only_per_side + assert PairLocks.is_pair_locked("XRP/BTC", side="long") + assert PairLocks.is_pair_locked("XRP/BTC", side="*") != only_per_side assert not PairLocks.is_global_lock() Trade.commit() @pytest.mark.usefixtures("init_persistence") def test_MaxDrawdown(mocker, default_conf, fee, caplog): - default_conf['protections'] = [{ - "method": "MaxDrawdown", - "lookback_period": 1000, - "stop_duration": 60, - "trade_limit": 3, - "max_allowed_drawdown": 0.15 - }] + default_conf["protections"] = [ + { + "method": "MaxDrawdown", + "lookback_period": 1000, + "stop_duration": 60, + "trade_limit": 3, + "max_allowed_drawdown": 0.15, + } + ] freqtrade = get_patched_freqtradebot(mocker, default_conf) message = r"Trading stopped due to Max.*" assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") caplog.clear() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=1000, min_ago_close=900, profit_rate=1.1, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=1000, + min_ago_close=900, + profit_rate=1.1, ) generate_mock_trade( - 'ETH/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=1000, min_ago_close=900, profit_rate=1.1, + "ETH/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=1000, + min_ago_close=900, + profit_rate=1.1, ) generate_mock_trade( - 'NEO/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=1000, min_ago_close=900, profit_rate=1.1, + "NEO/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=1000, + min_ago_close=900, + profit_rate=1.1, ) Trade.commit() # No losing trade yet ... 
so max_drawdown will raise exception assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=500, min_ago_close=400, profit_rate=0.9, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=500, + min_ago_close=400, + profit_rate=0.9, ) # Not locked with one trade assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') - assert not PairLocks.is_pair_locked('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") + assert not PairLocks.is_pair_locked("XRP/BTC") assert not PairLocks.is_global_lock() generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, - min_ago_open=1200, min_ago_close=1100, profit_rate=0.5, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.STOP_LOSS.value, + min_ago_open=1200, + min_ago_close=1100, + profit_rate=0.5, ) Trade.commit() # Not locked with 1 trade (2nd trade is outside of lookback_period) assert not freqtrade.protections.global_stop() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') - assert not PairLocks.is_pair_locked('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") + assert not PairLocks.is_pair_locked("XRP/BTC") assert not PairLocks.is_global_lock() assert not log_has_re(message, caplog) # Winning trade ... (should not lock, does not change drawdown!) generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.ROI.value, - min_ago_open=320, min_ago_close=410, profit_rate=1.5, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.ROI.value, + min_ago_open=320, + min_ago_close=410, + profit_rate=1.5, ) Trade.commit() assert not freqtrade.protections.global_stop() @@ -427,63 +577,89 @@ def test_MaxDrawdown(mocker, default_conf, fee, caplog): # Add additional negative trade, causing a loss of > 15% generate_mock_trade( - 'XRP/BTC', fee.return_value, False, exit_reason=ExitType.ROI.value, - min_ago_open=20, min_ago_close=10, profit_rate=0.8, + "XRP/BTC", + fee.return_value, + False, + exit_reason=ExitType.ROI.value, + min_ago_open=20, + min_ago_close=10, + profit_rate=0.8, ) Trade.commit() - assert not freqtrade.protections.stop_per_pair('XRP/BTC') + assert not freqtrade.protections.stop_per_pair("XRP/BTC") # local lock not supported - assert not PairLocks.is_pair_locked('XRP/BTC') + assert not PairLocks.is_pair_locked("XRP/BTC") assert freqtrade.protections.global_stop() assert PairLocks.is_global_lock() assert log_has_re(message, caplog) -@pytest.mark.parametrize("protectionconf,desc_expected,exception_expected", [ - ({"method": "StoplossGuard", "lookback_period": 60, "trade_limit": 2, "stop_duration": 60}, - "[{'StoplossGuard': 'StoplossGuard - Frequent Stoploss Guard, " - "2 stoplosses with profit < 0.00% within 60 minutes.'}]", - None - ), - ({"method": "CooldownPeriod", "stop_duration": 60}, - "[{'CooldownPeriod': 'CooldownPeriod - Cooldown period of 60 minutes.'}]", - None - ), - ({"method": "LowProfitPairs", "lookback_period": 60, "stop_duration": 60}, - "[{'LowProfitPairs': 'LowProfitPairs - Low Profit Protection, locks pairs with " - "profit < 0.0 within 60 minutes.'}]", - None - ), - ({"method": "MaxDrawdown", "lookback_period": 60, "stop_duration": 60}, - "[{'MaxDrawdown': 'MaxDrawdown - Max drawdown 
protection, stop trading if drawdown is > 0.0 " - "within 60 minutes.'}]", - None - ), - ({"method": "StoplossGuard", "lookback_period_candles": 12, "trade_limit": 2, - "required_profit": -0.05, "stop_duration": 60}, - "[{'StoplossGuard': 'StoplossGuard - Frequent Stoploss Guard, " - "2 stoplosses with profit < -5.00% within 12 candles.'}]", - None - ), - ({"method": "CooldownPeriod", "stop_duration_candles": 5}, - "[{'CooldownPeriod': 'CooldownPeriod - Cooldown period of 5 candles.'}]", - None - ), - ({"method": "LowProfitPairs", "lookback_period_candles": 11, "stop_duration": 60}, - "[{'LowProfitPairs': 'LowProfitPairs - Low Profit Protection, locks pairs with " - "profit < 0.0 within 11 candles.'}]", - None - ), - ({"method": "MaxDrawdown", "lookback_period_candles": 20, "stop_duration": 60}, - "[{'MaxDrawdown': 'MaxDrawdown - Max drawdown protection, stop trading if drawdown is > 0.0 " - "within 20 candles.'}]", - None - ), -]) -def test_protection_manager_desc(mocker, default_conf, protectionconf, - desc_expected, exception_expected): - - default_conf['protections'] = [protectionconf] +@pytest.mark.parametrize( + "protectionconf,desc_expected,exception_expected", + [ + ( + { + "method": "StoplossGuard", + "lookback_period": 60, + "trade_limit": 2, + "stop_duration": 60, + }, + "[{'StoplossGuard': 'StoplossGuard - Frequent Stoploss Guard, " + "2 stoplosses with profit < 0.00% within 60 minutes.'}]", + None, + ), + ( + {"method": "CooldownPeriod", "stop_duration": 60}, + "[{'CooldownPeriod': 'CooldownPeriod - Cooldown period of 60 minutes.'}]", + None, + ), + ( + {"method": "LowProfitPairs", "lookback_period": 60, "stop_duration": 60}, + "[{'LowProfitPairs': 'LowProfitPairs - Low Profit Protection, locks pairs with " + "profit < 0.0 within 60 minutes.'}]", + None, + ), + ( + {"method": "MaxDrawdown", "lookback_period": 60, "stop_duration": 60}, + "[{'MaxDrawdown': 'MaxDrawdown - Max drawdown protection, stop trading " + "if drawdown is > 0.0 within 60 minutes.'}]", + None, + ), + ( + { + "method": "StoplossGuard", + "lookback_period_candles": 12, + "trade_limit": 2, + "required_profit": -0.05, + "stop_duration": 60, + }, + "[{'StoplossGuard': 'StoplossGuard - Frequent Stoploss Guard, " + "2 stoplosses with profit < -5.00% within 12 candles.'}]", + None, + ), + ( + {"method": "CooldownPeriod", "stop_duration_candles": 5}, + "[{'CooldownPeriod': 'CooldownPeriod - Cooldown period of 5 candles.'}]", + None, + ), + ( + {"method": "LowProfitPairs", "lookback_period_candles": 11, "stop_duration": 60}, + "[{'LowProfitPairs': 'LowProfitPairs - Low Profit Protection, locks pairs with " + "profit < 0.0 within 11 candles.'}]", + None, + ), + ( + {"method": "MaxDrawdown", "lookback_period_candles": 20, "stop_duration": 60}, + "[{'MaxDrawdown': 'MaxDrawdown - Max drawdown protection, stop trading " + "if drawdown is > 0.0 within 20 candles.'}]", + None, + ), + ], +) +def test_protection_manager_desc( + mocker, default_conf, protectionconf, desc_expected, exception_expected +): + default_conf["protections"] = [protectionconf] freqtrade = get_patched_freqtradebot(mocker, default_conf) short_desc = str(freqtrade.protections.short_desc()) diff --git a/tests/plugins/test_remotepairlist.py b/tests/plugins/test_remotepairlist.py index 9d407de9f..ed2cd8ac4 100644 --- a/tests/plugins/test_remotepairlist.py +++ b/tests/plugins/test_remotepairlist.py @@ -12,59 +12,57 @@ from tests.conftest import EXMS, get_patched_exchange, get_patched_freqtradebot, @pytest.fixture(scope="function") def 
rpl_config(default_conf): - default_conf['stake_currency'] = 'USDT' + default_conf["stake_currency"] = "USDT" - default_conf['exchange']['pair_whitelist'] = [ - 'ETH/USDT', - 'XRP/USDT', - ] - default_conf['exchange']['pair_blacklist'] = [ - 'BLK/USDT' + default_conf["exchange"]["pair_whitelist"] = [ + "ETH/USDT", + "XRP/USDT", ] + default_conf["exchange"]["pair_blacklist"] = ["BLK/USDT"] return default_conf def test_gen_pairlist_with_local_file(mocker, rpl_config): - mock_file = MagicMock() mock_file.read.return_value = '{"pairs": ["TKN/USDT","ETH/USDT","NANO/USDT"]}' - mocker.patch('freqtrade.plugins.pairlist.RemotePairList.open', return_value=mock_file) + mocker.patch("freqtrade.plugins.pairlist.RemotePairList.open", return_value=mock_file) - mock_file_path = mocker.patch('freqtrade.plugins.pairlist.RemotePairList.Path') + mock_file_path = mocker.patch("freqtrade.plugins.pairlist.RemotePairList.Path") mock_file_path.exists.return_value = True jsonparse = json.loads(mock_file.read.return_value) - mocker.patch('freqtrade.plugins.pairlist.RemotePairList.rapidjson.load', return_value=jsonparse) + mocker.patch("freqtrade.plugins.pairlist.RemotePairList.rapidjson.load", return_value=jsonparse) - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", - 'number_assets': 2, - 'refresh_period': 1800, - 'keep_pairlist_on_failure': True, - 'pairlist_url': 'file:///pairlist.json', - 'bearer_token': '', - 'read_timeout': 60 + "number_assets": 2, + "refresh_period": 1800, + "keep_pairlist_on_failure": True, + "pairlist_url": "file:///pairlist.json", + "bearer_token": "", + "read_timeout": 60, } ] exchange = get_patched_exchange(mocker, rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config['pairlists'][0], 0) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0 + ) result = remote_pairlist.gen_pairlist([]) - assert result == ['TKN/USDT', 'ETH/USDT'] + assert result == ["TKN/USDT", "ETH/USDT"] def test_fetch_pairlist_mock_response_html(mocker, rpl_config): mock_response = MagicMock() - mock_response.headers = {'content-type': 'text/html'} + mock_response.headers = {"content-type": "text/html"} - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "pairlist_url": "http://example.com/pairlist", @@ -77,17 +75,19 @@ def test_fetch_pairlist_mock_response_html(mocker, rpl_config): exchange = get_patched_exchange(mocker, rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", - return_value=mock_response) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config['pairlists'][0], 0) + mocker.patch( + "freqtrade.plugins.pairlist.RemotePairList.requests.get", return_value=mock_response + ) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0 + ) - with pytest.raises(OperationalException, match='RemotePairList is not of type JSON.'): + with pytest.raises(OperationalException, match="RemotePairList is not of type JSON."): remote_pairlist.fetch_pairlist() def test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog): - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "pairlist_url": "http://example.com/pairlist", @@ -100,25 +100,27 @@ def 
test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog): exchange = get_patched_exchange(mocker, rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", - side_effect=requests.exceptions.RequestException) + mocker.patch( + "freqtrade.plugins.pairlist.RemotePairList.requests.get", + side_effect=requests.exceptions.RequestException, + ) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config['pairlists'][0], 0) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0 + ) remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"] remote_pairlist._init_done = True - pairlist_url = rpl_config['pairlists'][0]['pairlist_url'] + pairlist_url = rpl_config["pairlists"][0]["pairlist_url"] pairs, _time_elapsed = remote_pairlist.fetch_pairlist() - assert log_has(f'Error: Was not able to fetch pairlist from: ' f'{pairlist_url}', caplog) + assert log_has(f"Error: Was not able to fetch pairlist from: {pairlist_url}", caplog) assert log_has("Keeping last fetched pairlist", caplog) assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"] def test_remote_pairlist_init_no_pairlist_url(mocker, rpl_config): - - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "number_assets": 10, @@ -127,14 +129,16 @@ def test_remote_pairlist_init_no_pairlist_url(mocker, rpl_config): ] get_patched_exchange(mocker, rpl_config) - with pytest.raises(OperationalException, match=r'`pairlist_url` not specified.' - r' Please check your configuration for "pairlist.config.pairlist_url"'): + with pytest.raises( + OperationalException, + match=r"`pairlist_url` not specified." + r' Please check your configuration for "pairlist.config.pairlist_url"', + ): get_patched_freqtradebot(mocker, rpl_config) def test_remote_pairlist_init_no_number_assets(mocker, rpl_config): - - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "pairlist_url": "http://example.com/pairlist", @@ -144,14 +148,16 @@ def test_remote_pairlist_init_no_number_assets(mocker, rpl_config): get_patched_exchange(mocker, rpl_config) - with pytest.raises(OperationalException, match=r'`number_assets` not specified. ' - 'Please check your configuration for "pairlist.config.number_assets"'): + with pytest.raises( + OperationalException, + match=r"`number_assets` not specified. 
" + 'Please check your configuration for "pairlist.config.number_assets"', + ): get_patched_freqtradebot(mocker, rpl_config) def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): - - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "pairlist_url": "http://example.com/pairlist", @@ -166,21 +172,21 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): mock_response.json.return_value = { "pairs": ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"], - "refresh_period": 60 + "refresh_period": 60, } - mock_response.headers = { - "content-type": "application/json" - } + mock_response.headers = {"content-type": "application/json"} mock_response.elapsed.total_seconds.return_value = 0.4 - mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", - return_value=mock_response) + mocker.patch( + "freqtrade.plugins.pairlist.RemotePairList.requests.get", return_value=mock_response + ) exchange = get_patched_exchange(mocker, rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config['pairlists'][0], 0) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][0], 0 + ) pairs, time_elapsed = remote_pairlist.fetch_pairlist() assert pairs == ["ETH/USDT", "XRP/USDT", "LTC/USDT", "EOS/USDT"] @@ -189,7 +195,7 @@ def test_fetch_pairlist_mock_response_valid(mocker, rpl_config): def test_remote_pairlist_init_wrong_mode(mocker, rpl_config): - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "mode": "blacklis", @@ -201,11 +207,11 @@ def test_remote_pairlist_init_wrong_mode(mocker, rpl_config): with pytest.raises( OperationalException, - match=r'`mode` not configured correctly. Supported Modes are "whitelist","blacklist"' + match=r'`mode` not configured correctly. Supported Modes are "whitelist","blacklist"', ): get_patched_freqtradebot(mocker, rpl_config) - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "mode": "blacklist", @@ -216,14 +222,13 @@ def test_remote_pairlist_init_wrong_mode(mocker, rpl_config): ] with pytest.raises( - OperationalException, - match=r'A `blacklist` mode RemotePairList can not be.*first.*' + OperationalException, match=r"A `blacklist` mode RemotePairList can not be.*first.*" ): get_patched_freqtradebot(mocker, rpl_config) def test_remote_pairlist_init_wrong_proc_mode(mocker, rpl_config): - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "RemotePairList", "processing_mode": "filler", @@ -237,25 +242,19 @@ def test_remote_pairlist_init_wrong_proc_mode(mocker, rpl_config): get_patched_exchange(mocker, rpl_config) with pytest.raises( OperationalException, - match=r'`processing_mode` not configured correctly. Supported Modes are "filter","append"' + match=r'`processing_mode` not configured correctly. 
Supported Modes are "filter","append"', ): get_patched_freqtradebot(mocker, rpl_config) def test_remote_pairlist_blacklist(mocker, rpl_config, caplog, markets, tickers): - mock_response = MagicMock() - mock_response.json.return_value = { - "pairs": ["XRP/USDT"], - "refresh_period": 60 - } + mock_response.json.return_value = {"pairs": ["XRP/USDT"], "refresh_period": 60} - mock_response.headers = { - "content-type": "application/json" - } + mock_response.headers = {"content-type": "application/json"} - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "StaticPairList", }, @@ -263,31 +262,34 @@ def test_remote_pairlist_blacklist(mocker, rpl_config, caplog, markets, tickers) "method": "RemotePairList", "mode": "blacklist", "pairlist_url": "http://example.com/pairlist", - "number_assets": 3 - } + "number_assets": 3, + }, ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", - return_value=mock_response) + mocker.patch( + "freqtrade.plugins.pairlist.RemotePairList.requests.get", return_value=mock_response + ) exchange = get_patched_exchange(mocker, rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config["pairlists"][1], 1) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][1], 1 + ) pairs, _time_elapsed = remote_pairlist.fetch_pairlist() assert pairs == ["XRP/USDT"] - whitelist = remote_pairlist.filter_pairlist(rpl_config['exchange']['pair_whitelist'], {}) + whitelist = remote_pairlist.filter_pairlist(rpl_config["exchange"]["pair_whitelist"], {}) assert whitelist == ["ETH/USDT"] assert log_has(f"Blacklist - Filtered out pairs: {pairs}", caplog) @@ -295,19 +297,13 @@ def test_remote_pairlist_blacklist(mocker, rpl_config, caplog, markets, tickers) @pytest.mark.parametrize("processing_mode", ["filter", "append"]) def test_remote_pairlist_whitelist(mocker, rpl_config, processing_mode, markets, tickers): - mock_response = MagicMock() - mock_response.json.return_value = { - "pairs": ["XRP/USDT"], - "refresh_period": 60 - } + mock_response.json.return_value = {"pairs": ["XRP/USDT"], "refresh_period": 60} - mock_response.headers = { - "content-type": "application/json" - } + mock_response.headers = {"content-type": "application/json"} - rpl_config['pairlists'] = [ + rpl_config["pairlists"] = [ { "method": "StaticPairList", }, @@ -316,29 +312,32 @@ def test_remote_pairlist_whitelist(mocker, rpl_config, processing_mode, markets, "mode": "whitelist", "processing_mode": processing_mode, "pairlist_url": "http://example.com/pairlist", - "number_assets": 3 - } + "number_assets": 3, + }, ] - mocker.patch.multiple(EXMS, - markets=PropertyMock(return_value=markets), - exchange_has=MagicMock(return_value=True), - get_tickers=tickers - ) + mocker.patch.multiple( + EXMS, + markets=PropertyMock(return_value=markets), + exchange_has=MagicMock(return_value=True), + get_tickers=tickers, + ) - mocker.patch("freqtrade.plugins.pairlist.RemotePairList.requests.get", - return_value=mock_response) + mocker.patch( + "freqtrade.plugins.pairlist.RemotePairList.requests.get", return_value=mock_response + ) exchange = get_patched_exchange(mocker, 
rpl_config) pairlistmanager = PairListManager(exchange, rpl_config) - remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, - rpl_config["pairlists"][1], 1) + remote_pairlist = RemotePairList( + exchange, pairlistmanager, rpl_config, rpl_config["pairlists"][1], 1 + ) pairs, _time_elapsed = remote_pairlist.fetch_pairlist() assert pairs == ["XRP/USDT"] - whitelist = remote_pairlist.filter_pairlist(rpl_config['exchange']['pair_whitelist'], {}) - assert whitelist == (["XRP/USDT"] if processing_mode == "filter" else ['ETH/USDT', 'XRP/USDT']) + whitelist = remote_pairlist.filter_pairlist(rpl_config["exchange"]["pair_whitelist"], {}) + assert whitelist == (["XRP/USDT"] if processing_mode == "filter" else ["ETH/USDT", "XRP/USDT"]) diff --git a/tests/rpc/test_fiat_convert.py b/tests/rpc/test_fiat_convert.py index 717866cfd..061df2e53 100644 --- a/tests/rpc/test_fiat_convert.py +++ b/tests/rpc/test_fiat_convert.py @@ -8,86 +8,98 @@ import pytest from requests.exceptions import RequestException from freqtrade.rpc.fiat_convert import CryptoToFiatConverter +from freqtrade.util.coin_gecko import FtCoinGeckoApi from tests.conftest import log_has, log_has_re -def test_fiat_convert_is_supported(mocker): - fiat_convert = CryptoToFiatConverter() - assert fiat_convert._is_supported_fiat(fiat='USD') is True - assert fiat_convert._is_supported_fiat(fiat='usd') is True - assert fiat_convert._is_supported_fiat(fiat='abc') is False - assert fiat_convert._is_supported_fiat(fiat='ABC') is False +def test_fiat_convert_is_singleton(): + fiat_convert = CryptoToFiatConverter({"a": 22}) + fiat_convert2 = CryptoToFiatConverter({}) + + assert fiat_convert is fiat_convert2 + assert id(fiat_convert) == id(fiat_convert2) + + +def test_fiat_convert_is_supported(): + fiat_convert = CryptoToFiatConverter({}) + assert fiat_convert._is_supported_fiat(fiat="USD") is True + assert fiat_convert._is_supported_fiat(fiat="usd") is True + assert fiat_convert._is_supported_fiat(fiat="abc") is False + assert fiat_convert._is_supported_fiat(fiat="ABC") is False def test_fiat_convert_find_price(mocker): - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) fiat_convert._coinlistings = {} fiat_convert._backoff = 0 - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._load_cryptomap', - return_value=None) - assert fiat_convert.get_price(crypto_symbol='BTC', fiat_symbol='EUR') == 0.0 + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._load_cryptomap", return_value=None + ) + assert fiat_convert.get_price(crypto_symbol="BTC", fiat_symbol="EUR") == 0.0 - with pytest.raises(ValueError, match=r'The fiat ABC is not supported.'): - fiat_convert._find_price(crypto_symbol='BTC', fiat_symbol='ABC') + with pytest.raises(ValueError, match=r"The fiat ABC is not supported."): + fiat_convert._find_price(crypto_symbol="BTC", fiat_symbol="ABC") - assert fiat_convert.get_price(crypto_symbol='XRP', fiat_symbol='USD') == 0.0 + assert fiat_convert.get_price(crypto_symbol="XRP", fiat_symbol="USD") == 0.0 - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=12345.0) - assert fiat_convert.get_price(crypto_symbol='BTC', fiat_symbol='USD') == 12345.0 - assert fiat_convert.get_price(crypto_symbol='btc', fiat_symbol='usd') == 12345.0 + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=12345.0 + ) + assert fiat_convert.get_price(crypto_symbol="BTC", fiat_symbol="USD") == 12345.0 + assert 
fiat_convert.get_price(crypto_symbol="btc", fiat_symbol="usd") == 12345.0 - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=13000.2) - assert fiat_convert.get_price(crypto_symbol='BTC', fiat_symbol='EUR') == 13000.2 + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=13000.2 + ) + assert fiat_convert.get_price(crypto_symbol="BTC", fiat_symbol="EUR") == 13000.2 def test_fiat_convert_unsupported_crypto(mocker, caplog): - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._coinlistings', return_value=[]) - fiat_convert = CryptoToFiatConverter() - assert fiat_convert._find_price(crypto_symbol='CRYPTO_123', fiat_symbol='EUR') == 0.0 - assert log_has('unsupported crypto-symbol CRYPTO_123 - returning 0.0', caplog) + mocker.patch("freqtrade.rpc.fiat_convert.CryptoToFiatConverter._coinlistings", return_value=[]) + fiat_convert = CryptoToFiatConverter({}) + assert fiat_convert._find_price(crypto_symbol="CRYPTO_123", fiat_symbol="EUR") == 0.0 + assert log_has("unsupported crypto-symbol CRYPTO_123 - returning 0.0", caplog) def test_fiat_convert_get_price(mocker): - find_price = mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=28000.0) + find_price = mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=28000.0 + ) - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) - with pytest.raises(ValueError, match=r'The fiat us dollar is not supported.'): - fiat_convert.get_price(crypto_symbol='btc', fiat_symbol='US Dollar') + with pytest.raises(ValueError, match=r"The fiat us dollar is not supported."): + fiat_convert.get_price(crypto_symbol="btc", fiat_symbol="US Dollar") # Check the value return by the method pair_len = len(fiat_convert._pair_price) assert pair_len == 0 - assert fiat_convert.get_price(crypto_symbol='btc', fiat_symbol='usd') == 28000.0 - assert fiat_convert._pair_price['btc/usd'] == 28000.0 + assert fiat_convert.get_price(crypto_symbol="btc", fiat_symbol="usd") == 28000.0 + assert fiat_convert._pair_price["btc/usd"] == 28000.0 assert len(fiat_convert._pair_price) == 1 assert find_price.call_count == 1 # Verify the cached is used - fiat_convert._pair_price['btc/usd'] = 9867.543 - assert fiat_convert.get_price(crypto_symbol='btc', fiat_symbol='usd') == 9867.543 + fiat_convert._pair_price["btc/usd"] = 9867.543 + assert fiat_convert.get_price(crypto_symbol="btc", fiat_symbol="usd") == 9867.543 assert find_price.call_count == 1 -def test_fiat_convert_same_currencies(mocker): - fiat_convert = CryptoToFiatConverter() +def test_fiat_convert_same_currencies(): + fiat_convert = CryptoToFiatConverter({}) - assert fiat_convert.get_price(crypto_symbol='USD', fiat_symbol='USD') == 1.0 + assert fiat_convert.get_price(crypto_symbol="USD", fiat_symbol="USD") == 1.0 -def test_fiat_convert_two_FIAT(mocker): - fiat_convert = CryptoToFiatConverter() +def test_fiat_convert_two_FIAT(): + fiat_convert = CryptoToFiatConverter({}) - assert fiat_convert.get_price(crypto_symbol='USD', fiat_symbol='EUR') == 0.0 + assert fiat_convert.get_price(crypto_symbol="USD", fiat_symbol="EUR") == 0.0 -def test_loadcryptomap(mocker): - - fiat_convert = CryptoToFiatConverter() +def test_loadcryptomap(): + fiat_convert = CryptoToFiatConverter({}) assert len(fiat_convert._coinlistings) == 2 assert fiat_convert._get_gecko_id("btc") == "bitcoin" @@ -97,28 +109,28 @@ def test_fiat_init_network_exception(mocker): # Because 
CryptoToFiatConverter is a Singleton we reset the listings listmock = MagicMock(side_effect=RequestException) mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", get_coins_list=listmock, ) # with pytest.raises(RequestEsxception): - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) fiat_convert._coinlistings = {} fiat_convert._load_cryptomap() assert len(fiat_convert._coinlistings) == 0 -def test_fiat_convert_without_network(mocker): +def test_fiat_convert_without_network(): # Because CryptoToFiatConverter is a Singleton we reset the value of _coingecko - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) - cmc_temp = CryptoToFiatConverter._coingecko - CryptoToFiatConverter._coingecko = None + cmc_temp = fiat_convert._coingecko + fiat_convert._coingecko = None assert fiat_convert._coingecko is None - assert fiat_convert._find_price(crypto_symbol='btc', fiat_symbol='usd') == 0.0 - CryptoToFiatConverter._coingecko = cmc_temp + assert fiat_convert._find_price(crypto_symbol="btc", fiat_symbol="usd") == 0.0 + fiat_convert._coingecko = cmc_temp def test_fiat_too_many_requests_response(mocker, caplog): @@ -126,77 +138,82 @@ def test_fiat_too_many_requests_response(mocker, caplog): req_exception = "429 Too Many Requests" listmock = MagicMock(return_value="{}", side_effect=RequestException(req_exception)) mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", get_coins_list=listmock, ) # with pytest.raises(RequestEsxception): - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) fiat_convert._coinlistings = {} fiat_convert._load_cryptomap() assert len(fiat_convert._coinlistings) == 0 assert fiat_convert._backoff > datetime.datetime.now().timestamp() assert log_has( - 'Too many requests for CoinGecko API, backing off and trying again later.', - caplog + "Too many requests for CoinGecko API, backing off and trying again later.", caplog ) -def test_fiat_multiple_coins(mocker, caplog): - fiat_convert = CryptoToFiatConverter() +def test_fiat_multiple_coins(caplog): + fiat_convert = CryptoToFiatConverter({}) fiat_convert._coinlistings = [ - {'id': 'helium', 'symbol': 'hnt', 'name': 'Helium'}, - {'id': 'hymnode', 'symbol': 'hnt', 'name': 'Hymnode'}, - {'id': 'bitcoin', 'symbol': 'btc', 'name': 'Bitcoin'}, - {'id': 'ethereum', 'symbol': 'eth', 'name': 'Ethereum'}, - {'id': 'ethereum-wormhole', 'symbol': 'eth', 'name': 'Ethereum Wormhole'}, + {"id": "helium", "symbol": "hnt", "name": "Helium"}, + {"id": "hymnode", "symbol": "hnt", "name": "Hymnode"}, + {"id": "bitcoin", "symbol": "btc", "name": "Bitcoin"}, + {"id": "ethereum", "symbol": "eth", "name": "Ethereum"}, + {"id": "ethereum-wormhole", "symbol": "eth", "name": "Ethereum Wormhole"}, ] - assert fiat_convert._get_gecko_id('btc') == 'bitcoin' - assert fiat_convert._get_gecko_id('hnt') is None - assert fiat_convert._get_gecko_id('eth') == 'ethereum' + assert fiat_convert._get_gecko_id("btc") == "bitcoin" + assert fiat_convert._get_gecko_id("hnt") is None + assert fiat_convert._get_gecko_id("eth") == "ethereum" - assert log_has('Found multiple mappings in CoinGecko for hnt.', caplog) + assert log_has("Found multiple mappings in CoinGecko for hnt.", caplog) def test_fiat_invalid_response(mocker, caplog): # Because CryptoToFiatConverter is a Singleton we reset the listings listmock = MagicMock(return_value=None) mocker.patch.multiple( - 
'freqtrade.rpc.fiat_convert.CoinGeckoAPI', + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", get_coins_list=listmock, ) # with pytest.raises(RequestEsxception): - fiat_convert = CryptoToFiatConverter() + fiat_convert = CryptoToFiatConverter({}) fiat_convert._coinlistings = [] fiat_convert._load_cryptomap() assert len(fiat_convert._coinlistings) == 0 - assert log_has_re('Could not load FIAT Cryptocurrency map for the following problem: .*', - caplog) + assert log_has_re( + "Could not load FIAT Cryptocurrency map for the following problem: .*", caplog + ) def test_convert_amount(mocker): - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter.get_price', return_value=12345.0) + mocker.patch("freqtrade.rpc.fiat_convert.CryptoToFiatConverter.get_price", return_value=12345.0) - fiat_convert = CryptoToFiatConverter() - result = fiat_convert.convert_amount( - crypto_amount=1.23, - crypto_symbol="BTC", - fiat_symbol="USD" - ) + fiat_convert = CryptoToFiatConverter({}) + result = fiat_convert.convert_amount(crypto_amount=1.23, crypto_symbol="BTC", fiat_symbol="USD") assert result == 15184.35 - result = fiat_convert.convert_amount( - crypto_amount=1.23, - crypto_symbol="BTC", - fiat_symbol="BTC" - ) + result = fiat_convert.convert_amount(crypto_amount=1.23, crypto_symbol="BTC", fiat_symbol="BTC") assert result == 1.23 result = fiat_convert.convert_amount( - crypto_amount="1.23", - crypto_symbol="BTC", - fiat_symbol="BTC" + crypto_amount="1.23", crypto_symbol="BTC", fiat_symbol="BTC" ) assert result == 1.23 + + +def test_FtCoinGeckoApi(): + ftc = FtCoinGeckoApi() + assert ftc._api_key == "" + assert ftc.api_base_url == "https://api.coingecko.com/api/v3/" + + # defaults to demo + ftc = FtCoinGeckoApi(api_key="123456") + assert ftc._api_key == "123456" + assert ftc.api_base_url == "https://api.coingecko.com/api/v3/" + + ftc = FtCoinGeckoApi(api_key="123456", is_demo=False) + assert ftc._api_key == "123456" + assert ftc.api_base_url == "https://pro-api.coingecko.com/api/v3/" diff --git a/tests/rpc/test_rpc.py b/tests/rpc/test_rpc.py index 5c8602c2f..a0c235cd5 100644 --- a/tests/rpc/test_rpc.py +++ b/tests/rpc/test_rpc.py @@ -13,97 +13,119 @@ from freqtrade.persistence import Order, Trade from freqtrade.persistence.key_value_store import set_startup_time from freqtrade.rpc import RPC, RPCException from freqtrade.rpc.fiat_convert import CryptoToFiatConverter -from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt, - get_patched_freqtradebot, patch_get_signal) +from tests.conftest import ( + EXMS, + create_mock_trades, + create_mock_trades_usdt, + get_patched_freqtradebot, + patch_get_signal, +) def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: gen_response = { - 'trade_id': 1, - 'pair': 'ETH/BTC', - 'base_currency': 'ETH', - 'quote_currency': 'BTC', - 'open_date': ANY, - 'open_timestamp': ANY, - 'open_fill_date': ANY, - 'open_fill_timestamp': ANY, - 'is_open': ANY, - 'fee_open': ANY, - 'fee_open_cost': ANY, - 'fee_open_currency': ANY, - 'fee_close': fee.return_value, - 'fee_close_cost': ANY, - 'fee_close_currency': ANY, - 'open_rate_requested': ANY, - 'open_trade_value': 0.0010025, - 'close_rate_requested': ANY, - 'exit_reason': ANY, - 'exit_order_status': ANY, - 'min_rate': ANY, - 'max_rate': ANY, - 'strategy': ANY, - 'enter_tag': ANY, - 'timeframe': 5, - 'close_date': None, - 'close_timestamp': None, - 'open_rate': 1.098e-05, - 'close_rate': None, - 'current_rate': 1.099e-05, - 'amount': 91.07468123, - 'amount_requested': 91.07468124, - 'stake_amount': 
0.001, - 'max_stake_amount': None, - 'trade_duration': None, - 'trade_duration_s': None, - 'close_profit': None, - 'close_profit_pct': None, - 'close_profit_abs': None, - 'profit_ratio': -0.00408133, - 'profit_pct': -0.41, - 'profit_abs': -4.09e-06, - 'profit_fiat': ANY, - 'stop_loss_abs': 9.89e-06, - 'stop_loss_pct': -10.0, - 'stop_loss_ratio': -0.1, - 'stoploss_last_update': ANY, - 'stoploss_last_update_timestamp': ANY, - 'initial_stop_loss_abs': 9.89e-06, - 'initial_stop_loss_pct': -10.0, - 'initial_stop_loss_ratio': -0.1, - 'stoploss_current_dist': pytest.approx(-1.0999999e-06), - 'stoploss_current_dist_ratio': -0.10009099, - 'stoploss_current_dist_pct': -10.01, - 'stoploss_entry_dist': -0.00010402, - 'stoploss_entry_dist_ratio': -0.10376381, - 'open_orders': '', - 'realized_profit': 0.0, - 'realized_profit_ratio': None, - 'total_profit_abs': -4.09e-06, - 'total_profit_fiat': ANY, - 'total_profit_ratio': None, - 'exchange': 'binance', - 'leverage': 1.0, - 'interest_rate': 0.0, - 'liquidation_price': None, - 'is_short': False, - 'funding_fees': 0.0, - 'trading_mode': TradingMode.SPOT, - 'amount_precision': 8.0, - 'price_precision': 8.0, - 'precision_mode': 2, - 'contract_size': 1, - 'has_open_orders': False, - 'orders': [{ - 'amount': 91.07468123, 'average': 1.098e-05, 'safe_price': 1.098e-05, - 'cost': 0.0009999999999054, 'filled': 91.07468123, 'ft_order_side': 'buy', - 'order_date': ANY, 'order_timestamp': ANY, 'order_filled_date': ANY, - 'order_filled_timestamp': ANY, 'order_type': 'limit', 'price': 1.098e-05, - 'is_open': False, 'pair': 'ETH/BTC', 'order_id': ANY, - 'remaining': ANY, 'status': ANY, 'ft_is_entry': True, 'ft_fee_base': None, - 'funding_fee': ANY, 'ft_order_tag': None, - }], + "trade_id": 1, + "pair": "ETH/BTC", + "base_currency": "ETH", + "quote_currency": "BTC", + "open_date": ANY, + "open_timestamp": ANY, + "open_fill_date": ANY, + "open_fill_timestamp": ANY, + "is_open": ANY, + "fee_open": ANY, + "fee_open_cost": ANY, + "fee_open_currency": ANY, + "fee_close": fee.return_value, + "fee_close_cost": ANY, + "fee_close_currency": ANY, + "open_rate_requested": ANY, + "open_trade_value": 0.0010025, + "close_rate_requested": ANY, + "exit_reason": ANY, + "exit_order_status": ANY, + "min_rate": ANY, + "max_rate": ANY, + "strategy": ANY, + "enter_tag": ANY, + "timeframe": 5, + "close_date": None, + "close_timestamp": None, + "open_rate": 1.098e-05, + "close_rate": None, + "current_rate": 1.099e-05, + "amount": 91.07468123, + "amount_requested": 91.07468124, + "stake_amount": 0.001, + "max_stake_amount": None, + "trade_duration": None, + "trade_duration_s": None, + "close_profit": None, + "close_profit_pct": None, + "close_profit_abs": None, + "profit_ratio": -0.00408133, + "profit_pct": -0.41, + "profit_abs": -4.09e-06, + "profit_fiat": ANY, + "stop_loss_abs": 9.89e-06, + "stop_loss_pct": -10.0, + "stop_loss_ratio": -0.1, + "stoploss_last_update": ANY, + "stoploss_last_update_timestamp": ANY, + "initial_stop_loss_abs": 9.89e-06, + "initial_stop_loss_pct": -10.0, + "initial_stop_loss_ratio": -0.1, + "stoploss_current_dist": pytest.approx(-1.0999999e-06), + "stoploss_current_dist_ratio": -0.10009099, + "stoploss_current_dist_pct": -10.01, + "stoploss_entry_dist": -0.00010402, + "stoploss_entry_dist_ratio": -0.10376381, + "open_orders": "", + "realized_profit": 0.0, + "realized_profit_ratio": None, + "total_profit_abs": -4.09e-06, + "total_profit_fiat": ANY, + "total_profit_ratio": None, + "exchange": "binance", + "leverage": 1.0, + "interest_rate": 0.0, + "liquidation_price": 
None, + "is_short": False, + "funding_fees": 0.0, + "trading_mode": TradingMode.SPOT, + "amount_precision": 8.0, + "price_precision": 8.0, + "precision_mode": 2, + "contract_size": 1, + "has_open_orders": False, + "orders": [ + { + "amount": 91.07468123, + "average": 1.098e-05, + "safe_price": 1.098e-05, + "cost": 0.0009999999999054, + "filled": 91.07468123, + "ft_order_side": "buy", + "order_date": ANY, + "order_timestamp": ANY, + "order_filled_date": ANY, + "order_filled_timestamp": ANY, + "order_type": "limit", + "price": 1.098e-05, + "is_open": False, + "pair": "ETH/BTC", + "order_id": ANY, + "remaining": ANY, + "status": ANY, + "ft_is_entry": True, + "ft_fee_base": None, + "funding_fee": ANY, + "ft_order_tag": None, + } + ], } - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + freqtradebot = get_patched_freqtradebot(mocker, default_conf) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -111,12 +133,11 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: _dry_is_price_crossed=MagicMock(side_effect=[False, True]), ) - freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) freqtradebot.state = State.RUNNING - with pytest.raises(RPCException, match=r'.*no active trade*'): + with pytest.raises(RPCException, match=r".*no active trade*"): rpc._rpc_trade_status() freqtradebot.enter_positions() @@ -125,20 +146,20 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: results = rpc._rpc_trade_status() response_unfilled = deepcopy(gen_response) # Different from "filled" response: - response_unfilled.update({ - 'amount': 91.07468124, - 'profit_ratio': 0.0, - 'profit_pct': 0.0, - 'profit_abs': 0.0, - 'total_profit_abs': 0.0, - 'open_orders': '(limit buy rem=91.07468123)', - 'has_open_orders': True, - }) - response_unfilled['orders'][0].update({ - 'is_open': True, - 'filled': 0.0, - 'remaining': 91.07468123 - }) + response_unfilled.update( + { + "amount": 91.07468124, + "profit_ratio": 0.0, + "profit_pct": 0.0, + "profit_abs": 0.0, + "total_profit_abs": 0.0, + "open_orders": "(limit buy rem=91.07468123)", + "has_open_orders": True, + } + ) + response_unfilled["orders"][0].update( + {"is_open": True, "filled": 0.0, "remaining": 91.07468123} + ) assert results[0] == response_unfilled # Open order without remaining @@ -149,9 +170,11 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: results = rpc._rpc_trade_status() # Reuse above object, only remaining changed. 
- response_unfilled['orders'][0].update({ - 'remaining': None, - }) + response_unfilled["orders"][0].update( + { + "remaining": None, + } + ) assert results[0] == response_unfilled trade = Trade.get_open_trades()[0] @@ -166,179 +189,179 @@ def test_rpc_trade_status(default_conf, ticker, fee, mocker) -> None: results = rpc._rpc_trade_status() response = deepcopy(gen_response) - response.update({ - 'max_stake_amount': 0.001, - 'total_profit_ratio': pytest.approx(-0.00409153), - 'has_open_orders': False, - }) + response.update( + { + "max_stake_amount": 0.001, + "total_profit_ratio": pytest.approx(-0.00409153), + "has_open_orders": False, + } + ) assert results[0] == response - mocker.patch(f'{EXMS}.get_rate', - MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available"))) + mocker.patch( + f"{EXMS}.get_rate", MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available")) + ) results = rpc._rpc_trade_status() - assert isnan(results[0]['profit_ratio']) - assert isnan(results[0]['current_rate']) + assert isnan(results[0]["profit_ratio"]) + assert isnan(results[0]["current_rate"]) response_norate = deepcopy(gen_response) # Update elements that are NaN when no rate is available. - response_norate.update({ - 'stoploss_current_dist': ANY, - 'stoploss_current_dist_ratio': ANY, - 'stoploss_current_dist_pct': ANY, - 'max_stake_amount': 0.001, - 'profit_ratio': ANY, - 'profit_pct': ANY, - 'profit_abs': ANY, - 'total_profit_abs': ANY, - 'total_profit_ratio': ANY, - 'current_rate': ANY, - }) + response_norate.update( + { + "stoploss_current_dist": ANY, + "stoploss_current_dist_ratio": ANY, + "stoploss_current_dist_pct": ANY, + "max_stake_amount": 0.001, + "profit_ratio": ANY, + "profit_pct": ANY, + "profit_abs": ANY, + "total_profit_abs": ANY, + "total_profit_ratio": ANY, + "current_rate": ANY, + } + ) assert results[0] == response_norate def test_rpc_status_table(default_conf, ticker, fee, mocker) -> None: mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', - get_price=MagicMock(return_value={'bitcoin': {'usd': 15000.0}}), + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", + get_price=MagicMock(return_value={"bitcoin": {"usd": 15000.0}}), ) - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, fetch_ticker=ticker, get_fee=fee, ) - del default_conf['fiat_display_currency'] + del default_conf["fiat_display_currency"] freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) freqtradebot.state = State.RUNNING - with pytest.raises(RPCException, match=r'.*no active trade*'): - rpc._rpc_status_table(default_conf['stake_currency'], 'USD') - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=False) + with pytest.raises(RPCException, match=r".*no active trade*"): + rpc._rpc_status_table(default_conf["stake_currency"], "USD") + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=False) freqtradebot.enter_positions() - result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD') + result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf["stake_currency"], "USD") assert "Since" in headers assert "Pair" in headers - assert 'now' == result[0][2] - assert 'ETH/BTC' in result[0][1] - assert 
'0.00 (0.00)' == result[0][3] - assert '0.00' == f'{fiat_profit_sum:.2f}' + assert "now" == result[0][2] + assert "ETH/BTC" in result[0][1] + assert "0.00 (0.00)" == result[0][3] + assert "0.00" == f"{fiat_profit_sum:.2f}" - mocker.patch(f'{EXMS}._dry_is_price_crossed', return_value=True) + mocker.patch(f"{EXMS}._dry_is_price_crossed", return_value=True) freqtradebot.process() - result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD') + result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf["stake_currency"], "USD") assert "Since" in headers assert "Pair" in headers - assert 'now' == result[0][2] - assert 'ETH/BTC' in result[0][1] - assert '-0.41% (-0.00)' == result[0][3] - assert '-0.00' == f'{fiat_profit_sum:.2f}' + assert "now" == result[0][2] + assert "ETH/BTC" in result[0][1] + assert "-0.41% (-0.00)" == result[0][3] + assert "-0.00" == f"{fiat_profit_sum:.2f}" # Test with fiat convert - rpc._fiat_converter = CryptoToFiatConverter() - result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD') + rpc._fiat_converter = CryptoToFiatConverter({}) + result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf["stake_currency"], "USD") assert "Since" in headers assert "Pair" in headers assert len(result[0]) == 4 - assert 'now' == result[0][2] - assert 'ETH/BTC' in result[0][1] - assert '-0.41% (-0.06)' == result[0][3] - assert '-0.06' == f'{fiat_profit_sum:.2f}' + assert "now" == result[0][2] + assert "ETH/BTC" in result[0][1] + assert "-0.41% (-0.06)" == result[0][3] + assert "-0.06" == f"{fiat_profit_sum:.2f}" - rpc._config['position_adjustment_enable'] = True - rpc._config['max_entry_position_adjustment'] = 3 - result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD') + rpc._config["position_adjustment_enable"] = True + rpc._config["max_entry_position_adjustment"] = 3 + result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf["stake_currency"], "USD") assert "# Entries" in headers assert len(result[0]) == 5 # 4th column should be 1/4 - as 1 order filled (a total of 4 is possible) # 3 on top of the initial one. 
- assert result[0][4] == '1/4' + assert result[0][4] == "1/4" - mocker.patch(f'{EXMS}.get_rate', - MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available"))) - result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf['stake_currency'], 'USD') - assert 'now' == result[0][2] - assert 'ETH/BTC' in result[0][1] - assert 'nan%' == result[0][3] + mocker.patch( + f"{EXMS}.get_rate", MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available")) + ) + result, headers, fiat_profit_sum = rpc._rpc_status_table(default_conf["stake_currency"], "USD") + assert "now" == result[0][2] + assert "ETH/BTC" in result[0][1] + assert "nan%" == result[0][3] assert isnan(fiat_profit_sum) def test__rpc_timeunit_profit( - default_conf_usdt, ticker, fee, markets, mocker, time_machine) -> None: - + default_conf_usdt, ticker, fee, markets, mocker, time_machine +) -> None: time_machine.move_to("2023-09-05 10:00:00 +00:00", tick=False) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( - EXMS, - fetch_ticker=ticker, - get_fee=fee, - markets=PropertyMock(return_value=markets) + EXMS, fetch_ticker=ticker, get_fee=fee, markets=PropertyMock(return_value=markets) ) freqtradebot = get_patched_freqtradebot(mocker, default_conf_usdt) create_mock_trades_usdt(fee) - stake_currency = default_conf_usdt['stake_currency'] - fiat_display_currency = default_conf_usdt['fiat_display_currency'] + stake_currency = default_conf_usdt["stake_currency"] + fiat_display_currency = default_conf_usdt["fiat_display_currency"] rpc = RPC(freqtradebot) - rpc._fiat_converter = CryptoToFiatConverter() + rpc._fiat_converter = CryptoToFiatConverter({}) # Try valid data days = rpc._rpc_timeunit_profit(7, stake_currency, fiat_display_currency) - assert len(days['data']) == 7 - assert days['stake_currency'] == default_conf_usdt['stake_currency'] - assert days['fiat_display_currency'] == default_conf_usdt['fiat_display_currency'] - for day in days['data']: + assert len(days["data"]) == 7 + assert days["stake_currency"] == default_conf_usdt["stake_currency"] + assert days["fiat_display_currency"] == default_conf_usdt["fiat_display_currency"] + for day in days["data"]: # {'date': datetime.date(2022, 6, 11), 'abs_profit': 13.8299999, # 'starting_balance': 1055.37, 'rel_profit': 0.0131044, # 'fiat_value': 0.0, 'trade_count': 2} - assert day['abs_profit'] in (0.0, pytest.approx(6.83), pytest.approx(-4.09)) - assert day['rel_profit'] in (0.0, pytest.approx(0.00642902), pytest.approx(-0.00383512)) - assert day['trade_count'] in (0, 1, 2) - assert day['starting_balance'] in (pytest.approx(1062.37), pytest.approx(1066.46)) - assert day['fiat_value'] in (0.0, ) + assert day["abs_profit"] in (0.0, pytest.approx(6.83), pytest.approx(-4.09)) + assert day["rel_profit"] in (0.0, pytest.approx(0.00642902), pytest.approx(-0.00383512)) + assert day["trade_count"] in (0, 1, 2) + assert day["starting_balance"] in (pytest.approx(1062.37), pytest.approx(1066.46)) + assert day["fiat_value"] in (0.0,) # ensure first day is current date - assert str(days['data'][0]['date']) == str(datetime.now(timezone.utc).date()) + assert str(days["data"][0]["date"]) == str(datetime.now(timezone.utc).date()) # Try invalid data - with pytest.raises(RPCException, match=r'.*must be an integer greater than 0*'): + with pytest.raises(RPCException, match=r".*must be an integer greater than 0*"): rpc._rpc_timeunit_profit(0, stake_currency, fiat_display_currency) 
-@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_rpc_trade_history(mocker, default_conf, markets, fee, is_short): - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - markets=PropertyMock(return_value=markets) - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=markets)) freqtradebot = get_patched_freqtradebot(mocker, default_conf) create_mock_trades(fee, is_short) rpc = RPC(freqtradebot) - rpc._fiat_converter = CryptoToFiatConverter() + rpc._fiat_converter = CryptoToFiatConverter({}) trades = rpc._rpc_trade_history(2) - assert len(trades['trades']) == 2 - assert trades['trades_count'] == 2 - assert isinstance(trades['trades'][0], dict) - assert isinstance(trades['trades'][1], dict) + assert len(trades["trades"]) == 2 + assert trades["trades_count"] == 2 + assert isinstance(trades["trades"][0], dict) + assert isinstance(trades["trades"][1], dict) trades = rpc._rpc_trade_history(0) - assert len(trades['trades']) == 2 - assert trades['trades_count'] == 2 + assert len(trades["trades"]) == 2 + assert trades["trades_count"] == 2 # The first closed trade is for ETC ... sorting is descending - assert trades['trades'][-1]['pair'] == 'ETC/BTC' - assert trades['trades'][0]['pair'] == 'XRP/BTC' + assert trades["trades"][-1]["pair"] == "ETC/BTC" + assert trades["trades"][0]["pair"] == "XRP/BTC" -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_rpc_delete_trade(mocker, default_conf, fee, markets, caplog, is_short): - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) stoploss_mock = MagicMock() cancel_mock = MagicMock() mocker.patch.multiple( @@ -349,57 +372,57 @@ def test_rpc_delete_trade(mocker, default_conf, fee, markets, caplog, is_short): ) freqtradebot = get_patched_freqtradebot(mocker, default_conf) - freqtradebot.strategy.order_types['stoploss_on_exchange'] = True + freqtradebot.strategy.order_types["stoploss_on_exchange"] = True create_mock_trades(fee, is_short) rpc = RPC(freqtradebot) - with pytest.raises(RPCException, match='invalid argument'): - rpc._rpc_delete('200') + with pytest.raises(RPCException, match="invalid argument"): + rpc._rpc_delete("200") trades = Trade.session.scalars(select(Trade)).all() trades[2].orders.append( Order( - ft_order_side='stoploss', + ft_order_side="stoploss", ft_pair=trades[2].pair, ft_is_open=True, ft_amount=trades[2].amount, ft_price=trades[2].stop_loss, - order_id='102', - status='open', + order_id="102", + status="open", ) ) assert len(trades) > 2 - res = rpc._rpc_delete('1') + res = rpc._rpc_delete("1") assert isinstance(res, dict) - assert res['result'] == 'success' - assert res['trade_id'] == '1' - assert res['cancel_order_count'] == 1 + assert res["result"] == "success" + assert res["trade_id"] == "1" + assert res["cancel_order_count"] == 1 assert cancel_mock.call_count == 1 assert stoploss_mock.call_count == 0 cancel_mock.reset_mock() stoploss_mock.reset_mock() - res = rpc._rpc_delete('5') + res = rpc._rpc_delete("5") assert isinstance(res, dict) assert stoploss_mock.call_count == 1 - assert res['cancel_order_count'] == 1 + assert res["cancel_order_count"] == 1 - stoploss_mock = mocker.patch(f'{EXMS}.cancel_stoploss_order', side_effect=InvalidOrderException) + stoploss_mock = mocker.patch(f"{EXMS}.cancel_stoploss_order", 
side_effect=InvalidOrderException) - res = rpc._rpc_delete('3') + res = rpc._rpc_delete("3") assert stoploss_mock.call_count == 1 stoploss_mock.reset_mock() - cancel_mock = mocker.patch(f'{EXMS}.cancel_order', side_effect=InvalidOrderException) + cancel_mock = mocker.patch(f"{EXMS}.cancel_order", side_effect=InvalidOrderException) - res = rpc._rpc_delete('4') + res = rpc._rpc_delete("4") assert cancel_mock.call_count == 1 assert stoploss_mock.call_count == 0 def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=1.1) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.1) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -407,106 +430,107 @@ def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None: ) freqtradebot = get_patched_freqtradebot(mocker, default_conf_usdt) - stake_currency = default_conf_usdt['stake_currency'] - fiat_display_currency = default_conf_usdt['fiat_display_currency'] + stake_currency = default_conf_usdt["stake_currency"] + fiat_display_currency = default_conf_usdt["fiat_display_currency"] rpc = RPC(freqtradebot) - rpc._fiat_converter = CryptoToFiatConverter() + rpc._fiat_converter = CryptoToFiatConverter({}) res = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency) - assert res['trade_count'] == 0 - assert res['first_trade_date'] == '' - assert res['first_trade_timestamp'] == 0 - assert res['latest_trade_date'] == '' - assert res['latest_trade_timestamp'] == 0 - assert res['expectancy'] == 0 - assert res['expectancy_ratio'] == 100 + assert res["trade_count"] == 0 + assert res["first_trade_date"] == "" + assert res["first_trade_timestamp"] == 0 + assert res["latest_trade_date"] == "" + assert res["latest_trade_timestamp"] == 0 + assert res["expectancy"] == 0 + assert res["expectancy_ratio"] == 100 # Create some test data create_mock_trades_usdt(fee) stats = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency) - assert pytest.approx(stats['profit_closed_coin']) == 2.74 - assert pytest.approx(stats['profit_closed_percent_mean']) == -1.67 - assert pytest.approx(stats['profit_closed_fiat']) == 3.014 - assert pytest.approx(stats['profit_all_coin']) == -77.45964918 - assert pytest.approx(stats['profit_all_percent_mean']) == -57.86 - assert pytest.approx(stats['profit_all_fiat']) == -85.205614098 - assert pytest.approx(stats['winrate']) == 0.666666667 - assert pytest.approx(stats['expectancy']) == 0.913333333 - assert pytest.approx(stats['expectancy_ratio']) == 0.223308883 - assert stats['trade_count'] == 7 - assert stats['first_trade_humanized'] == '2 days ago' - assert stats['latest_trade_humanized'] == '17 minutes ago' - assert stats['avg_duration'] in ('0:17:40') - assert stats['best_pair'] == 'XRP/USDT' - assert stats['best_rate'] == 10.0 + assert pytest.approx(stats["profit_closed_coin"]) == 2.74 + assert pytest.approx(stats["profit_closed_percent_mean"]) == -1.67 + assert pytest.approx(stats["profit_closed_fiat"]) == 3.014 + assert pytest.approx(stats["profit_all_coin"]) == -57.40975881 + assert pytest.approx(stats["profit_all_percent_mean"]) == -50.83 + assert pytest.approx(stats["profit_all_fiat"]) == -63.150734691 + assert pytest.approx(stats["winrate"]) == 0.666666667 + assert pytest.approx(stats["expectancy"]) == 0.913333333 + assert 
pytest.approx(stats["expectancy_ratio"]) == 0.223308883 + assert stats["trade_count"] == 7 + assert stats["first_trade_humanized"] == "2 days ago" + assert stats["latest_trade_humanized"] == "17 minutes ago" + assert stats["avg_duration"] in ("0:17:40") + assert stats["best_pair"] == "XRP/USDT" + assert stats["best_rate"] == 10.0 # Test non-available pair - mocker.patch(f'{EXMS}.get_rate', - MagicMock(side_effect=ExchangeError("Pair 'XRP/USDT' not available"))) + mocker.patch( + f"{EXMS}.get_rate", MagicMock(side_effect=ExchangeError("Pair 'XRP/USDT' not available")) + ) stats = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency) - assert stats['trade_count'] == 7 - assert stats['first_trade_humanized'] == '2 days ago' - assert stats['latest_trade_humanized'] == '17 minutes ago' - assert stats['avg_duration'] in ('0:17:40') - assert stats['best_pair'] == 'XRP/USDT' - assert stats['best_rate'] == 10.0 - assert isnan(stats['profit_all_coin']) + assert stats["trade_count"] == 7 + assert stats["first_trade_humanized"] == "2 days ago" + assert stats["latest_trade_humanized"] == "17 minutes ago" + assert stats["avg_duration"] in ("0:17:40") + assert stats["best_pair"] == "XRP/USDT" + assert stats["best_rate"] == 10.0 + assert isnan(stats["profit_all_coin"]) def test_rpc_balance_handle_error(default_conf, mocker): mock_balance = { - 'BTC': { - 'free': 10.0, - 'total': 12.0, - 'used': 2.0, + "BTC": { + "free": 10.0, + "total": 12.0, + "used": 2.0, + }, + "ETH": { + "free": 1.0, + "total": 5.0, + "used": 4.0, }, - 'ETH': { - 'free': 1.0, - 'total': 5.0, - 'used': 4.0, - } } # ETH will be skipped due to mocked Error below mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', - get_price=MagicMock(return_value={'bitcoin': {'usd': 15000.0}}), + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", + get_price=MagicMock(return_value={"bitcoin": {"usd": 15000.0}}), ) - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=mock_balance), - get_tickers=MagicMock(side_effect=TemporaryError('Could not load ticker due to xxx')) + get_tickers=MagicMock(side_effect=TemporaryError("Could not load ticker due to xxx")), ) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - rpc._fiat_converter = CryptoToFiatConverter() + rpc._fiat_converter = CryptoToFiatConverter({}) with pytest.raises(RPCException, match="Error getting current tickers."): - rpc._rpc_balance(default_conf['stake_currency'], default_conf['fiat_display_currency']) + rpc._rpc_balance(default_conf["stake_currency"], default_conf["fiat_display_currency"]) def test_rpc_balance_handle(default_conf_usdt, mocker, tickers): mock_balance = { - 'BTC': { - 'free': 0.01, - 'total': 0.012, - 'used': 0.002, + "BTC": { + "free": 0.01, + "total": 0.012, + "used": 0.002, }, - 'ETH': { - 'free': 1.0, - 'total': 5.0, - 'used': 4.0, + "ETH": { + "free": 1.0, + "total": 5.0, + "used": 4.0, + }, + "USDT": { + "free": 50.0, + "total": 100.0, + "used": 5.0, }, - 'USDT': { - 'free': 50.0, - 'total': 100.0, - 'used': 5.0, - } } mock_pos = [ { @@ -528,17 +552,17 @@ def test_rpc_balance_handle(default_conf_usdt, mocker, tickers): "markPrice": 2896.41, "collateral": 20, 
"marginType": "isolated", - "side": 'short', - "percentage": None + "side": "short", + "percentage": None, } ] mocker.patch.multiple( - 'freqtrade.rpc.fiat_convert.CoinGeckoAPI', - get_price=MagicMock(return_value={'bitcoin': {'usd': 1.2}}), + "freqtrade.rpc.fiat_convert.FtCoinGeckoApi", + get_price=MagicMock(return_value={"bitcoin": {"usd": 1.2}}), ) - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=1.2) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.2) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, validate_trading_mode_and_margin_mode=MagicMock(), @@ -546,96 +570,95 @@ def test_rpc_balance_handle(default_conf_usdt, mocker, tickers): fetch_positions=MagicMock(return_value=mock_pos), get_tickers=tickers, get_valid_pair_combination=MagicMock( - side_effect=lambda a, b: f"{b}/{a}" if a == "USDT" else f"{a}/{b}") + side_effect=lambda a, b: f"{b}/{a}" if a == "USDT" else f"{a}/{b}" + ), ) - default_conf_usdt['dry_run'] = False - default_conf_usdt['trading_mode'] = 'futures' + default_conf_usdt["dry_run"] = False + default_conf_usdt["trading_mode"] = "futures" freqtradebot = get_patched_freqtradebot(mocker, default_conf_usdt) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - rpc._fiat_converter = CryptoToFiatConverter() + rpc._fiat_converter = CryptoToFiatConverter({}) result = rpc._rpc_balance( - default_conf_usdt['stake_currency'], default_conf_usdt['fiat_display_currency']) + default_conf_usdt["stake_currency"], default_conf_usdt["fiat_display_currency"] + ) - assert pytest.approx(result['total']) == 2824.83464 - assert pytest.approx(result['value']) == 2824.83464 * 1.2 + assert pytest.approx(result["total"]) == 2824.83464 + assert pytest.approx(result["value"]) == 2824.83464 * 1.2 assert tickers.call_count == 1 - assert tickers.call_args_list[0][1]['cached'] is True - assert 'USD' == result['symbol'] - assert result['currencies'] == [ + assert tickers.call_args_list[0][1]["cached"] is True + assert "USD" == result["symbol"] + assert result["currencies"] == [ { - 'currency': 'BTC', - 'free': 0.01, - 'balance': 0.012, - 'used': 0.002, - 'bot_owned': 0, - 'est_stake': 103.78464, - 'est_stake_bot': 0, - 'stake': 'USDT', - 'side': 'long', - 'leverage': 1, - 'position': 0, - 'is_bot_managed': False, - 'is_position': False + "currency": "BTC", + "free": 0.01, + "balance": 0.012, + "used": 0.002, + "bot_owned": 0, + "est_stake": 103.78464, + "est_stake_bot": 0, + "stake": "USDT", + "side": "long", + "leverage": 1, + "position": 0, + "is_bot_managed": False, + "is_position": False, }, { - 'currency': 'ETH', - 'free': 1.0, - 'balance': 5.0, - 'used': 4.0, - 'bot_owned': 0, - 'est_stake': 2651.05, - 'est_stake_bot': 0, - 'stake': 'USDT', - 'side': 'long', - 'leverage': 1, - 'position': 0, - 'is_bot_managed': False, - 'is_position': False + "currency": "ETH", + "free": 1.0, + "balance": 5.0, + "used": 4.0, + "bot_owned": 0, + "est_stake": 2651.05, + "est_stake_bot": 0, + "stake": "USDT", + "side": "long", + "leverage": 1, + "position": 0, + "is_bot_managed": False, + "is_position": False, }, { - 'currency': 'USDT', - 'free': 50.0, - 'balance': 100.0, - 'used': 5.0, - 'bot_owned': 49.5, - 'est_stake': 50.0, - 'est_stake_bot': 49.5, - 'stake': 'USDT', - 'side': 'long', - 'leverage': 1, - 'position': 0, - 'is_bot_managed': True, - 'is_position': False + "currency": "USDT", + "free": 50.0, + "balance": 100.0, + "used": 
5.0, + "bot_owned": 49.5, + "est_stake": 50.0, + "est_stake_bot": 49.5, + "stake": "USDT", + "side": "long", + "leverage": 1, + "position": 0, + "is_bot_managed": True, + "is_position": False, }, { - 'currency': 'ETH/USDT:USDT', - 'free': 0, - 'balance': 0, - 'used': 0, - 'position': 10.0, - 'est_stake': 20, - 'est_stake_bot': 20, - 'stake': 'USDT', - 'leverage': 5.0, - 'side': 'short', - 'is_bot_managed': True, - 'is_position': True - } + "currency": "ETH/USDT:USDT", + "free": 0, + "balance": 0, + "used": 0, + "position": 10.0, + "est_stake": 20, + "est_stake_bot": 20, + "stake": "USDT", + "leverage": 5.0, + "side": "short", + "is_bot_managed": True, + "is_position": True, + }, ] - assert pytest.approx(result['total_bot']) == 69.5 - assert pytest.approx(result['total']) == 2824.83464 # ETH stake is missing. - assert result['starting_capital'] == 50 - assert result['starting_capital_ratio'] == pytest.approx(0.3899999) + assert pytest.approx(result["total_bot"]) == 69.5 + assert pytest.approx(result["total"]) == 2824.83464 # ETH stake is missing. + assert result["starting_capital"] == 50 + assert result["starting_capital_ratio"] == pytest.approx(0.3899999) def test_rpc_start(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - fetch_ticker=MagicMock() - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, fetch_ticker=MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) @@ -643,20 +666,17 @@ def test_rpc_start(mocker, default_conf) -> None: freqtradebot.state = State.STOPPED result = rpc._rpc_start() - assert {'status': 'starting trader ...'} == result + assert {"status": "starting trader ..."} == result assert freqtradebot.state == State.RUNNING result = rpc._rpc_start() - assert {'status': 'already running'} == result + assert {"status": "already running"} == result assert freqtradebot.state == State.RUNNING def test_rpc_stop(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - fetch_ticker=MagicMock() - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, fetch_ticker=MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) @@ -664,35 +684,32 @@ def test_rpc_stop(mocker, default_conf) -> None: freqtradebot.state = State.RUNNING result = rpc._rpc_stop() - assert {'status': 'stopping trader ...'} == result + assert {"status": "stopping trader ..."} == result assert freqtradebot.state == State.STOPPED result = rpc._rpc_stop() - assert {'status': 'already stopped'} == result + assert {"status": "already stopped"} == result assert freqtradebot.state == State.STOPPED def test_rpc_stopentry(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - fetch_ticker=MagicMock() - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, fetch_ticker=MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) freqtradebot.state = State.RUNNING - assert freqtradebot.config['max_open_trades'] != 0 + assert freqtradebot.config["max_open_trades"] != 0 result = rpc._rpc_stopentry() - assert {'status': 'No more entries will occur from now. 
Run /reload_config to reset.'} == result - assert freqtradebot.config['max_open_trades'] == 0 + assert {"status": "No more entries will occur from now. Run /reload_config to reset."} == result + assert freqtradebot.config["max_open_trades"] == 0 def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) cancel_order_mock = MagicMock() mocker.patch.multiple( @@ -701,157 +718,151 @@ def test_rpc_force_exit(default_conf, ticker, fee, mocker) -> None: cancel_order=cancel_order_mock, fetch_order=MagicMock( return_value={ - 'status': 'closed', - 'type': 'limit', - 'side': 'buy', - 'filled': 0.0, + "status": "closed", + "type": "limit", + "side": "buy", + "filled": 0.0, } ), _dry_is_price_crossed=MagicMock(return_value=True), get_fee=fee, ) - mocker.patch('freqtrade.wallets.Wallets.get_free', return_value=1000) + mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) freqtradebot.state = State.STOPPED - with pytest.raises(RPCException, match=r'.*trader is not running*'): + with pytest.raises(RPCException, match=r".*trader is not running*"): rpc._rpc_force_exit(None) freqtradebot.state = State.RUNNING - with pytest.raises(RPCException, match=r'.*invalid argument*'): + with pytest.raises(RPCException, match=r".*invalid argument*"): rpc._rpc_force_exit(None) - msg = rpc._rpc_force_exit('all') - assert msg == {'result': 'Created exit orders for all open trades.'} + msg = rpc._rpc_force_exit("all") + assert msg == {"result": "Created exit orders for all open trades."} freqtradebot.enter_positions() - msg = rpc._rpc_force_exit('all') - assert msg == {'result': 'Created exit orders for all open trades.'} + msg = rpc._rpc_force_exit("all") + assert msg == {"result": "Created exit orders for all open trades."} freqtradebot.enter_positions() - msg = rpc._rpc_force_exit('2') - assert msg == {'result': 'Created exit order for trade 2.'} + msg = rpc._rpc_force_exit("2") + assert msg == {"result": "Created exit order for trade 2."} freqtradebot.state = State.STOPPED - with pytest.raises(RPCException, match=r'.*trader is not running*'): + with pytest.raises(RPCException, match=r".*trader is not running*"): rpc._rpc_force_exit(None) - with pytest.raises(RPCException, match=r'.*trader is not running*'): - rpc._rpc_force_exit('all') + with pytest.raises(RPCException, match=r".*trader is not running*"): + rpc._rpc_force_exit("all") freqtradebot.state = State.RUNNING assert cancel_order_mock.call_count == 0 - mocker.patch(f'{EXMS}._dry_is_price_crossed', MagicMock(return_value=False)) + mocker.patch(f"{EXMS}._dry_is_price_crossed", MagicMock(return_value=False)) freqtradebot.enter_positions() # make an limit-buy open trade - trade = Trade.session.scalars(select(Trade).filter(Trade.id == '3')).first() + trade = Trade.session.scalars(select(Trade).filter(Trade.id == "3")).first() filled_amount = trade.amount / 2 # Fetch order - it's open first, and closed after cancel_order is called. 
mocker.patch( - f'{EXMS}.fetch_order', - side_effect=[{ - 'id': trade.orders[0].order_id, - 'status': 'open', - 'type': 'limit', - 'side': 'buy', - 'filled': filled_amount - }, { - 'id': trade.orders[0].order_id, - 'status': 'closed', - 'type': 'limit', - 'side': 'buy', - 'filled': filled_amount - }] + f"{EXMS}.fetch_order", + side_effect=[ + { + "id": trade.orders[0].order_id, + "status": "open", + "type": "limit", + "side": "buy", + "filled": filled_amount, + }, + { + "id": trade.orders[0].order_id, + "status": "closed", + "type": "limit", + "side": "buy", + "filled": filled_amount, + }, + ], ) # check that the trade is called, which is done by ensuring exchange.cancel_order is called # and trade amount is updated - rpc._rpc_force_exit('3') + rpc._rpc_force_exit("3") assert cancel_order_mock.call_count == 1 assert pytest.approx(trade.amount) == filled_amount mocker.patch( - f'{EXMS}.fetch_order', - return_value={ - 'status': 'open', - 'type': 'limit', - 'side': 'buy', - 'filled': filled_amount - }) + f"{EXMS}.fetch_order", + return_value={"status": "open", "type": "limit", "side": "buy", "filled": filled_amount}, + ) - freqtradebot.config['max_open_trades'] = 3 + freqtradebot.config["max_open_trades"] = 3 freqtradebot.enter_positions() cancel_order_mock.reset_mock() - trade = Trade.session.scalars(select(Trade).filter(Trade.id == '3')).first() + trade = Trade.session.scalars(select(Trade).filter(Trade.id == "3")).first() amount = trade.amount # make a limit-sell open order trade mocker.patch( - f'{EXMS}.fetch_order', + f"{EXMS}.fetch_order", return_value={ - 'status': 'open', - 'type': 'limit', - 'side': 'sell', - 'amount': amount, - 'remaining': amount, - 'filled': 0.0, - 'id': trade.orders[-1].order_id, - } + "status": "open", + "type": "limit", + "side": "sell", + "amount": amount, + "remaining": amount, + "filled": 0.0, + "id": trade.orders[-1].order_id, + }, ) cancel_order_3 = mocker.patch( - f'{EXMS}.cancel_order_with_result', + f"{EXMS}.cancel_order_with_result", return_value={ - 'status': 'canceled', - 'type': 'limit', - 'side': 'sell', - 'amount': amount, - 'remaining': amount, - 'filled': 0.0, - 'id': trade.orders[-1].order_id, - } + "status": "canceled", + "type": "limit", + "side": "sell", + "amount": amount, + "remaining": amount, + "filled": 0.0, + "id": trade.orders[-1].order_id, + }, ) - msg = rpc._rpc_force_exit('3') - assert msg == {'result': 'Created exit order for trade 3.'} + msg = rpc._rpc_force_exit("3") + assert msg == {"result": "Created exit order for trade 3."} # status quo, no exchange calls assert cancel_order_3.call_count == 1 assert cancel_order_mock.call_count == 0 - trade = Trade.session.scalars(select(Trade).filter(Trade.id == '4')).first() + trade = Trade.session.scalars(select(Trade).filter(Trade.id == "4")).first() amount = trade.amount # make a limit-buy open trade, if there is no 'filled', don't sell it mocker.patch( - f'{EXMS}.fetch_order', - return_value={ - 'status': 'open', - 'type': 'limit', - 'side': 'buy', - 'filled': None - } + f"{EXMS}.fetch_order", + return_value={"status": "open", "type": "limit", "side": "buy", "filled": None}, ) cancel_order_4 = mocker.patch( - f'{EXMS}.cancel_order_with_result', - return_value={ - 'status': 'canceled', - 'type': 'limit', - 'side': 'sell', - 'amount': amount, - 'remaining': 0.0, - 'filled': amount, - 'id': trade.orders[0].order_id, - } - ) + f"{EXMS}.cancel_order_with_result", + return_value={ + "status": "canceled", + "type": "limit", + "side": "sell", + "amount": amount, + "remaining": 0.0, 
"filled": amount, + "id": trade.orders[0].order_id, + }, + ) # check that the trade is called, which is done by ensuring exchange.cancel_order is called - msg = rpc._rpc_force_exit('4') - assert msg == {'result': 'Created exit order for trade 4.'} + msg = rpc._rpc_force_exit("4") + assert msg == {"result": "Created exit order for trade 4."} assert cancel_order_4.call_count == 1 assert cancel_order_mock.call_count == 0 assert pytest.approx(trade.amount) == amount def test_performance_handle(default_conf_usdt, ticker, fee, mocker) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), @@ -867,14 +878,13 @@ def test_performance_handle(default_conf_usdt, ticker, fee, mocker) -> None: res = rpc._rpc_performance() assert len(res) == 3 - assert res[0]['pair'] == 'NEO/USDT' - assert res[0]['count'] == 1 - assert res[0]['profit_pct'] == 5.0 + assert res[0]["pair"] == "NEO/USDT" + assert res[0]["count"] == 1 + assert res[0]["profit_pct"] == 5.0 def test_enter_tag_performance_handle(default_conf, ticker, fee, mocker) -> None: - - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), @@ -893,24 +903,21 @@ def test_enter_tag_performance_handle(default_conf, ticker, fee, mocker) -> None res = rpc._rpc_enter_tag_performance(None) assert len(res) == 3 - assert res[0]['enter_tag'] == 'TEST1' - assert res[0]['count'] == 1 - assert res[0]['profit_pct'] == 5.0 + assert res[0]["enter_tag"] == "TEST1" + assert res[0]["count"] == 1 + assert res[0]["profit_pct"] == 5.0 res = rpc._rpc_enter_tag_performance(None) assert len(res) == 3 - assert res[0]['enter_tag'] == 'TEST1' - assert res[0]['count'] == 1 - assert res[0]['profit_pct'] == 5.0 + assert res[0]["enter_tag"] == "TEST1" + assert res[0]["count"] == 1 + assert res[0]["profit_pct"] == 5.0 def test_enter_tag_performance_handle_2(mocker, default_conf, markets, fee): - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - markets=PropertyMock(return_value=markets) - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=markets)) freqtradebot = get_patched_freqtradebot(mocker, default_conf) create_mock_trades(fee) @@ -919,23 +926,23 @@ def test_enter_tag_performance_handle_2(mocker, default_conf, markets, fee): res = rpc._rpc_enter_tag_performance(None) assert len(res) == 2 - assert res[0]['enter_tag'] == 'TEST1' - assert res[0]['count'] == 1 - assert pytest.approx(res[0]['profit_pct']) == 0.5 - assert res[1]['enter_tag'] == 'Other' - assert res[1]['count'] == 1 - assert pytest.approx(res[1]['profit_pct']) == 1.0 + assert res[0]["enter_tag"] == "TEST1" + assert res[0]["count"] == 1 + assert pytest.approx(res[0]["profit_pct"]) == 0.5 + assert res[1]["enter_tag"] == "Other" + assert res[1]["count"] == 1 + assert pytest.approx(res[1]["profit_pct"]) == 1.0 # Test for a specific pair - res = rpc._rpc_enter_tag_performance('ETC/BTC') + res = rpc._rpc_enter_tag_performance("ETC/BTC") assert len(res) == 1 - assert res[0]['count'] == 1 - assert res[0]['enter_tag'] == 'TEST1' - assert pytest.approx(res[0]['profit_pct']) == 0.5 + assert res[0]["count"] == 1 + assert res[0]["enter_tag"] == "TEST1" + assert pytest.approx(res[0]["profit_pct"]) == 0.5 def 
test_exit_reason_performance_handle(default_conf_usdt, ticker, fee, mocker) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), @@ -953,20 +960,17 @@ def test_exit_reason_performance_handle(default_conf_usdt, ticker, fee, mocker) res = rpc._rpc_exit_reason_performance(None) assert len(res) == 3 - assert res[0]['exit_reason'] == 'exit_signal' - assert res[0]['count'] == 1 - assert res[0]['profit_pct'] == 5.0 + assert res[0]["exit_reason"] == "exit_signal" + assert res[0]["count"] == 1 + assert res[0]["profit_pct"] == 5.0 - assert res[1]['exit_reason'] == 'roi' - assert res[2]['exit_reason'] == 'Other' + assert res[1]["exit_reason"] == "roi" + assert res[2]["exit_reason"] == "Other" def test_exit_reason_performance_handle_2(mocker, default_conf, markets, fee): - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - markets=PropertyMock(return_value=markets) - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=markets)) freqtradebot = get_patched_freqtradebot(mocker, default_conf) create_mock_trades(fee) @@ -975,23 +979,23 @@ def test_exit_reason_performance_handle_2(mocker, default_conf, markets, fee): res = rpc._rpc_exit_reason_performance(None) assert len(res) == 2 - assert res[0]['exit_reason'] == 'sell_signal' - assert res[0]['count'] == 1 - assert pytest.approx(res[0]['profit_pct']) == 0.5 - assert res[1]['exit_reason'] == 'roi' - assert res[1]['count'] == 1 - assert pytest.approx(res[1]['profit_pct']) == 1.0 + assert res[0]["exit_reason"] == "sell_signal" + assert res[0]["count"] == 1 + assert pytest.approx(res[0]["profit_pct"]) == 0.5 + assert res[1]["exit_reason"] == "roi" + assert res[1]["count"] == 1 + assert pytest.approx(res[1]["profit_pct"]) == 1.0 # Test for a specific pair - res = rpc._rpc_exit_reason_performance('ETC/BTC') + res = rpc._rpc_exit_reason_performance("ETC/BTC") assert len(res) == 1 - assert res[0]['count'] == 1 - assert res[0]['exit_reason'] == 'sell_signal' - assert pytest.approx(res[0]['profit_pct']) == 0.5 + assert res[0]["count"] == 1 + assert res[0]["exit_reason"] == "sell_signal" + assert pytest.approx(res[0]["profit_pct"]) == 0.5 def test_mix_tag_performance_handle(default_conf, ticker, fee, mocker) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), @@ -1009,17 +1013,14 @@ def test_mix_tag_performance_handle(default_conf, ticker, fee, mocker) -> None: res = rpc._rpc_mix_tag_performance(None) assert len(res) == 3 - assert res[0]['mix_tag'] == 'TEST1 exit_signal' - assert res[0]['count'] == 1 - assert res[0]['profit_pct'] == 5.0 + assert res[0]["mix_tag"] == "TEST1 exit_signal" + assert res[0]["count"] == 1 + assert res[0]["profit_pct"] == 5.0 def test_mix_tag_performance_handle_2(mocker, default_conf, markets, fee): - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - mocker.patch.multiple( - EXMS, - markets=PropertyMock(return_value=markets) - ) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=markets)) freqtradebot = get_patched_freqtradebot(mocker, default_conf) create_mock_trades(fee) @@ -1028,24 +1029,24 @@ def 
test_mix_tag_performance_handle_2(mocker, default_conf, markets, fee): res = rpc._rpc_mix_tag_performance(None) assert len(res) == 2 - assert res[0]['mix_tag'] == 'TEST1 sell_signal' - assert res[0]['count'] == 1 - assert pytest.approx(res[0]['profit_pct']) == 0.5 - assert res[1]['mix_tag'] == 'Other roi' - assert res[1]['count'] == 1 - assert pytest.approx(res[1]['profit_pct']) == 1.0 + assert res[0]["mix_tag"] == "TEST1 sell_signal" + assert res[0]["count"] == 1 + assert pytest.approx(res[0]["profit_pct"]) == 0.5 + assert res[1]["mix_tag"] == "Other roi" + assert res[1]["count"] == 1 + assert pytest.approx(res[1]["profit_pct"]) == 1.0 # Test for a specific pair - res = rpc._rpc_mix_tag_performance('ETC/BTC') + res = rpc._rpc_mix_tag_performance("ETC/BTC") assert len(res) == 1 - assert res[0]['count'] == 1 - assert res[0]['mix_tag'] == 'TEST1 sell_signal' - assert pytest.approx(res[0]['profit_pct']) == 0.5 + assert res[0]["count"] == 1 + assert res[0]["mix_tag"] == "TEST1 sell_signal" + assert pytest.approx(res[0]["profit_pct"]) == 0.5 def test_rpc_count(mocker, default_conf, ticker, fee) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), @@ -1067,104 +1068,104 @@ def test_rpc_count(mocker, default_conf, ticker, fee) -> None: def test_rpc_force_entry(mocker, default_conf, ticker, fee, limit_buy_order_open) -> None: - default_conf['force_entry_enable'] = True - default_conf['max_open_trades'] = 0 - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + default_conf["force_entry_enable"] = True + default_conf["max_open_trades"] = 0 + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) buy_mm = MagicMock(return_value=limit_buy_order_open) mocker.patch.multiple( EXMS, get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - create_order=buy_mm + create_order=buy_mm, ) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - pair = 'ETH/BTC' - with pytest.raises(RPCException, match='Maximum number of trades is reached.'): + pair = "ETH/BTC" + with pytest.raises(RPCException, match="Maximum number of trades is reached."): rpc._rpc_force_entry(pair, None) - freqtradebot.config['max_open_trades'] = 5 + freqtradebot.config["max_open_trades"] = 5 trade = rpc._rpc_force_entry(pair, None) assert isinstance(trade, Trade) assert trade.pair == pair - assert trade.open_rate == ticker()['bid'] + assert trade.open_rate == ticker()["bid"] # Test buy duplicate - with pytest.raises(RPCException, match=r'position for ETH/BTC already open - id: 1'): + with pytest.raises(RPCException, match=r"position for ETH/BTC already open - id: 1"): rpc._rpc_force_entry(pair, 0.0001) - pair = 'XRP/BTC' - trade = rpc._rpc_force_entry(pair, 0.0001, order_type='limit') + pair = "XRP/BTC" + trade = rpc._rpc_force_entry(pair, 0.0001, order_type="limit") assert isinstance(trade, Trade) assert trade.pair == pair assert trade.open_rate == 0.0001 - with pytest.raises(RPCException, - match=r'Symbol does not exist or market is not active.'): - rpc._rpc_force_entry('LTC/NOTHING', 0.0001) + with pytest.raises(RPCException, match=r"Symbol does not exist or market is not active."): + rpc._rpc_force_entry("LTC/NOTHING", 0.0001) # Test buy pair not with stakes - with pytest.raises(RPCException, - match=r'Wrong pair selected. 
Only pairs with stake-currency.*'): - rpc._rpc_force_entry('LTC/ETH', 0.0001) + with pytest.raises( + RPCException, match=r"Wrong pair selected. Only pairs with stake-currency.*" + ): + rpc._rpc_force_entry("LTC/ETH", 0.0001) # Test with defined stake_amount - pair = 'LTC/BTC' - trade = rpc._rpc_force_entry(pair, 0.0001, order_type='limit', stake_amount=0.05) + pair = "LTC/BTC" + trade = rpc._rpc_force_entry(pair, 0.0001, order_type="limit", stake_amount=0.05) assert trade.stake_amount == 0.05 - assert trade.buy_tag == 'force_entry' + assert trade.buy_tag == "force_entry" - assert trade.open_orders_ids[-1] == 'mocked_limit_buy' + assert trade.open_orders_ids[-1] == "mocked_limit_buy" freqtradebot.strategy.position_adjustment_enable = True - with pytest.raises(RPCException, match=r'position for LTC/BTC already open.*open order.*'): - rpc._rpc_force_entry(pair, 0.0001, order_type='limit', stake_amount=0.05) + with pytest.raises(RPCException, match=r"position for LTC/BTC already open.*open order.*"): + rpc._rpc_force_entry(pair, 0.0001, order_type="limit", stake_amount=0.05) # Test not buying - pair = 'XRP/BTC' + pair = "XRP/BTC" freqtradebot = get_patched_freqtradebot(mocker, default_conf) - freqtradebot.config['stake_amount'] = 0 + freqtradebot.config["stake_amount"] = 0 patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - pair = 'TKN/BTC' + pair = "TKN/BTC" with pytest.raises(RPCException, match=r"Failed to enter position for TKN/BTC."): trade = rpc._rpc_force_entry(pair, None) def test_rpc_force_entry_stopped(mocker, default_conf) -> None: - default_conf['force_entry_enable'] = True - default_conf['initial_state'] = 'stopped' - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + default_conf["force_entry_enable"] = True + default_conf["initial_state"] = "stopped" + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - pair = 'ETH/BTC' - with pytest.raises(RPCException, match=r'trader is not running'): + pair = "ETH/BTC" + with pytest.raises(RPCException, match=r"trader is not running"): rpc._rpc_force_entry(pair, None) def test_rpc_force_entry_disabled(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - pair = 'ETH/BTC' - with pytest.raises(RPCException, match=r'Force_entry not enabled.'): + pair = "ETH/BTC" + with pytest.raises(RPCException, match=r"Force_entry not enabled."): rpc._rpc_force_entry(pair, None) def test_rpc_force_entry_wrong_mode(mocker, default_conf) -> None: - default_conf['force_entry_enable'] = True - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + default_conf["force_entry_enable"] = True + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) patch_get_signal(freqtradebot) rpc = RPC(freqtradebot) - pair = 'ETH/BTC' + pair = "ETH/BTC" with pytest.raises(RPCException, match="Can't go short on Spot markets."): rpc._rpc_force_entry(pair, None, order_side=SignalDirection.SHORT) @@ -1173,134 +1174,140 @@ def test_rpc_force_entry_wrong_mode(mocker, default_conf) -> None: def test_rpc_add_and_delete_lock(mocker, default_conf): freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) - pair = 'ETH/BTC' + 
pair = "ETH/BTC" - rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=4), '', '*') - rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=5), '', '*') - rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=10), '', '*') + rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=4), "", "*") + rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=5), "", "*") + rpc._rpc_add_lock(pair, datetime.now(timezone.utc) + timedelta(minutes=10), "", "*") locks = rpc._rpc_locks() - assert locks['lock_count'] == 3 - locks1 = rpc._rpc_delete_lock(lockid=locks['locks'][0]['id']) - assert locks1['lock_count'] == 2 + assert locks["lock_count"] == 3 + locks1 = rpc._rpc_delete_lock(lockid=locks["locks"][0]["id"]) + assert locks1["lock_count"] == 2 locks2 = rpc._rpc_delete_lock(pair=pair) - assert locks2['lock_count'] == 0 + assert locks2["lock_count"] == 0 def test_rpc_whitelist(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) ret = rpc._rpc_whitelist() - assert len(ret['method']) == 1 - assert 'StaticPairList' in ret['method'] - assert ret['whitelist'] == default_conf['exchange']['pair_whitelist'] + assert len(ret["method"]) == 1 + assert "StaticPairList" in ret["method"] + assert ret["whitelist"] == default_conf["exchange"]["pair_whitelist"] def test_rpc_whitelist_dynamic(mocker, default_conf) -> None: - default_conf['pairlists'] = [{'method': 'VolumePairList', - 'number_assets': 4, - }] - mocker.patch(f'{EXMS}.exchange_has', MagicMock(return_value=True)) - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + default_conf["pairlists"] = [ + { + "method": "VolumePairList", + "number_assets": 4, + } + ] + mocker.patch(f"{EXMS}.exchange_has", MagicMock(return_value=True)) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) ret = rpc._rpc_whitelist() - assert len(ret['method']) == 1 - assert 'VolumePairList' in ret['method'] - assert ret['length'] == 4 - assert ret['whitelist'] == default_conf['exchange']['pair_whitelist'] + assert len(ret["method"]) == 1 + assert "VolumePairList" in ret["method"] + assert ret["length"] == 4 + assert ret["whitelist"] == default_conf["exchange"]["pair_whitelist"] def test_rpc_blacklist(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) ret = rpc._rpc_blacklist(None) - assert len(ret['method']) == 1 - assert 'StaticPairList' in ret['method'] - assert len(ret['blacklist']) == 2 - assert ret['blacklist'] == default_conf['exchange']['pair_blacklist'] - assert ret['blacklist'] == ['DOGE/BTC', 'HOT/BTC'] + assert len(ret["method"]) == 1 + assert "StaticPairList" in ret["method"] + assert len(ret["blacklist"]) == 2 + assert ret["blacklist"] == default_conf["exchange"]["pair_blacklist"] + assert ret["blacklist"] == ["DOGE/BTC", "HOT/BTC"] ret = rpc._rpc_blacklist(["ETH/BTC"]) - assert 'StaticPairList' in ret['method'] - assert len(ret['blacklist']) == 3 - assert ret['blacklist'] == default_conf['exchange']['pair_blacklist'] - assert ret['blacklist'] == ['DOGE/BTC', 'HOT/BTC', 'ETH/BTC'] + assert "StaticPairList" in 
ret["method"] + assert len(ret["blacklist"]) == 3 + assert ret["blacklist"] == default_conf["exchange"]["pair_blacklist"] + assert ret["blacklist"] == ["DOGE/BTC", "HOT/BTC", "ETH/BTC"] ret = rpc._rpc_blacklist(["ETH/BTC"]) - assert 'errors' in ret - assert isinstance(ret['errors'], dict) - assert ret['errors']['ETH/BTC']['error_msg'] == 'Pair ETH/BTC already in pairlist.' + assert "errors" in ret + assert isinstance(ret["errors"], dict) + assert ret["errors"]["ETH/BTC"]["error_msg"] == "Pair ETH/BTC already in pairlist." ret = rpc._rpc_blacklist(["*/BTC"]) - assert 'StaticPairList' in ret['method'] - assert len(ret['blacklist']) == 3 - assert ret['blacklist'] == default_conf['exchange']['pair_blacklist'] - assert ret['blacklist'] == ['DOGE/BTC', 'HOT/BTC', 'ETH/BTC'] - assert ret['blacklist_expanded'] == ['ETH/BTC'] - assert 'errors' in ret - assert isinstance(ret['errors'], dict) - assert ret['errors'] == {'*/BTC': {'error_msg': 'Pair */BTC is not a valid wildcard.'}} + assert "StaticPairList" in ret["method"] + assert len(ret["blacklist"]) == 3 + assert ret["blacklist"] == default_conf["exchange"]["pair_blacklist"] + assert ret["blacklist"] == ["DOGE/BTC", "HOT/BTC", "ETH/BTC"] + assert ret["blacklist_expanded"] == ["ETH/BTC"] + assert "errors" in ret + assert isinstance(ret["errors"], dict) + assert ret["errors"] == {"*/BTC": {"error_msg": "Pair */BTC is not a valid wildcard."}} ret = rpc._rpc_blacklist(["XRP/.*"]) - assert 'StaticPairList' in ret['method'] - assert len(ret['blacklist']) == 4 - assert ret['blacklist'] == default_conf['exchange']['pair_blacklist'] - assert ret['blacklist'] == ['DOGE/BTC', 'HOT/BTC', 'ETH/BTC', 'XRP/.*'] - assert ret['blacklist_expanded'] == ['ETH/BTC', 'XRP/BTC', 'XRP/USDT'] - assert 'errors' in ret - assert isinstance(ret['errors'], dict) + assert "StaticPairList" in ret["method"] + assert len(ret["blacklist"]) == 4 + assert ret["blacklist"] == default_conf["exchange"]["pair_blacklist"] + assert ret["blacklist"] == ["DOGE/BTC", "HOT/BTC", "ETH/BTC", "XRP/.*"] + assert ret["blacklist_expanded"] == ["ETH/BTC", "XRP/BTC", "XRP/USDT"] + assert "errors" in ret + assert isinstance(ret["errors"], dict) - ret = rpc._rpc_blacklist_delete(["DOGE/BTC", 'HOT/BTC']) + ret = rpc._rpc_blacklist_delete(["DOGE/BTC", "HOT/BTC"]) - assert 'StaticPairList' in ret['method'] - assert len(ret['blacklist']) == 2 - assert ret['blacklist'] == default_conf['exchange']['pair_blacklist'] - assert ret['blacklist'] == ['ETH/BTC', 'XRP/.*'] - assert ret['blacklist_expanded'] == ['ETH/BTC', 'XRP/BTC', 'XRP/USDT'] - assert 'errors' in ret - assert isinstance(ret['errors'], dict) + assert "StaticPairList" in ret["method"] + assert len(ret["blacklist"]) == 2 + assert ret["blacklist"] == default_conf["exchange"]["pair_blacklist"] + assert ret["blacklist"] == ["ETH/BTC", "XRP/.*"] + assert ret["blacklist_expanded"] == ["ETH/BTC", "XRP/BTC", "XRP/USDT"] + assert "errors" in ret + assert isinstance(ret["errors"], dict) def test_rpc_edge_disabled(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(freqtradebot) - with pytest.raises(RPCException, match=r'Edge is not enabled.'): + with pytest.raises(RPCException, match=r"Edge is not enabled."): rpc._rpc_edge() def test_rpc_edge_enabled(mocker, edge_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) - 
mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock( - return_value={ - 'E/F': PairInfo(-0.02, 0.66, 3.71, 0.50, 1.71, 10, 60), - } - )) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) + mocker.patch( + "freqtrade.edge.Edge._cached_pairs", + mocker.PropertyMock( + return_value={ + "E/F": PairInfo(-0.02, 0.66, 3.71, 0.50, 1.71, 10, 60), + } + ), + ) freqtradebot = get_patched_freqtradebot(mocker, edge_conf) rpc = RPC(freqtradebot) ret = rpc._rpc_edge() assert len(ret) == 1 - assert ret[0]['Pair'] == 'E/F' - assert ret[0]['Winrate'] == 0.66 - assert ret[0]['Expectancy'] == 1.71 - assert ret[0]['Stoploss'] == -0.02 + assert ret[0]["Pair"] == "E/F" + assert ret[0]["Winrate"] == 0.66 + assert ret[0]["Expectancy"] == 1.71 + assert ret[0]["Stoploss"] == -0.02 def test_rpc_health(mocker, default_conf) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) set_startup_time() rpc = RPC(freqtradebot) result = rpc.health() - assert result['last_process'] is None - assert result['last_process_ts'] is None + assert result["last_process"] is None + assert result["last_process_ts"] is None diff --git a/tests/rpc/test_rpc_apiserver.py b/tests/rpc/test_rpc_apiserver.py index 5b189371a..ffb2408f1 100644 --- a/tests/rpc/test_rpc_apiserver.py +++ b/tests/rpc/test_rpc_apiserver.py @@ -1,6 +1,7 @@ """ Unit test file for rpc/api_server.py """ + import asyncio import logging import time @@ -30,8 +31,16 @@ from freqtrade.rpc.api_server.api_auth import create_token, get_user_from_token from freqtrade.rpc.api_server.uvicorn_threaded import UvicornServer from freqtrade.rpc.api_server.webserver_bgwork import ApiBG from freqtrade.util.datetime_helpers import format_date -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, create_mock_trades, get_mock_coro, - get_patched_freqtradebot, log_has, log_has_re, patch_get_signal) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + create_mock_trades, + get_mock_coro, + get_patched_freqtradebot, + log_has, + log_has_re, + patch_get_signal, +) BASE_URI = "/api/v1" @@ -44,19 +53,24 @@ _TEST_WS_TOKEN = "secret_Ws_t0ken" def botclient(default_conf, mocker): setup_logging_pre() setup_logging(default_conf) - default_conf['runmode'] = RunMode.DRY_RUN - default_conf.update({"api_server": {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "CORS_origins": ['http://example.com'], - "username": _TEST_USER, - "password": _TEST_PASS, - "ws_token": _TEST_WS_TOKEN - }}) + default_conf["runmode"] = RunMode.DRY_RUN + default_conf.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "CORS_origins": ["http://example.com"], + "username": _TEST_USER, + "password": _TEST_PASS, + "ws_token": _TEST_WS_TOKEN, + } + } + ) ftbot = get_patched_freqtradebot(mocker, default_conf) rpc = RPC(ftbot) - mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api', MagicMock()) + mocker.patch("freqtrade.rpc.api_server.ApiServer.start_api", MagicMock()) apiserver = None try: apiserver = ApiServer(default_conf) @@ -73,47 +87,61 @@ def botclient(default_conf, mocker): def client_post(client: TestClient, url, data=None): - if data is None: data = {} - return client.post(url, - json=data, - headers={'Authorization': _basic_auth_str(_TEST_USER, _TEST_PASS), - 'Origin': 'http://example.com', - 'content-type': 'application/json' - }) + return 
client.post( + url, + json=data, + headers={ + "Authorization": _basic_auth_str(_TEST_USER, _TEST_PASS), + "Origin": "http://example.com", + "content-type": "application/json", + }, + ) def client_patch(client: TestClient, url, data=None): - if data is None: data = {} - return client.patch(url, - json=data, - headers={'Authorization': _basic_auth_str(_TEST_USER, _TEST_PASS), - 'Origin': 'http://example.com', - 'content-type': 'application/json' - }) + return client.patch( + url, + json=data, + headers={ + "Authorization": _basic_auth_str(_TEST_USER, _TEST_PASS), + "Origin": "http://example.com", + "content-type": "application/json", + }, + ) def client_get(client: TestClient, url): # Add fake Origin to ensure CORS kicks in - return client.get(url, headers={'Authorization': _basic_auth_str(_TEST_USER, _TEST_PASS), - 'Origin': 'http://example.com'}) + return client.get( + url, + headers={ + "Authorization": _basic_auth_str(_TEST_USER, _TEST_PASS), + "Origin": "http://example.com", + }, + ) def client_delete(client: TestClient, url): # Add fake Origin to ensure CORS kicks in - return client.delete(url, headers={'Authorization': _basic_auth_str(_TEST_USER, _TEST_PASS), - 'Origin': 'http://example.com'}) + return client.delete( + url, + headers={ + "Authorization": _basic_auth_str(_TEST_USER, _TEST_PASS), + "Origin": "http://example.com", + }, + ) def assert_response(response, expected_code=200, needs_cors=True): assert response.status_code == expected_code - assert response.headers.get('content-type') == "application/json" + assert response.headers.get("content-type") == "application/json" if needs_cors: - assert ('access-control-allow-credentials', 'true') in response.headers.items() - assert ('access-control-allow-origin', 'http://example.com') in response.headers.items() + assert ("access-control-allow-credentials", "true") in response.headers.items() + assert ("access-control-allow-origin", "http://example.com") in response.headers.items() def test_api_not_found(botclient): @@ -132,7 +160,7 @@ def test_api_ui_fallback(botclient, mocker): rc = client_get(client, "/fallback_file.html") assert rc.status_code == 200 - assert '`freqtrade install-ui`' in rc.text + assert "`freqtrade install-ui`" in rc.text # Forwarded to fallback_html or index.html (depending if it's installed or not) rc = client_get(client, "/something") @@ -142,45 +170,45 @@ def test_api_ui_fallback(botclient, mocker): assert rc.status_code == 200 # Test directory traversal without mock - rc = client_get(client, '%2F%2F%2Fetc/passwd') + rc = client_get(client, "%2F%2F%2Fetc/passwd") assert rc.status_code == 200 # Allow both fallback or real UI - assert '`freqtrade install-ui`' in rc.text or '' in rc.text + assert "`freqtrade install-ui`" in rc.text or "" in rc.text - mocker.patch.object(Path, 'is_file', MagicMock(side_effect=[True, False])) - rc = client_get(client, '%2F%2F%2Fetc/passwd') + mocker.patch.object(Path, "is_file", MagicMock(side_effect=[True, False])) + rc = client_get(client, "%2F%2F%2Fetc/passwd") assert rc.status_code == 200 - assert '`freqtrade install-ui`' in rc.text + assert "`freqtrade install-ui`" in rc.text def test_api_ui_version(botclient, mocker): _ftbot, client = botclient - mocker.patch('freqtrade.commands.deploy_commands.read_ui_version', return_value='0.1.2') + mocker.patch("freqtrade.commands.deploy_commands.read_ui_version", return_value="0.1.2") rc = client_get(client, "/ui_version") assert rc.status_code == 200 - assert rc.json()['version'] == '0.1.2' + assert rc.json()["version"] == "0.1.2" 
def test_api_auth(): with pytest.raises(ValueError): - create_token({'identity': {'u': 'Freqtrade'}}, 'secret1234', token_type="NotATokenType") + create_token({"identity": {"u": "Freqtrade"}}, "secret1234", token_type="NotATokenType") - token = create_token({'identity': {'u': 'Freqtrade'}}, 'secret1234') + token = create_token({"identity": {"u": "Freqtrade"}}, "secret1234") assert isinstance(token, str) - u = get_user_from_token(token, 'secret1234') - assert u == 'Freqtrade' + u = get_user_from_token(token, "secret1234") + assert u == "Freqtrade" with pytest.raises(HTTPException): - get_user_from_token(token, 'secret1234', token_type='refresh') + get_user_from_token(token, "secret1234", token_type="refresh") # Create invalid token - token = create_token({'identity': {'u1': 'Freqrade'}}, 'secret1234') + token = create_token({"identity": {"u1": "Freqrade"}}, "secret1234") with pytest.raises(HTTPException): - get_user_from_token(token, 'secret1234') + get_user_from_token(token, "secret1234") with pytest.raises(HTTPException): - get_user_from_token(b'not_a_token', 'secret1234') + get_user_from_token(b"not_a_token", "secret1234") def test_api_ws_auth(botclient): @@ -198,8 +226,8 @@ def test_api_ws_auth(botclient): with client.websocket_connect(url(good_token)) as websocket: pass - jwt_secret = ftbot.config['api_server'].get('jwt_secret_key', 'super-secret') - jwt_token = create_token({'identity': {'u': 'Freqtrade'}}, jwt_secret) + jwt_secret = ftbot.config["api_server"].get("jwt_secret_key", "super-secret") + jwt_token = create_token({"identity": {"u": "Freqtrade"}}, jwt_secret) with client.websocket_connect(url(jwt_token)) as websocket: pass @@ -208,50 +236,58 @@ def test_api_unauthorized(botclient): ftbot, client = botclient rc = client.get(f"{BASE_URI}/ping") assert_response(rc, needs_cors=False) - assert rc.json() == {'status': 'pong'} + assert rc.json() == {"status": "pong"} # Don't send user/pass information rc = client.get(f"{BASE_URI}/version") assert_response(rc, 401, needs_cors=False) - assert rc.json() == {'detail': 'Unauthorized'} + assert rc.json() == {"detail": "Unauthorized"} # Change only username - ftbot.config['api_server']['username'] = 'Ftrader' + ftbot.config["api_server"]["username"] = "Ftrader" rc = client_get(client, f"{BASE_URI}/version") assert_response(rc, 401) - assert rc.json() == {'detail': 'Unauthorized'} + assert rc.json() == {"detail": "Unauthorized"} # Change only password - ftbot.config['api_server']['username'] = _TEST_USER - ftbot.config['api_server']['password'] = 'WrongPassword' + ftbot.config["api_server"]["username"] = _TEST_USER + ftbot.config["api_server"]["password"] = "WrongPassword" rc = client_get(client, f"{BASE_URI}/version") assert_response(rc, 401) - assert rc.json() == {'detail': 'Unauthorized'} + assert rc.json() == {"detail": "Unauthorized"} - ftbot.config['api_server']['username'] = 'Ftrader' - ftbot.config['api_server']['password'] = 'WrongPassword' + ftbot.config["api_server"]["username"] = "Ftrader" + ftbot.config["api_server"]["password"] = "WrongPassword" rc = client_get(client, f"{BASE_URI}/version") assert_response(rc, 401) - assert rc.json() == {'detail': 'Unauthorized'} + assert rc.json() == {"detail": "Unauthorized"} def test_api_token_login(botclient): _ftbot, client = botclient - rc = client.post(f"{BASE_URI}/token/login", - data=None, - headers={'Authorization': _basic_auth_str('WRONG_USER', 'WRONG_PASS'), - 'Origin': 'http://example.com'}) + rc = client.post( + f"{BASE_URI}/token/login", + data=None, + headers={ + 
"Authorization": _basic_auth_str("WRONG_USER", "WRONG_PASS"), + "Origin": "http://example.com", + }, + ) assert_response(rc, 401) rc = client_post(client, f"{BASE_URI}/token/login") assert_response(rc) - assert 'access_token' in rc.json() - assert 'refresh_token' in rc.json() + assert "access_token" in rc.json() + assert "refresh_token" in rc.json() # test Authentication is working with JWT tokens too - rc = client.get(f"{BASE_URI}/count", - headers={'Authorization': f'Bearer {rc.json()["access_token"]}', - 'Origin': 'http://example.com'}) + rc = client.get( + f"{BASE_URI}/count", + headers={ + "Authorization": f'Bearer {rc.json()["access_token"]}', + "Origin": "http://example.com", + }, + ) assert_response(rc) @@ -259,13 +295,17 @@ def test_api_token_refresh(botclient): _ftbot, client = botclient rc = client_post(client, f"{BASE_URI}/token/login") assert_response(rc) - rc = client.post(f"{BASE_URI}/token/refresh", - data=None, - headers={'Authorization': f'Bearer {rc.json()["refresh_token"]}', - 'Origin': 'http://example.com'}) + rc = client.post( + f"{BASE_URI}/token/refresh", + data=None, + headers={ + "Authorization": f'Bearer {rc.json()["refresh_token"]}', + "Origin": "http://example.com", + }, + ) assert_response(rc) - assert 'access_token' in rc.json() - assert 'refresh_token' not in rc.json() + assert "access_token" in rc.json() + assert "refresh_token" not in rc.json() def test_api_stop_workflow(botclient): @@ -273,38 +313,43 @@ def test_api_stop_workflow(botclient): assert ftbot.state == State.RUNNING rc = client_post(client, f"{BASE_URI}/stop") assert_response(rc) - assert rc.json() == {'status': 'stopping trader ...'} + assert rc.json() == {"status": "stopping trader ..."} assert ftbot.state == State.STOPPED # Stop bot again rc = client_post(client, f"{BASE_URI}/stop") assert_response(rc) - assert rc.json() == {'status': 'already stopped'} + assert rc.json() == {"status": "already stopped"} # Start bot rc = client_post(client, f"{BASE_URI}/start") assert_response(rc) - assert rc.json() == {'status': 'starting trader ...'} + assert rc.json() == {"status": "starting trader ..."} assert ftbot.state == State.RUNNING # Call start again rc = client_post(client, f"{BASE_URI}/start") assert_response(rc) - assert rc.json() == {'status': 'already running'} + assert rc.json() == {"status": "already running"} def test_api__init__(default_conf, mocker): """ Test __init__() method """ - default_conf.update({"api_server": {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "username": "TestUser", - "password": "testPass", - }}) - mocker.patch('freqtrade.rpc.telegram.Telegram._init') - mocker.patch('freqtrade.rpc.api_server.webserver.ApiServer.start_api', MagicMock()) + default_conf.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "username": "TestUser", + "password": "testPass", + } + } + ) + mocker.patch("freqtrade.rpc.telegram.Telegram._init") + mocker.patch("freqtrade.rpc.api_server.webserver.ApiServer.start_api", MagicMock()) apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) assert apiserver._config == default_conf @@ -316,8 +361,8 @@ def test_api__init__(default_conf, mocker): def test_api_UvicornServer(mocker): - thread_mock = mocker.patch('freqtrade.rpc.api_server.uvicorn_threaded.threading.Thread') - s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host='127.0.0.1')) + thread_mock = 
mocker.patch("freqtrade.rpc.api_server.uvicorn_threaded.threading.Thread") + s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host="127.0.0.1")) assert thread_mock.call_count == 0 # Fake started to avoid sleeping forever @@ -330,9 +375,10 @@ def test_api_UvicornServer(mocker): def test_api_UvicornServer_run(mocker): - serve_mock = mocker.patch('freqtrade.rpc.api_server.uvicorn_threaded.UvicornServer.serve', - get_mock_coro(None)) - s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host='127.0.0.1')) + serve_mock = mocker.patch( + "freqtrade.rpc.api_server.uvicorn_threaded.UvicornServer.serve", get_mock_coro(None) + ) + s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host="127.0.0.1")) assert serve_mock.call_count == 0 # Fake started to avoid sleeping forever @@ -342,10 +388,11 @@ def test_api_UvicornServer_run(mocker): def test_api_UvicornServer_run_no_uvloop(mocker, import_fails): - serve_mock = mocker.patch('freqtrade.rpc.api_server.uvicorn_threaded.UvicornServer.serve', - get_mock_coro(None)) + serve_mock = mocker.patch( + "freqtrade.rpc.api_server.uvicorn_threaded.UvicornServer.serve", get_mock_coro(None) + ) asyncio.set_event_loop(asyncio.new_event_loop()) - s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host='127.0.0.1')) + s = UvicornServer(uvicorn.Config(MagicMock(), port=8080, host="127.0.0.1")) assert serve_mock.call_count == 0 # Fake started to avoid sleeping forever @@ -355,19 +402,24 @@ def test_api_UvicornServer_run_no_uvloop(mocker, import_fails): def test_api_run(default_conf, mocker, caplog): - default_conf.update({"api_server": {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "username": "TestUser", - "password": "testPass", - }}) - mocker.patch('freqtrade.rpc.telegram.Telegram._init') + default_conf.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "username": "TestUser", + "password": "testPass", + } + } + ) + mocker.patch("freqtrade.rpc.telegram.Telegram._init") server_inst_mock = MagicMock() server_inst_mock.run_in_thread = MagicMock() server_inst_mock.run = MagicMock() server_mock = MagicMock(return_value=server_inst_mock) - mocker.patch('freqtrade.rpc.api_server.webserver.UvicornServer', server_mock) + mocker.patch("freqtrade.rpc.api_server.webserver.UvicornServer", server_mock) apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) @@ -388,11 +440,16 @@ def test_api_run(default_conf, mocker, caplog): # Test binding to public caplog.clear() server_mock.reset_mock() - apiserver._config.update({"api_server": {"enabled": True, - "listen_ip_address": "0.0.0.0", - "listen_port": 8089, - "password": "", - }}) + apiserver._config.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "0.0.0.0", + "listen_port": 8089, + "password": "", + } + } + ) apiserver.start_api() assert server_mock.call_count == 1 @@ -403,12 +460,17 @@ def test_api_run(default_conf, mocker, caplog): assert isinstance(server_mock.call_args_list[0][0][0].app, FastAPI) assert log_has("Starting HTTP Server at 0.0.0.0:8089", caplog) assert log_has("Starting Local Rest Server.", caplog) - assert log_has("SECURITY WARNING - Local Rest Server listening to external connections", - caplog) - assert log_has("SECURITY WARNING - This is insecure please set to your loopback," - "e.g 127.0.0.1 in config.json", caplog) - assert log_has("SECURITY WARNING - No password for local REST Server defined. 
" - "Please make sure that this is intentional!", caplog) + assert log_has("SECURITY WARNING - Local Rest Server listening to external connections", caplog) + assert log_has( + "SECURITY WARNING - This is insecure please set to your loopback," + "e.g 127.0.0.1 in config.json", + caplog, + ) + assert log_has( + "SECURITY WARNING - No password for local REST Server defined. " + "Please make sure that this is intentional!", + caplog, + ) assert log_has_re("SECURITY WARNING - `jwt_secret_key` seems to be default.*", caplog) server_mock.reset_mock() @@ -424,8 +486,9 @@ def test_api_run(default_conf, mocker, caplog): # Test crashing API server caplog.clear() - mocker.patch('freqtrade.rpc.api_server.webserver.UvicornServer', - MagicMock(side_effect=Exception)) + mocker.patch( + "freqtrade.rpc.api_server.webserver.UvicornServer", MagicMock(side_effect=Exception) + ) apiserver.start_api() assert log_has("Api server failed to start.", caplog) apiserver.cleanup() @@ -433,17 +496,22 @@ def test_api_run(default_conf, mocker, caplog): def test_api_cleanup(default_conf, mocker, caplog): - default_conf.update({"api_server": {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "username": "TestUser", - "password": "testPass", - }}) - mocker.patch('freqtrade.rpc.telegram.Telegram._init') + default_conf.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "username": "TestUser", + "password": "testPass", + } + } + ) + mocker.patch("freqtrade.rpc.telegram.Telegram._init") server_mock = MagicMock() server_mock.cleanup = MagicMock() - mocker.patch('freqtrade.rpc.api_server.webserver.UvicornServer', server_mock) + mocker.patch("freqtrade.rpc.api_server.webserver.UvicornServer", server_mock) apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) @@ -459,35 +527,36 @@ def test_api_reloadconf(botclient): rc = client_post(client, f"{BASE_URI}/reload_config") assert_response(rc) - assert rc.json() == {'status': 'Reloading config ...'} + assert rc.json() == {"status": "Reloading config ..."} assert ftbot.state == State.RELOAD_CONFIG def test_api_stopentry(botclient): ftbot, client = botclient - assert ftbot.config['max_open_trades'] != 0 + assert ftbot.config["max_open_trades"] != 0 rc = client_post(client, f"{BASE_URI}/stopbuy") assert_response(rc) assert rc.json() == { - 'status': 'No more entries will occur from now. Run /reload_config to reset.'} - assert ftbot.config['max_open_trades'] == 0 + "status": "No more entries will occur from now. Run /reload_config to reset." + } + assert ftbot.config["max_open_trades"] == 0 rc = client_post(client, f"{BASE_URI}/stopentry") assert_response(rc) assert rc.json() == { - 'status': 'No more entries will occur from now. Run /reload_config to reset.'} - assert ftbot.config['max_open_trades'] == 0 + "status": "No more entries will occur from now. Run /reload_config to reset." 
+ } + assert ftbot.config["max_open_trades"] == 0 def test_api_balance(botclient, mocker, rpc_balance, tickers): ftbot, client = botclient - ftbot.config['dry_run'] = False - mocker.patch(f'{EXMS}.get_balances', return_value=rpc_balance) - mocker.patch(f'{EXMS}.get_tickers', tickers) - mocker.patch(f'{EXMS}.get_valid_pair_combination', - side_effect=lambda a, b: f"{a}/{b}") + ftbot.config["dry_run"] = False + mocker.patch(f"{EXMS}.get_balances", return_value=rpc_balance) + mocker.patch(f"{EXMS}.get_tickers", tickers) + mocker.patch(f"{EXMS}.get_valid_pair_combination", side_effect=lambda a, b: f"{a}/{b}") ftbot.wallets.update() rc = client_get(client, f"{BASE_URI}/balance") @@ -495,30 +564,30 @@ def test_api_balance(botclient, mocker, rpc_balance, tickers): response = rc.json() assert "currencies" in response assert len(response["currencies"]) == 5 - assert response['currencies'][0] == { - 'currency': 'BTC', - 'free': 12.0, - 'balance': 12.0, - 'used': 0.0, - 'bot_owned': pytest.approx(11.879999), - 'est_stake': 12.0, - 'est_stake_bot': pytest.approx(11.879999), - 'stake': 'BTC', - 'is_position': False, - 'leverage': 1.0, - 'position': 0.0, - 'side': 'long', - 'is_bot_managed': True, + assert response["currencies"][0] == { + "currency": "BTC", + "free": 12.0, + "balance": 12.0, + "used": 0.0, + "bot_owned": pytest.approx(11.879999), + "est_stake": 12.0, + "est_stake_bot": pytest.approx(11.879999), + "stake": "BTC", + "is_position": False, + "leverage": 1.0, + "position": 0.0, + "side": "long", + "is_bot_managed": True, } - assert response['total'] == 12.159513094 - assert response['total_bot'] == pytest.approx(11.879999) - assert 'starting_capital' in response - assert 'starting_capital_fiat' in response - assert 'starting_capital_pct' in response - assert 'starting_capital_ratio' in response + assert response["total"] == 12.159513094 + assert response["total_bot"] == pytest.approx(11.879999) + assert "starting_capital" in response + assert "starting_capital_fiat" in response + assert "starting_capital_pct" in response + assert "starting_capital_ratio" in response -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_count(botclient, mocker, ticker, fee, markets, is_short): ftbot, client = botclient patch_get_signal(ftbot) @@ -527,7 +596,7 @@ def test_api_count(botclient, mocker, ticker, fee, markets, is_short): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) rc = client_get(client, f"{BASE_URI}/count") assert_response(rc) @@ -542,7 +611,7 @@ def test_api_count(botclient, mocker, ticker, fee, markets, is_short): assert rc.json()["current"] == 4 assert rc.json()["max"] == 1 - ftbot.config['max_open_trades'] = float('inf') + ftbot.config["max_open_trades"] = float("inf") rc = client_get(client, f"{BASE_URI}/count") assert rc.json()["max"] == -1 @@ -553,43 +622,47 @@ def test_api_locks(botclient): rc = client_get(client, f"{BASE_URI}/locks") assert_response(rc) - assert 'locks' in rc.json() + assert "locks" in rc.json() - assert rc.json()['lock_count'] == 0 - assert rc.json()['lock_count'] == len(rc.json()['locks']) + assert rc.json()["lock_count"] == 0 + assert rc.json()["lock_count"] == len(rc.json()["locks"]) - rc = client_post(client, f"{BASE_URI}/locks", [ - { - "pair": "ETH/BTC", - "until": f"{format_date(datetime.now(timezone.utc) + timedelta(minutes=4))}Z", - "reason": "randreason" - }, { - "pair": 
"XRP/BTC", - "until": f"{format_date(datetime.now(timezone.utc) + timedelta(minutes=20))}Z", - "reason": "deadbeef" - } - ]) + rc = client_post( + client, + f"{BASE_URI}/locks", + [ + { + "pair": "ETH/BTC", + "until": f"{format_date(datetime.now(timezone.utc) + timedelta(minutes=4))}Z", + "reason": "randreason", + }, + { + "pair": "XRP/BTC", + "until": f"{format_date(datetime.now(timezone.utc) + timedelta(minutes=20))}Z", + "reason": "deadbeef", + }, + ], + ) assert_response(rc) - assert rc.json()['lock_count'] == 2 + assert rc.json()["lock_count"] == 2 rc = client_get(client, f"{BASE_URI}/locks") assert_response(rc) - assert rc.json()['lock_count'] == 2 - assert rc.json()['lock_count'] == len(rc.json()['locks']) - assert 'ETH/BTC' in (rc.json()['locks'][0]['pair'], rc.json()['locks'][1]['pair']) - assert 'randreason' in (rc.json()['locks'][0]['reason'], rc.json()['locks'][1]['reason']) - assert 'deadbeef' in (rc.json()['locks'][0]['reason'], rc.json()['locks'][1]['reason']) + assert rc.json()["lock_count"] == 2 + assert rc.json()["lock_count"] == len(rc.json()["locks"]) + assert "ETH/BTC" in (rc.json()["locks"][0]["pair"], rc.json()["locks"][1]["pair"]) + assert "randreason" in (rc.json()["locks"][0]["reason"], rc.json()["locks"][1]["reason"]) + assert "deadbeef" in (rc.json()["locks"][0]["reason"], rc.json()["locks"][1]["reason"]) # Test deletions rc = client_delete(client, f"{BASE_URI}/locks/1") assert_response(rc) - assert rc.json()['lock_count'] == 1 + assert rc.json()["lock_count"] == 1 - rc = client_post(client, f"{BASE_URI}/locks/delete", - data={"pair": "XRP/BTC"}) + rc = client_post(client, f"{BASE_URI}/locks/delete", data={"pair": "XRP/BTC"}) assert_response(rc) - assert rc.json()['lock_count'] == 0 + assert rc.json()["lock_count"] == 0 def test_api_show_config(botclient): @@ -599,22 +672,22 @@ def test_api_show_config(botclient): rc = client_get(client, f"{BASE_URI}/show_config") assert_response(rc) response = rc.json() - assert 'dry_run' in response - assert response['exchange'] == 'binance' - assert response['timeframe'] == '5m' - assert response['timeframe_ms'] == 300000 - assert response['timeframe_min'] == 5 - assert response['state'] == 'running' - assert response['bot_name'] == 'freqtrade' - assert response['trading_mode'] == 'spot' - assert response['strategy_version'] is None - assert not response['trailing_stop'] - assert 'entry_pricing' in response - assert 'exit_pricing' in response - assert 'unfilledtimeout' in response - assert 'version' in response - assert 'api_version' in response - assert 2.1 <= response['api_version'] < 3.0 + assert "dry_run" in response + assert response["exchange"] == "binance" + assert response["timeframe"] == "5m" + assert response["timeframe_ms"] == 300000 + assert response["timeframe_min"] == 5 + assert response["state"] == "running" + assert response["bot_name"] == "freqtrade" + assert response["trading_mode"] == "spot" + assert response["strategy_version"] is None + assert not response["trailing_stop"] + assert "entry_pricing" in response + assert "exit_pricing" in response + assert "unfilledtimeout" in response + assert "version" in response + assert "api_version" in response + assert 2.1 <= response["api_version"] < 3.0 def test_api_daily(botclient, mocker, ticker, fee, markets): @@ -625,14 +698,14 @@ def test_api_daily(botclient, mocker, ticker, fee, markets): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) rc 
= client_get(client, f"{BASE_URI}/daily") assert_response(rc) - assert len(rc.json()['data']) == 7 - assert rc.json()['stake_currency'] == 'BTC' - assert rc.json()['fiat_display_currency'] == 'USD' - assert rc.json()['data'][0]['date'] == str(datetime.now(timezone.utc).date()) + assert len(rc.json()["data"]) == 7 + assert rc.json()["stake_currency"] == "BTC" + assert rc.json()["fiat_display_currency"] == "USD" + assert rc.json()["data"][0]["date"] == str(datetime.now(timezone.utc).date()) def test_api_weekly(botclient, mocker, ticker, fee, markets, time_machine): @@ -643,17 +716,17 @@ def test_api_weekly(botclient, mocker, ticker, fee, markets, time_machine): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) time_machine.move_to("2023-03-31 21:45:05 +00:00") rc = client_get(client, f"{BASE_URI}/weekly") assert_response(rc) - assert len(rc.json()['data']) == 4 - assert rc.json()['stake_currency'] == 'BTC' - assert rc.json()['fiat_display_currency'] == 'USD' + assert len(rc.json()["data"]) == 4 + assert rc.json()["stake_currency"] == "BTC" + assert rc.json()["fiat_display_currency"] == "USD" # Moved to monday - assert rc.json()['data'][0]['date'] == '2023-03-27' - assert rc.json()['data'][1]['date'] == '2023-03-20' + assert rc.json()["data"][0]["date"] == "2023-03-27" + assert rc.json()["data"][1]["date"] == "2023-03-20" def test_api_monthly(botclient, mocker, ticker, fee, markets, time_machine): @@ -664,50 +737,47 @@ def test_api_monthly(botclient, mocker, ticker, fee, markets, time_machine): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) time_machine.move_to("2023-03-31 21:45:05 +00:00") rc = client_get(client, f"{BASE_URI}/monthly") assert_response(rc) - assert len(rc.json()['data']) == 3 - assert rc.json()['stake_currency'] == 'BTC' - assert rc.json()['fiat_display_currency'] == 'USD' - assert rc.json()['data'][0]['date'] == '2023-03-01' - assert rc.json()['data'][1]['date'] == '2023-02-01' + assert len(rc.json()["data"]) == 3 + assert rc.json()["stake_currency"] == "BTC" + assert rc.json()["fiat_display_currency"] == "USD" + assert rc.json()["data"][0]["date"] == "2023-03-01" + assert rc.json()["data"][1]["date"] == "2023-02-01" -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_trades(botclient, mocker, fee, markets, is_short): ftbot, client = botclient patch_get_signal(ftbot) - mocker.patch.multiple( - EXMS, - markets=PropertyMock(return_value=markets) - ) + mocker.patch.multiple(EXMS, markets=PropertyMock(return_value=markets)) rc = client_get(client, f"{BASE_URI}/trades") assert_response(rc) assert len(rc.json()) == 4 - assert rc.json()['trades_count'] == 0 - assert rc.json()['total_trades'] == 0 - assert rc.json()['offset'] == 0 + assert rc.json()["trades_count"] == 0 + assert rc.json()["total_trades"] == 0 + assert rc.json()["offset"] == 0 create_mock_trades(fee, is_short=is_short) Trade.session.flush() rc = client_get(client, f"{BASE_URI}/trades") assert_response(rc) - assert len(rc.json()['trades']) == 2 - assert rc.json()['trades_count'] == 2 - assert rc.json()['total_trades'] == 2 - assert rc.json()['trades'][0]['is_short'] == is_short + assert len(rc.json()["trades"]) == 2 + assert rc.json()["trades_count"] == 2 + assert rc.json()["total_trades"] == 2 + assert 
rc.json()["trades"][0]["is_short"] == is_short rc = client_get(client, f"{BASE_URI}/trades?limit=1") assert_response(rc) - assert len(rc.json()['trades']) == 1 - assert rc.json()['trades_count'] == 1 - assert rc.json()['total_trades'] == 2 + assert len(rc.json()["trades"]) == 1 + assert rc.json()["trades_count"] == 1 + assert rc.json()["total_trades"] == 2 -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_trade_single(botclient, mocker, fee, ticker, markets, is_short): ftbot, client = botclient patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short) @@ -718,18 +788,18 @@ def test_api_trade_single(botclient, mocker, fee, ticker, markets, is_short): ) rc = client_get(client, f"{BASE_URI}/trade/3") assert_response(rc, 404) - assert rc.json()['detail'] == 'Trade not found.' + assert rc.json()["detail"] == "Trade not found." Trade.rollback() create_mock_trades(fee, is_short=is_short) rc = client_get(client, f"{BASE_URI}/trade/3") assert_response(rc) - assert rc.json()['trade_id'] == 3 - assert rc.json()['is_short'] == is_short + assert rc.json()["trade_id"] == 3 + assert rc.json()["is_short"] == is_short -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_delete_trade(botclient, mocker, fee, markets, is_short): ftbot, client = botclient patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short) @@ -744,14 +814,14 @@ def test_api_delete_trade(botclient, mocker, fee, markets, is_short): create_mock_trades(fee, is_short=is_short) - ftbot.strategy.order_types['stoploss_on_exchange'] = True + ftbot.strategy.order_types["stoploss_on_exchange"] = True trades = Trade.session.scalars(select(Trade)).all() Trade.commit() assert len(trades) > 2 rc = client_delete(client, f"{BASE_URI}/trades/1") assert_response(rc) - assert rc.json()['result_msg'] == 'Deleted trade 1. Closed 1 open orders.' + assert rc.json()["result_msg"] == "Deleted trade 1. Closed 1 open orders." assert len(trades) - 1 == len(Trade.session.scalars(select(Trade)).all()) assert cancel_mock.call_count == 1 @@ -764,7 +834,7 @@ def test_api_delete_trade(botclient, mocker, fee, markets, is_short): assert len(trades) - 1 == len(Trade.session.scalars(select(Trade)).all()) rc = client_delete(client, f"{BASE_URI}/trades/5") assert_response(rc) - assert rc.json()['result_msg'] == 'Deleted trade 5. Closed 1 open orders.' + assert rc.json()["result_msg"] == "Deleted trade 5. Closed 1 open orders." assert len(trades) - 2 == len(Trade.session.scalars(select(Trade)).all()) assert stoploss_mock.call_count == 1 @@ -773,7 +843,7 @@ def test_api_delete_trade(botclient, mocker, fee, markets, is_short): assert_response(rc, 502) -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_delete_open_order(botclient, mocker, fee, markets, ticker, is_short): ftbot, client = botclient patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short) @@ -789,29 +859,29 @@ def test_api_delete_open_order(botclient, mocker, fee, markets, ticker, is_short rc = client_delete(client, f"{BASE_URI}/trades/10/open-order") assert_response(rc, 502) - assert 'Invalid trade_id.' in rc.json()['error'] + assert "Invalid trade_id." 
in rc.json()["error"] create_mock_trades(fee, is_short=is_short) Trade.commit() rc = client_delete(client, f"{BASE_URI}/trades/5/open-order") assert_response(rc, 502) - assert 'No open order for trade_id' in rc.json()['error'] + assert "No open order for trade_id" in rc.json()["error"] trade = Trade.get_trades([Trade.id == 6]).first() - mocker.patch(f'{EXMS}.fetch_order', side_effect=ExchangeError) + mocker.patch(f"{EXMS}.fetch_order", side_effect=ExchangeError) rc = client_delete(client, f"{BASE_URI}/trades/6/open-order") assert_response(rc, 502) - assert 'Order not found.' in rc.json()['error'] + assert "Order not found." in rc.json()["error"] trade = Trade.get_trades([Trade.id == 6]).first() - mocker.patch(f'{EXMS}.fetch_order', return_value=trade.orders[-1].to_ccxt_object()) + mocker.patch(f"{EXMS}.fetch_order", return_value=trade.orders[-1].to_ccxt_object()) rc = client_delete(client, f"{BASE_URI}/trades/6/open-order") assert_response(rc) assert cancel_mock.call_count == 1 -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_trade_reload_trade(botclient, mocker, fee, markets, ticker, is_short): ftbot, client = botclient patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short) @@ -828,7 +898,7 @@ def test_api_trade_reload_trade(botclient, mocker, fee, markets, ticker, is_shor rc = client_post(client, f"{BASE_URI}/trades/10/reload") assert_response(rc, 502) - assert 'Could not find trade with id 10.' in rc.json()['error'] + assert "Could not find trade with id 10." in rc.json()["error"] assert ftbot.handle_onexchange_order.call_count == 0 create_mock_trades(fee, is_short=is_short) @@ -843,31 +913,31 @@ def test_api_logs(botclient): rc = client_get(client, f"{BASE_URI}/logs") assert_response(rc) assert len(rc.json()) == 2 - assert 'logs' in rc.json() + assert "logs" in rc.json() # Using a fixed comparison here would make this test fail! - assert rc.json()['log_count'] > 1 - assert len(rc.json()['logs']) == rc.json()['log_count'] + assert rc.json()["log_count"] > 1 + assert len(rc.json()["logs"]) == rc.json()["log_count"] - assert isinstance(rc.json()['logs'][0], list) + assert isinstance(rc.json()["logs"][0], list) # date - assert isinstance(rc.json()['logs'][0][0], str) + assert isinstance(rc.json()["logs"][0][0], str) # created_timestamp - assert isinstance(rc.json()['logs'][0][1], float) - assert isinstance(rc.json()['logs'][0][2], str) - assert isinstance(rc.json()['logs'][0][3], str) - assert isinstance(rc.json()['logs'][0][4], str) + assert isinstance(rc.json()["logs"][0][1], float) + assert isinstance(rc.json()["logs"][0][2], str) + assert isinstance(rc.json()["logs"][0][3], str) + assert isinstance(rc.json()["logs"][0][4], str) rc1 = client_get(client, f"{BASE_URI}/logs?limit=5") assert_response(rc1) assert len(rc1.json()) == 2 - assert 'logs' in rc1.json() + assert "logs" in rc1.json() # Using a fixed comparison here would make this test fail! 
- if rc1.json()['log_count'] < 5: + if rc1.json()["log_count"] < 5: # Help debugging random test failure print(f"rc={rc.json()}") print(f"rc1={rc1.json()}") - assert rc1.json()['log_count'] > 2 - assert len(rc1.json()['logs']) == rc1.json()['log_count'] + assert rc1.json()["log_count"] > 2 + assert len(rc1.json()["logs"]) == rc1.json()["log_count"] def test_api_edge_disabled(botclient, mocker, ticker, fee, markets): @@ -878,64 +948,111 @@ def test_api_edge_disabled(botclient, mocker, ticker, fee, markets): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) rc = client_get(client, f"{BASE_URI}/edge") assert_response(rc, 502) assert rc.json() == {"error": "Error querying /api/v1/edge: Edge is not enabled."} -@pytest.mark.parametrize('is_short,expected', [ - ( - True, - {'best_pair': 'ETC/BTC', 'best_rate': -0.5, 'best_pair_profit_ratio': -0.005, - 'profit_all_coin': 45.561959, - 'profit_all_fiat': 562462.39126200, 'profit_all_percent_mean': 66.41, - 'profit_all_ratio_mean': 0.664109545, 'profit_all_percent_sum': 398.47, - 'profit_all_ratio_sum': 3.98465727, 'profit_all_percent': 4.56, - 'profit_all_ratio': 0.04556147, 'profit_closed_coin': -0.00673913, - 'profit_closed_fiat': -83.19455985, 'profit_closed_ratio_mean': -0.0075, - 'profit_closed_percent_mean': -0.75, 'profit_closed_ratio_sum': -0.015, - 'profit_closed_percent_sum': -1.5, 'profit_closed_ratio': -6.739057628404269e-06, - 'profit_closed_percent': -0.0, 'winning_trades': 0, 'losing_trades': 2, - 'profit_factor': 0.0, 'winrate': 0.0, 'expectancy': -0.0033695635, - 'expectancy_ratio': -1.0, 'trading_volume': 75.945, - } - ), - ( - False, - {'best_pair': 'XRP/BTC', 'best_rate': 1.0, 'best_pair_profit_ratio': 0.01, - 'profit_all_coin': -45.79641127, - 'profit_all_fiat': -565356.69712815, 'profit_all_percent_mean': -66.41, - 'profit_all_ratio_mean': -0.6641100666666667, 'profit_all_percent_sum': -398.47, - 'profit_all_ratio_sum': -3.9846604, 'profit_all_percent': -4.58, - 'profit_all_ratio': -0.045796261934205953, 'profit_closed_coin': 0.00073913, - 'profit_closed_fiat': 9.124559849999999, 'profit_closed_ratio_mean': 0.0075, - 'profit_closed_percent_mean': 0.75, 'profit_closed_ratio_sum': 0.015, - 'profit_closed_percent_sum': 1.5, 'profit_closed_ratio': 7.391275897987988e-07, - 'profit_closed_percent': 0.0, 'winning_trades': 2, 'losing_trades': 0, - 'profit_factor': None, 'winrate': 1.0, 'expectancy': 0.0003695635, - 'expectancy_ratio': 100, 'trading_volume': 75.945, - } - ), - ( - None, - {'best_pair': 'XRP/BTC', 'best_rate': 1.0, 'best_pair_profit_ratio': 0.01, - 'profit_all_coin': -14.94732578, - 'profit_all_fiat': -184524.7367541, 'profit_all_percent_mean': 0.08, - 'profit_all_ratio_mean': 0.000835751666666662, 'profit_all_percent_sum': 0.5, - 'profit_all_ratio_sum': 0.005014509999999972, 'profit_all_percent': -1.49, - 'profit_all_ratio': -0.014947184841095841, 'profit_closed_coin': -0.00542913, - 'profit_closed_fiat': -67.02260985, 'profit_closed_ratio_mean': 0.0025, - 'profit_closed_percent_mean': 0.25, 'profit_closed_ratio_sum': 0.005, - 'profit_closed_percent_sum': 0.5, 'profit_closed_ratio': -5.429078808526421e-06, - 'profit_closed_percent': -0.0, 'winning_trades': 1, 'losing_trades': 1, - 'profit_factor': 0.02775724835771106, 'winrate': 0.5, - 'expectancy': -0.0027145635000000003, 'expectancy_ratio': -0.48612137582114445, - 'trading_volume': 75.945, - } - ) -]) +@pytest.mark.parametrize( + 
"is_short,expected", + [ + ( + True, + { + "best_pair": "ETC/BTC", + "best_rate": -0.5, + "best_pair_profit_ratio": -0.005, + "profit_all_coin": 15.382312, + "profit_all_fiat": 189894.6470718, + "profit_all_percent_mean": 49.62, + "profit_all_ratio_mean": 0.49620917, + "profit_all_percent_sum": 198.48, + "profit_all_ratio_sum": 1.98483671, + "profit_all_percent": 1.54, + "profit_all_ratio": 0.01538214, + "profit_closed_coin": -0.00673913, + "profit_closed_fiat": -83.19455985, + "profit_closed_ratio_mean": -0.0075, + "profit_closed_percent_mean": -0.75, + "profit_closed_ratio_sum": -0.015, + "profit_closed_percent_sum": -1.5, + "profit_closed_ratio": -6.739057628404269e-06, + "profit_closed_percent": -0.0, + "winning_trades": 0, + "losing_trades": 2, + "profit_factor": 0.0, + "winrate": 0.0, + "expectancy": -0.0033695635, + "expectancy_ratio": -1.0, + "trading_volume": 75.945, + }, + ), + ( + False, + { + "best_pair": "XRP/BTC", + "best_rate": 1.0, + "best_pair_profit_ratio": 0.01, + "profit_all_coin": -15.46546305, + "profit_all_fiat": -190921.14135225, + "profit_all_percent_mean": -49.62, + "profit_all_ratio_mean": -0.49620955, + "profit_all_percent_sum": -198.48, + "profit_all_ratio_sum": -1.9848382, + "profit_all_percent": -1.55, + "profit_all_ratio": -0.0154654126, + "profit_closed_coin": 0.00073913, + "profit_closed_fiat": 9.124559849999999, + "profit_closed_ratio_mean": 0.0075, + "profit_closed_percent_mean": 0.75, + "profit_closed_ratio_sum": 0.015, + "profit_closed_percent_sum": 1.5, + "profit_closed_ratio": 7.391275897987988e-07, + "profit_closed_percent": 0.0, + "winning_trades": 2, + "losing_trades": 0, + "profit_factor": None, + "winrate": 1.0, + "expectancy": 0.0003695635, + "expectancy_ratio": 100, + "trading_volume": 75.945, + }, + ), + ( + None, + { + "best_pair": "XRP/BTC", + "best_rate": 1.0, + "best_pair_profit_ratio": 0.01, + "profit_all_coin": -14.87167525, + "profit_all_fiat": -183590.83096125, + "profit_all_percent_mean": 0.13, + "profit_all_ratio_mean": 0.0012538324, + "profit_all_percent_sum": 0.5, + "profit_all_ratio_sum": 0.005015329, + "profit_all_percent": -1.49, + "profit_all_ratio": -0.0148715350, + "profit_closed_coin": -0.00542913, + "profit_closed_fiat": -67.02260985, + "profit_closed_ratio_mean": 0.0025, + "profit_closed_percent_mean": 0.25, + "profit_closed_ratio_sum": 0.005, + "profit_closed_percent_sum": 0.5, + "profit_closed_ratio": -5.429078808526421e-06, + "profit_closed_percent": -0.0, + "winning_trades": 1, + "losing_trades": 1, + "profit_factor": 0.02775724835771106, + "winrate": 0.5, + "expectancy": -0.0027145635000000003, + "expectancy_ratio": -0.48612137582114445, + "trading_volume": 75.945, + }, + ), + ], +) def test_api_profit(botclient, mocker, ticker, fee, markets, is_short, expected): ftbot, client = botclient patch_get_signal(ftbot) @@ -944,12 +1061,12 @@ def test_api_profit(botclient, mocker, ticker, fee, markets, is_short, expected) get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) rc = client_get(client, f"{BASE_URI}/profit") assert_response(rc, 200) - assert rc.json()['trade_count'] == 0 + assert rc.json()["trade_count"] == 0 create_mock_trades(fee, is_short=is_short) # Simulate fulfilled LIMIT_BUY order for trade @@ -958,53 +1075,53 @@ def test_api_profit(botclient, mocker, ticker, fee, markets, is_short, expected) assert_response(rc) # raise ValueError(rc.json()) assert rc.json() == { - 'avg_duration': ANY, - 
'best_pair': expected['best_pair'], - 'best_pair_profit_ratio': expected['best_pair_profit_ratio'], - 'best_rate': expected['best_rate'], - 'first_trade_date': ANY, - 'first_trade_humanized': ANY, - 'first_trade_timestamp': ANY, - 'latest_trade_date': ANY, - 'latest_trade_humanized': '5 minutes ago', - 'latest_trade_timestamp': ANY, - 'profit_all_coin': pytest.approx(expected['profit_all_coin']), - 'profit_all_fiat': pytest.approx(expected['profit_all_fiat']), - 'profit_all_percent_mean': pytest.approx(expected['profit_all_percent_mean']), - 'profit_all_ratio_mean': pytest.approx(expected['profit_all_ratio_mean']), - 'profit_all_percent_sum': pytest.approx(expected['profit_all_percent_sum']), - 'profit_all_ratio_sum': pytest.approx(expected['profit_all_ratio_sum']), - 'profit_all_percent': pytest.approx(expected['profit_all_percent']), - 'profit_all_ratio': pytest.approx(expected['profit_all_ratio']), - 'profit_closed_coin': pytest.approx(expected['profit_closed_coin']), - 'profit_closed_fiat': pytest.approx(expected['profit_closed_fiat']), - 'profit_closed_ratio_mean': pytest.approx(expected['profit_closed_ratio_mean']), - 'profit_closed_percent_mean': pytest.approx(expected['profit_closed_percent_mean']), - 'profit_closed_ratio_sum': pytest.approx(expected['profit_closed_ratio_sum']), - 'profit_closed_percent_sum': pytest.approx(expected['profit_closed_percent_sum']), - 'profit_closed_ratio': pytest.approx(expected['profit_closed_ratio']), - 'profit_closed_percent': pytest.approx(expected['profit_closed_percent']), - 'trade_count': 6, - 'closed_trade_count': 2, - 'winning_trades': expected['winning_trades'], - 'losing_trades': expected['losing_trades'], - 'profit_factor': expected['profit_factor'], - 'winrate': expected['winrate'], - 'expectancy': expected['expectancy'], - 'expectancy_ratio': expected['expectancy_ratio'], - 'max_drawdown': ANY, - 'max_drawdown_abs': ANY, - 'max_drawdown_start': ANY, - 'max_drawdown_start_timestamp': ANY, - 'max_drawdown_end': ANY, - 'max_drawdown_end_timestamp': ANY, - 'trading_volume': expected['trading_volume'], - 'bot_start_timestamp': 0, - 'bot_start_date': '', + "avg_duration": ANY, + "best_pair": expected["best_pair"], + "best_pair_profit_ratio": expected["best_pair_profit_ratio"], + "best_rate": expected["best_rate"], + "first_trade_date": ANY, + "first_trade_humanized": ANY, + "first_trade_timestamp": ANY, + "latest_trade_date": ANY, + "latest_trade_humanized": "5 minutes ago", + "latest_trade_timestamp": ANY, + "profit_all_coin": pytest.approx(expected["profit_all_coin"]), + "profit_all_fiat": pytest.approx(expected["profit_all_fiat"]), + "profit_all_percent_mean": pytest.approx(expected["profit_all_percent_mean"]), + "profit_all_ratio_mean": pytest.approx(expected["profit_all_ratio_mean"]), + "profit_all_percent_sum": pytest.approx(expected["profit_all_percent_sum"]), + "profit_all_ratio_sum": pytest.approx(expected["profit_all_ratio_sum"]), + "profit_all_percent": pytest.approx(expected["profit_all_percent"]), + "profit_all_ratio": pytest.approx(expected["profit_all_ratio"]), + "profit_closed_coin": pytest.approx(expected["profit_closed_coin"]), + "profit_closed_fiat": pytest.approx(expected["profit_closed_fiat"]), + "profit_closed_ratio_mean": pytest.approx(expected["profit_closed_ratio_mean"]), + "profit_closed_percent_mean": pytest.approx(expected["profit_closed_percent_mean"]), + "profit_closed_ratio_sum": pytest.approx(expected["profit_closed_ratio_sum"]), + "profit_closed_percent_sum": 
pytest.approx(expected["profit_closed_percent_sum"]), + "profit_closed_ratio": pytest.approx(expected["profit_closed_ratio"]), + "profit_closed_percent": pytest.approx(expected["profit_closed_percent"]), + "trade_count": 6, + "closed_trade_count": 2, + "winning_trades": expected["winning_trades"], + "losing_trades": expected["losing_trades"], + "profit_factor": expected["profit_factor"], + "winrate": expected["winrate"], + "expectancy": expected["expectancy"], + "expectancy_ratio": expected["expectancy_ratio"], + "max_drawdown": ANY, + "max_drawdown_abs": ANY, + "max_drawdown_start": ANY, + "max_drawdown_start_timestamp": ANY, + "max_drawdown_end": ANY, + "max_drawdown_end_timestamp": ANY, + "trading_volume": expected["trading_volume"], + "bot_start_timestamp": 0, + "bot_start_date": "", } -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) def test_api_stats(botclient, mocker, ticker, fee, markets, is_short): ftbot, client = botclient patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short) @@ -1013,24 +1130,24 @@ def test_api_stats(botclient, mocker, ticker, fee, markets, is_short): get_balances=MagicMock(return_value=ticker), fetch_ticker=ticker, get_fee=fee, - markets=PropertyMock(return_value=markets) + markets=PropertyMock(return_value=markets), ) rc = client_get(client, f"{BASE_URI}/stats") assert_response(rc, 200) - assert 'durations' in rc.json() - assert 'exit_reasons' in rc.json() + assert "durations" in rc.json() + assert "exit_reasons" in rc.json() create_mock_trades(fee, is_short=is_short) rc = client_get(client, f"{BASE_URI}/stats") assert_response(rc, 200) - assert 'durations' in rc.json() - assert 'exit_reasons' in rc.json() + assert "durations" in rc.json() + assert "exit_reasons" in rc.json() - assert 'wins' in rc.json()['durations'] - assert 'losses' in rc.json()['durations'] - assert 'draws' in rc.json()['durations'] + assert "wins" in rc.json()["durations"] + assert "losses" in rc.json()["durations"] + assert "draws" in rc.json()["durations"] def test_api_performance(botclient, fee): @@ -1038,9 +1155,9 @@ def test_api_performance(botclient, fee): patch_get_signal(ftbot) trade = Trade( - pair='LTC/ETH', + pair="LTC/ETH", amount=1, - exchange='binance', + exchange="binance", stake_amount=1, open_rate=0.245441, is_open=False, @@ -1054,10 +1171,10 @@ def test_api_performance(botclient, fee): Trade.session.add(trade) trade = Trade( - pair='XRP/ETH', + pair="XRP/ETH", amount=5, stake_amount=1, - exchange='binance', + exchange="binance", open_rate=0.412, is_open=False, fee_close=fee.return_value, @@ -1074,10 +1191,24 @@ def test_api_performance(botclient, fee): rc = client_get(client, f"{BASE_URI}/performance") assert_response(rc) assert len(rc.json()) == 2 - assert rc.json() == [{'count': 1, 'pair': 'LTC/ETH', 'profit': 7.61, 'profit_pct': 7.61, - 'profit_ratio': 0.07609203, 'profit_abs': 0.0187228}, - {'count': 1, 'pair': 'XRP/ETH', 'profit': -5.57, 'profit_pct': -5.57, - 'profit_ratio': -0.05570419, 'profit_abs': -0.1150375}] + assert rc.json() == [ + { + "count": 1, + "pair": "LTC/ETH", + "profit": 7.61, + "profit_pct": 7.61, + "profit_ratio": 0.07609203, + "profit_abs": 0.0187228, + }, + { + "count": 1, + "pair": "XRP/ETH", + "profit": -5.57, + "profit_pct": -5.57, + "profit_ratio": -0.05570419, + "profit_abs": -0.1150375, + }, + ] def test_api_entries(botclient, fee): @@ -1094,9 +1225,9 @@ def test_api_entries(botclient, fee): response = rc.json() assert len(response) == 2 resp = response[0] - assert 
resp['enter_tag'] == 'TEST1' - assert resp['count'] == 1 - assert resp['profit_pct'] == 0.5 + assert resp["enter_tag"] == "TEST1" + assert resp["count"] == 1 + assert resp["profit_pct"] == 0.5 def test_api_exits(botclient, fee): @@ -1113,9 +1244,9 @@ def test_api_exits(botclient, fee): response = rc.json() assert len(response) == 2 resp = response[0] - assert resp['exit_reason'] == 'sell_signal' - assert resp['count'] == 1 - assert resp['profit_pct'] == 0.5 + assert resp["exit_reason"] == "sell_signal" + assert resp["count"] == 1 + assert resp["profit_pct"] == 0.5 def test_api_mix_tag(botclient, fee): @@ -1132,17 +1263,18 @@ def test_api_mix_tag(botclient, fee): response = rc.json() assert len(response) == 2 resp = response[0] - assert resp['mix_tag'] == 'TEST1 sell_signal' - assert resp['count'] == 1 - assert resp['profit_pct'] == 0.5 + assert resp["mix_tag"] == "TEST1 sell_signal" + assert resp["count"] == 1 + assert resp["profit_pct"] == 0.5 @pytest.mark.parametrize( - 'is_short,current_rate,open_trade_value', - [(True, 1.098e-05, 15.0911775), - (False, 1.099e-05, 15.1668225)]) -def test_api_status(botclient, mocker, ticker, fee, markets, is_short, - current_rate, open_trade_value): + "is_short,current_rate,open_trade_value", + [(True, 1.098e-05, 15.0911775), (False, 1.099e-05, 15.1668225)], +) +def test_api_status( + botclient, mocker, ticker, fee, markets, is_short, current_rate, open_trade_value +): ftbot, client = botclient patch_get_signal(ftbot) mocker.patch.multiple( @@ -1163,87 +1295,88 @@ def test_api_status(botclient, mocker, ticker, fee, markets, is_short, assert_response(rc) assert len(rc.json()) == 4 assert rc.json()[0] == { - 'amount': 123.0, - 'amount_requested': 123.0, - 'close_date': None, - 'close_timestamp': None, - 'close_profit': None, - 'close_profit_pct': None, - 'close_profit_abs': None, - 'close_rate': None, - 'profit_ratio': ANY, - 'profit_pct': ANY, - 'profit_abs': ANY, - 'profit_fiat': ANY, - 'total_profit_abs': ANY, - 'total_profit_fiat': ANY, - 'total_profit_ratio': ANY, - 'realized_profit': 0.0, - 'realized_profit_ratio': None, - 'current_rate': current_rate, - 'open_date': ANY, - 'open_timestamp': ANY, - 'open_fill_date': ANY, - 'open_fill_timestamp': ANY, - 'open_rate': 0.123, - 'pair': 'ETH/BTC', - 'base_currency': 'ETH', - 'quote_currency': 'BTC', - 'stake_amount': 0.001, - 'max_stake_amount': ANY, - 'stop_loss_abs': ANY, - 'stop_loss_pct': ANY, - 'stop_loss_ratio': ANY, - 'stoploss_last_update': ANY, - 'stoploss_last_update_timestamp': ANY, - 'initial_stop_loss_abs': 0.0, - 'initial_stop_loss_pct': ANY, - 'initial_stop_loss_ratio': ANY, - 'stoploss_current_dist': ANY, - 'stoploss_current_dist_ratio': ANY, - 'stoploss_current_dist_pct': ANY, - 'stoploss_entry_dist': ANY, - 'stoploss_entry_dist_ratio': ANY, - 'trade_id': 1, - 'close_rate_requested': ANY, - 'fee_close': 0.0025, - 'fee_close_cost': None, - 'fee_close_currency': None, - 'fee_open': 0.0025, - 'fee_open_cost': None, - 'fee_open_currency': None, - 'is_open': True, + "amount": 123.0, + "amount_requested": 123.0, + "close_date": None, + "close_timestamp": None, + "close_profit": None, + "close_profit_pct": None, + "close_profit_abs": None, + "close_rate": None, + "profit_ratio": ANY, + "profit_pct": ANY, + "profit_abs": ANY, + "profit_fiat": ANY, + "total_profit_abs": ANY, + "total_profit_fiat": ANY, + "total_profit_ratio": ANY, + "realized_profit": 0.0, + "realized_profit_ratio": None, + "current_rate": current_rate, + "open_date": ANY, + "open_timestamp": ANY, + "open_fill_date": ANY, + 
"open_fill_timestamp": ANY, + "open_rate": 0.123, + "pair": "ETH/BTC", + "base_currency": "ETH", + "quote_currency": "BTC", + "stake_amount": 0.001, + "max_stake_amount": ANY, + "stop_loss_abs": ANY, + "stop_loss_pct": ANY, + "stop_loss_ratio": ANY, + "stoploss_last_update": ANY, + "stoploss_last_update_timestamp": ANY, + "initial_stop_loss_abs": 0.0, + "initial_stop_loss_pct": ANY, + "initial_stop_loss_ratio": ANY, + "stoploss_current_dist": ANY, + "stoploss_current_dist_ratio": ANY, + "stoploss_current_dist_pct": ANY, + "stoploss_entry_dist": ANY, + "stoploss_entry_dist_ratio": ANY, + "trade_id": 1, + "close_rate_requested": ANY, + "fee_close": 0.0025, + "fee_close_cost": None, + "fee_close_currency": None, + "fee_open": 0.0025, + "fee_open_cost": None, + "fee_open_currency": None, + "is_open": True, "is_short": is_short, - 'max_rate': ANY, - 'min_rate': ANY, - 'open_rate_requested': ANY, - 'open_trade_value': open_trade_value, - 'exit_reason': None, - 'exit_order_status': None, - 'strategy': CURRENT_TEST_STRATEGY, - 'enter_tag': None, - 'timeframe': 5, - 'exchange': 'binance', - 'leverage': 1.0, - 'interest_rate': 0.0, - 'liquidation_price': None, - 'funding_fees': None, - 'trading_mode': ANY, - 'amount_precision': None, - 'price_precision': None, - 'precision_mode': None, - 'orders': [ANY], - 'has_open_orders': True, + "max_rate": ANY, + "min_rate": ANY, + "open_rate_requested": ANY, + "open_trade_value": open_trade_value, + "exit_reason": None, + "exit_order_status": None, + "strategy": CURRENT_TEST_STRATEGY, + "enter_tag": None, + "timeframe": 5, + "exchange": "binance", + "leverage": 1.0, + "interest_rate": 0.0, + "liquidation_price": None, + "funding_fees": None, + "trading_mode": ANY, + "amount_precision": None, + "price_precision": None, + "precision_mode": None, + "orders": [ANY], + "has_open_orders": True, } - mocker.patch(f'{EXMS}.get_rate', - MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available"))) + mocker.patch( + f"{EXMS}.get_rate", MagicMock(side_effect=ExchangeError("Pair 'ETH/BTC' not available")) + ) rc = client_get(client, f"{BASE_URI}/status") assert_response(rc) resp_values = rc.json() assert len(resp_values) == 4 - assert resp_values[0]['profit_abs'] == 0.0 + assert resp_values[0]["profit_abs"] == 0.0 def test_api_version(botclient): @@ -1260,65 +1393,67 @@ def test_api_blacklist(botclient, mocker): rc = client_get(client, f"{BASE_URI}/blacklist") assert_response(rc) # DOGE and HOT are not in the markets mock! 
- assert rc.json() == {"blacklist": ["DOGE/BTC", "HOT/BTC"], - "blacklist_expanded": [], - "length": 2, - "method": ["StaticPairList"], - "errors": {}, - } + assert rc.json() == { + "blacklist": ["DOGE/BTC", "HOT/BTC"], + "blacklist_expanded": [], + "length": 2, + "method": ["StaticPairList"], + "errors": {}, + } # Add ETH/BTC to blacklist - rc = client_post(client, f"{BASE_URI}/blacklist", - data={"blacklist": ["ETH/BTC"]}) + rc = client_post(client, f"{BASE_URI}/blacklist", data={"blacklist": ["ETH/BTC"]}) assert_response(rc) - assert rc.json() == {"blacklist": ["DOGE/BTC", "HOT/BTC", "ETH/BTC"], - "blacklist_expanded": ["ETH/BTC"], - "length": 3, - "method": ["StaticPairList"], - "errors": {}, - } + assert rc.json() == { + "blacklist": ["DOGE/BTC", "HOT/BTC", "ETH/BTC"], + "blacklist_expanded": ["ETH/BTC"], + "length": 3, + "method": ["StaticPairList"], + "errors": {}, + } - rc = client_post(client, f"{BASE_URI}/blacklist", - data={"blacklist": ["XRP/.*"]}) + rc = client_post(client, f"{BASE_URI}/blacklist", data={"blacklist": ["XRP/.*"]}) assert_response(rc) - assert rc.json() == {"blacklist": ["DOGE/BTC", "HOT/BTC", "ETH/BTC", "XRP/.*"], - "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], - "length": 4, - "method": ["StaticPairList"], - "errors": {}, - } + assert rc.json() == { + "blacklist": ["DOGE/BTC", "HOT/BTC", "ETH/BTC", "XRP/.*"], + "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], + "length": 4, + "method": ["StaticPairList"], + "errors": {}, + } rc = client_delete(client, f"{BASE_URI}/blacklist?pairs_to_delete=DOGE/BTC") assert_response(rc) - assert rc.json() == {"blacklist": ["HOT/BTC", "ETH/BTC", "XRP/.*"], - "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], - "length": 3, - "method": ["StaticPairList"], - "errors": {}, - } + assert rc.json() == { + "blacklist": ["HOT/BTC", "ETH/BTC", "XRP/.*"], + "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], + "length": 3, + "method": ["StaticPairList"], + "errors": {}, + } rc = client_delete(client, f"{BASE_URI}/blacklist?pairs_to_delete=NOTHING/BTC") assert_response(rc) - assert rc.json() == {"blacklist": ["HOT/BTC", "ETH/BTC", "XRP/.*"], - "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], - "length": 3, - "method": ["StaticPairList"], - "errors": { - "NOTHING/BTC": { - "error_msg": "Pair NOTHING/BTC is not in the current blacklist." 
- } - }, + assert rc.json() == { + "blacklist": ["HOT/BTC", "ETH/BTC", "XRP/.*"], + "blacklist_expanded": ["ETH/BTC", "XRP/BTC", "XRP/USDT"], + "length": 3, + "method": ["StaticPairList"], + "errors": { + "NOTHING/BTC": {"error_msg": "Pair NOTHING/BTC is not in the current blacklist."} + }, } rc = client_delete( - client, - f"{BASE_URI}/blacklist?pairs_to_delete=HOT/BTC&pairs_to_delete=ETH/BTC") + client, f"{BASE_URI}/blacklist?pairs_to_delete=HOT/BTC&pairs_to_delete=ETH/BTC" + ) assert_response(rc) - assert rc.json() == {"blacklist": ["XRP/.*"], - "blacklist_expanded": ["XRP/BTC", "XRP/USDT"], - "length": 1, - "method": ["StaticPairList"], - "errors": {}, - } + assert rc.json() == { + "blacklist": ["XRP/.*"], + "blacklist_expanded": ["XRP/BTC", "XRP/USDT"], + "length": 1, + "method": ["StaticPairList"], + "errors": {}, + } def test_api_whitelist(botclient): @@ -1327,121 +1462,123 @@ def test_api_whitelist(botclient): rc = client_get(client, f"{BASE_URI}/whitelist") assert_response(rc) assert rc.json() == { - "whitelist": ['ETH/BTC', 'LTC/BTC', 'XRP/BTC', 'NEO/BTC'], + "whitelist": ["ETH/BTC", "LTC/BTC", "XRP/BTC", "NEO/BTC"], "length": 4, - "method": ["StaticPairList"] + "method": ["StaticPairList"], } -@pytest.mark.parametrize('endpoint', [ - 'forcebuy', - 'forceenter', -]) +@pytest.mark.parametrize( + "endpoint", + [ + "forcebuy", + "forceenter", + ], +) def test_api_force_entry(botclient, mocker, fee, endpoint): ftbot, client = botclient - rc = client_post(client, f"{BASE_URI}/{endpoint}", - data={"pair": "ETH/BTC"}) + rc = client_post(client, f"{BASE_URI}/{endpoint}", data={"pair": "ETH/BTC"}) assert_response(rc, 502) assert rc.json() == {"error": f"Error querying /api/v1/{endpoint}: Force_entry not enabled."} # enable forcebuy - ftbot.config['force_entry_enable'] = True + ftbot.config["force_entry_enable"] = True fbuy_mock = MagicMock(return_value=None) mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_entry", fbuy_mock) - rc = client_post(client, f"{BASE_URI}/{endpoint}", - data={"pair": "ETH/BTC"}) + rc = client_post(client, f"{BASE_URI}/{endpoint}", data={"pair": "ETH/BTC"}) assert_response(rc) assert rc.json() == {"status": "Error entering long trade for pair ETH/BTC."} # Test creating trade - fbuy_mock = MagicMock(return_value=Trade( - pair='ETH/BTC', - amount=1, - amount_requested=1, - exchange='binance', - stake_amount=1, - open_rate=0.245441, - open_date=datetime.now(timezone.utc), - is_open=False, - is_short=False, - fee_close=fee.return_value, - fee_open=fee.return_value, - close_rate=0.265441, - id=22, - timeframe=5, - strategy=CURRENT_TEST_STRATEGY, - trading_mode=TradingMode.SPOT - )) + fbuy_mock = MagicMock( + return_value=Trade( + pair="ETH/BTC", + amount=1, + amount_requested=1, + exchange="binance", + stake_amount=1, + open_rate=0.245441, + open_date=datetime.now(timezone.utc), + is_open=False, + is_short=False, + fee_close=fee.return_value, + fee_open=fee.return_value, + close_rate=0.265441, + id=22, + timeframe=5, + strategy=CURRENT_TEST_STRATEGY, + trading_mode=TradingMode.SPOT, + ) + ) mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_entry", fbuy_mock) - rc = client_post(client, f"{BASE_URI}/{endpoint}", - data={"pair": "ETH/BTC"}) + rc = client_post(client, f"{BASE_URI}/{endpoint}", data={"pair": "ETH/BTC"}) assert_response(rc) assert rc.json() == { - 'amount': 1.0, - 'amount_requested': 1.0, - 'trade_id': 22, - 'close_date': None, - 'close_timestamp': None, - 'close_rate': 0.265441, - 'open_date': ANY, - 'open_timestamp': ANY, - 'open_fill_date': ANY, - 
'open_fill_timestamp': ANY, - 'open_rate': 0.245441, - 'pair': 'ETH/BTC', - 'base_currency': 'ETH', - 'quote_currency': 'BTC', - 'stake_amount': 1, - 'max_stake_amount': ANY, - 'stop_loss_abs': None, - 'stop_loss_pct': None, - 'stop_loss_ratio': None, - 'stoploss_last_update': None, - 'stoploss_last_update_timestamp': None, - 'initial_stop_loss_abs': None, - 'initial_stop_loss_pct': None, - 'initial_stop_loss_ratio': None, - 'close_profit': None, - 'close_profit_pct': None, - 'close_profit_abs': None, - 'close_rate_requested': None, - 'profit_ratio': None, - 'profit_pct': None, - 'profit_abs': None, - 'profit_fiat': None, - 'realized_profit': 0.0, - 'realized_profit_ratio': None, - 'fee_close': 0.0025, - 'fee_close_cost': None, - 'fee_close_currency': None, - 'fee_open': 0.0025, - 'fee_open_cost': None, - 'fee_open_currency': None, - 'is_open': False, - 'is_short': False, - 'max_rate': None, - 'min_rate': None, - 'open_rate_requested': None, - 'open_trade_value': 0.24605460, - 'exit_reason': None, - 'exit_order_status': None, - 'strategy': CURRENT_TEST_STRATEGY, - 'enter_tag': None, - 'timeframe': 5, - 'exchange': 'binance', - 'leverage': None, - 'interest_rate': None, - 'liquidation_price': None, - 'funding_fees': None, - 'trading_mode': 'spot', - 'amount_precision': None, - 'price_precision': None, - 'precision_mode': None, - 'has_open_orders': False, - 'orders': [], + "amount": 1.0, + "amount_requested": 1.0, + "trade_id": 22, + "close_date": None, + "close_timestamp": None, + "close_rate": 0.265441, + "open_date": ANY, + "open_timestamp": ANY, + "open_fill_date": ANY, + "open_fill_timestamp": ANY, + "open_rate": 0.245441, + "pair": "ETH/BTC", + "base_currency": "ETH", + "quote_currency": "BTC", + "stake_amount": 1, + "max_stake_amount": ANY, + "stop_loss_abs": None, + "stop_loss_pct": None, + "stop_loss_ratio": None, + "stoploss_last_update": None, + "stoploss_last_update_timestamp": None, + "initial_stop_loss_abs": None, + "initial_stop_loss_pct": None, + "initial_stop_loss_ratio": None, + "close_profit": None, + "close_profit_pct": None, + "close_profit_abs": None, + "close_rate_requested": None, + "profit_ratio": None, + "profit_pct": None, + "profit_abs": None, + "profit_fiat": None, + "realized_profit": 0.0, + "realized_profit_ratio": None, + "fee_close": 0.0025, + "fee_close_cost": None, + "fee_close_currency": None, + "fee_open": 0.0025, + "fee_open_cost": None, + "fee_open_currency": None, + "is_open": False, + "is_short": False, + "max_rate": None, + "min_rate": None, + "open_rate_requested": None, + "open_trade_value": 0.24605460, + "exit_reason": None, + "exit_order_status": None, + "strategy": CURRENT_TEST_STRATEGY, + "enter_tag": None, + "timeframe": 5, + "exchange": "binance", + "leverage": None, + "interest_rate": None, + "liquidation_price": None, + "funding_fees": None, + "trading_mode": "spot", + "amount_precision": None, + "price_precision": None, + "precision_mode": None, + "has_open_orders": False, + "orders": [], } @@ -1457,8 +1594,7 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets): ) patch_get_signal(ftbot) - rc = client_post(client, f"{BASE_URI}/forceexit", - data={"tradeid": "1"}) + rc = client_post(client, f"{BASE_URI}/forceexit", data={"tradeid": "1"}) assert_response(rc, 502) assert rc.json() == {"error": "Error querying /api/v1/forceexit: invalid argument"} Trade.rollback() @@ -1466,20 +1602,20 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets): create_mock_trades(fee) trade = Trade.get_trades([Trade.id == 5]).first() 
assert pytest.approx(trade.amount) == 123 - rc = client_post(client, f"{BASE_URI}/forceexit", - data={"tradeid": "5", "ordertype": "market", "amount": 23}) + rc = client_post( + client, f"{BASE_URI}/forceexit", data={"tradeid": "5", "ordertype": "market", "amount": 23} + ) assert_response(rc) - assert rc.json() == {'result': 'Created exit order for trade 5.'} + assert rc.json() == {"result": "Created exit order for trade 5."} Trade.rollback() trade = Trade.get_trades([Trade.id == 5]).first() assert pytest.approx(trade.amount) == 100 assert trade.is_open is True - rc = client_post(client, f"{BASE_URI}/forceexit", - data={"tradeid": "5"}) + rc = client_post(client, f"{BASE_URI}/forceexit", data={"tradeid": "5"}) assert_response(rc) - assert rc.json() == {'result': 'Created exit order for trade 5.'} + assert rc.json() == {"result": "Created exit order for trade 5."} Trade.rollback() trade = Trade.get_trades([Trade.id == 5]).first() @@ -1488,208 +1624,463 @@ def test_api_forceexit(botclient, mocker, ticker, fee, markets): def test_api_pair_candles(botclient, ohlcv_history): ftbot, client = botclient - timeframe = '5m' + timeframe = "5m" amount = 3 # No pair - rc = client_get(client, - f"{BASE_URI}/pair_candles?limit={amount}&timeframe={timeframe}") + rc = client_get(client, f"{BASE_URI}/pair_candles?limit={amount}&timeframe={timeframe}") assert_response(rc, 422) # No timeframe - rc = client_get(client, - f"{BASE_URI}/pair_candles?pair=XRP%2FBTC") + rc = client_get(client, f"{BASE_URI}/pair_candles?pair=XRP%2FBTC") assert_response(rc, 422) - rc = client_get(client, - f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}") + rc = client_get( + client, f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}" + ) assert_response(rc) - assert 'columns' in rc.json() - assert 'data_start_ts' in rc.json() - assert 'data_start' in rc.json() - assert 'data_stop' in rc.json() - assert 'data_stop_ts' in rc.json() - assert len(rc.json()['data']) == 0 - ohlcv_history['sma'] = ohlcv_history['close'].rolling(2).mean() - ohlcv_history['enter_long'] = 0 - ohlcv_history.loc[1, 'enter_long'] = 1 - ohlcv_history['exit_long'] = 0 - ohlcv_history['enter_short'] = 0 - ohlcv_history['exit_short'] = 0 + assert "columns" in rc.json() + assert "data_start_ts" in rc.json() + assert "data_start" in rc.json() + assert "data_stop" in rc.json() + assert "data_stop_ts" in rc.json() + assert len(rc.json()["data"]) == 0 + ohlcv_history["sma"] = ohlcv_history["close"].rolling(2).mean() + ohlcv_history["sma2"] = ohlcv_history["close"].rolling(2).mean() + ohlcv_history["enter_long"] = 0 + ohlcv_history.loc[1, "enter_long"] = 1 + ohlcv_history["exit_long"] = 0 + ohlcv_history["enter_short"] = 0 + ohlcv_history["exit_short"] = 0 ftbot.dataprovider._set_cached_df("XRP/BTC", timeframe, ohlcv_history, CandleType.SPOT) + for call in ("get", "post"): + if call == "get": + rc = client_get( + client, + f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}", + ) + else: + rc = client_post( + client, + f"{BASE_URI}/pair_candles", + data={ + "pair": "XRP/BTC", + "timeframe": timeframe, + "limit": amount, + "columns": ["sma"], + }, + ) + assert_response(rc) + resp = rc.json() + assert "strategy" in resp + assert resp["strategy"] == CURRENT_TEST_STRATEGY + assert "columns" in resp + assert "data_start_ts" in resp + assert "data_start" in resp + assert "data_stop" in resp + assert "data_stop_ts" in resp + assert resp["data_start"] == "2017-11-26 08:50:00+00:00" + assert 
resp["data_start_ts"] == 1511686200000 + assert resp["data_stop"] == "2017-11-26 09:00:00+00:00" + assert resp["data_stop_ts"] == 1511686800000 + assert isinstance(resp["columns"], list) + base_cols = { + "date", + "open", + "high", + "low", + "close", + "volume", + "sma", + "enter_long", + "exit_long", + "enter_short", + "exit_short", + "__date_ts", + "_enter_long_signal_close", + "_exit_long_signal_close", + "_enter_short_signal_close", + "_exit_short_signal_close", + } + if call == "get": + assert set(resp["columns"]) == base_cols.union({"sma2"}) + else: + assert set(resp["columns"]) == base_cols - rc = client_get(client, - f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}") - assert_response(rc) - assert 'strategy' in rc.json() - assert rc.json()['strategy'] == CURRENT_TEST_STRATEGY - assert 'columns' in rc.json() - assert 'data_start_ts' in rc.json() - assert 'data_start' in rc.json() - assert 'data_stop' in rc.json() - assert 'data_stop_ts' in rc.json() - assert rc.json()['data_start'] == '2017-11-26 08:50:00+00:00' - assert rc.json()['data_start_ts'] == 1511686200000 - assert rc.json()['data_stop'] == '2017-11-26 09:00:00+00:00' - assert rc.json()['data_stop_ts'] == 1511686800000 - assert isinstance(rc.json()['columns'], list) - assert set(rc.json()['columns']) == { - 'date', 'open', 'high', 'low', 'close', 'volume', - 'sma', 'enter_long', 'exit_long', 'enter_short', 'exit_short', '__date_ts', - '_enter_long_signal_close', '_exit_long_signal_close', - '_enter_short_signal_close', '_exit_short_signal_close' - } - assert 'pair' in rc.json() - assert rc.json()['pair'] == 'XRP/BTC' + # All columns doesn't include the internal columns + assert set(resp["all_columns"]) == { + "date", + "open", + "high", + "low", + "close", + "volume", + "sma", + "sma2", + "enter_long", + "exit_long", + "enter_short", + "exit_short", + } + assert "pair" in resp + assert resp["pair"] == "XRP/BTC" - assert 'data' in rc.json() - assert len(rc.json()['data']) == amount + assert "data" in resp + assert len(resp["data"]) == amount + if call == "get": + assert len(resp["data"][0]) == 17 + assert resp["data"] == [ + [ + "2017-11-26T08:50:00Z", + 8.794e-05, + 8.948e-05, + 8.794e-05, + 8.88e-05, + 0.0877869, + None, + None, + 0, + 0, + 0, + 0, + 1511686200000, + None, + None, + None, + None, + ], + [ + "2017-11-26T08:55:00Z", + 8.88e-05, + 8.942e-05, + 8.88e-05, + 8.893e-05, + 0.05874751, + 8.886500000000001e-05, + 8.886500000000001e-05, + 1, + 0, + 0, + 0, + 1511686500000, + 8.893e-05, + None, + None, + None, + ], + [ + "2017-11-26T09:00:00Z", + 8.891e-05, + 8.893e-05, + 8.875e-05, + 8.877e-05, + 0.7039405, + 8.885e-05, + 8.885e-05, + 0, + 0, + 0, + 0, + 1511686800000, + None, + None, + None, + None, + ], + ] + else: + assert len(resp["data"][0]) == 16 + assert resp["data"] == [ + [ + "2017-11-26T08:50:00Z", + 8.794e-05, + 8.948e-05, + 8.794e-05, + 8.88e-05, + 0.0877869, + None, + 0, + 0, + 0, + 0, + 1511686200000, + None, + None, + None, + None, + ], + [ + "2017-11-26T08:55:00Z", + 8.88e-05, + 8.942e-05, + 8.88e-05, + 8.893e-05, + 0.05874751, + 8.886500000000001e-05, + 1, + 0, + 0, + 0, + 1511686500000, + 8.893e-05, + None, + None, + None, + ], + [ + "2017-11-26T09:00:00Z", + 8.891e-05, + 8.893e-05, + 8.875e-05, + 8.877e-05, + 0.7039405, + 8.885e-05, + 0, + 0, + 0, + 0, + 1511686800000, + None, + None, + None, + None, + ], + ] - assert (rc.json()['data'] == - [['2017-11-26T08:50:00Z', 8.794e-05, 8.948e-05, 8.794e-05, 8.88e-05, 0.0877869, - None, 0, 0, 0, 0, 1511686200000, None, None, 
None, None], - ['2017-11-26T08:55:00Z', 8.88e-05, 8.942e-05, 8.88e-05, - 8.893e-05, 0.05874751, 8.886500000000001e-05, 1, 0, 0, 0, 1511686500000, 8.893e-05, - None, None, None], - ['2017-11-26T09:00:00Z', 8.891e-05, 8.893e-05, 8.875e-05, 8.877e-05, - 0.7039405, 8.885e-05, 0, 0, 0, 0, 1511686800000, None, None, None, None] - - ]) - ohlcv_history['exit_long'] = ohlcv_history['exit_long'].astype('float64') - ohlcv_history.at[0, 'exit_long'] = float('inf') - ohlcv_history['date1'] = ohlcv_history['date'] - ohlcv_history.at[0, 'date1'] = pd.NaT + # prep for next test + ohlcv_history["exit_long"] = ohlcv_history["exit_long"].astype("float64") + ohlcv_history.at[0, "exit_long"] = float("inf") + ohlcv_history["date1"] = ohlcv_history["date"] + ohlcv_history.at[0, "date1"] = pd.NaT ftbot.dataprovider._set_cached_df("XRP/BTC", timeframe, ohlcv_history, CandleType.SPOT) - rc = client_get(client, - f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}") + rc = client_get( + client, f"{BASE_URI}/pair_candles?limit={amount}&pair=XRP%2FBTC&timeframe={timeframe}" + ) assert_response(rc) - assert (rc.json()['data'] == - [['2017-11-26T08:50:00Z', 8.794e-05, 8.948e-05, 8.794e-05, 8.88e-05, 0.0877869, - None, 0, None, 0, 0, None, 1511686200000, None, None, None, None], - ['2017-11-26T08:55:00Z', 8.88e-05, 8.942e-05, 8.88e-05, - 8.893e-05, 0.05874751, 8.886500000000001e-05, 1, 0.0, 0, 0, '2017-11-26T08:55:00Z', - 1511686500000, 8.893e-05, None, None, None], - ['2017-11-26T09:00:00Z', 8.891e-05, 8.893e-05, 8.875e-05, 8.877e-05, - 0.7039405, 8.885e-05, 0, 0.0, 0, 0, '2017-11-26T09:00:00Z', 1511686800000, - None, None, None, None] - ]) + assert rc.json()["data"] == [ + [ + "2017-11-26T08:50:00Z", + 8.794e-05, + 8.948e-05, + 8.794e-05, + 8.88e-05, + 0.0877869, + None, + None, + 0, + None, + 0, + 0, + None, + 1511686200000, + None, + None, + None, + None, + ], + [ + "2017-11-26T08:55:00Z", + 8.88e-05, + 8.942e-05, + 8.88e-05, + 8.893e-05, + 0.05874751, + 8.886500000000001e-05, + 8.886500000000001e-05, + 1, + 0.0, + 0, + 0, + "2017-11-26T08:55:00Z", + 1511686500000, + 8.893e-05, + None, + None, + None, + ], + [ + "2017-11-26T09:00:00Z", + 8.891e-05, + 8.893e-05, + 8.875e-05, + 8.877e-05, + 0.7039405, + 8.885e-05, + 8.885e-05, + 0, + 0.0, + 0, + 0, + "2017-11-26T09:00:00Z", + 1511686800000, + None, + None, + None, + None, + ], + ] def test_api_pair_history(botclient, tmp_path, mocker): _ftbot, client = botclient - _ftbot.config['user_data_dir'] = tmp_path + _ftbot.config["user_data_dir"] = tmp_path - timeframe = '5m' - lfm = mocker.patch('freqtrade.strategy.interface.IStrategy.load_freqAI_model') + timeframe = "5m" + lfm = mocker.patch("freqtrade.strategy.interface.IStrategy.load_freqAI_model") # No pair - rc = client_get(client, - f"{BASE_URI}/pair_history?timeframe={timeframe}" - f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}") + rc = client_get( + client, + f"{BASE_URI}/pair_history?timeframe={timeframe}" + f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}", + ) assert_response(rc, 422) # No Timeframe - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC" - f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}") + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC" + f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}", + ) assert_response(rc, 422) # No timerange - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" - 
f"&strategy={CURRENT_TEST_STRATEGY}") + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" + f"&strategy={CURRENT_TEST_STRATEGY}", + ) assert_response(rc, 422) # No strategy - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" - "&timerange=20180111-20180112") + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" + "&timerange=20180111-20180112", + ) assert_response(rc, 422) # Invalid strategy - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" - "&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}11") + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" + "&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}11", + ) assert_response(rc, 502) # Working - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" - f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}") - assert_response(rc, 200) - result = rc.json() - assert result['length'] == 289 - assert len(result['data']) == result['length'] - assert 'columns' in result - assert 'data' in result - data = result['data'] - assert len(data) == 289 - # analyzed DF has 30 columns - assert len(result['columns']) == 30 - assert len(data[0]) == 30 - date_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'date'][0] - rsi_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'rsi'][0] + for call in ("get", "post"): + if call == "get": + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" + f"&timerange=20180111-20180112&strategy={CURRENT_TEST_STRATEGY}", + ) + else: + rc = client_post( + client, + f"{BASE_URI}/pair_history", + data={ + "pair": "UNITTEST/BTC", + "timeframe": timeframe, + "timerange": "20180111-20180112", + "strategy": CURRENT_TEST_STRATEGY, + "columns": ["rsi", "fastd", "fastk"], + }, + ) - assert data[0][date_col_idx] == '2018-01-11T00:00:00Z' - assert data[0][rsi_col_idx] is not None - assert data[0][rsi_col_idx] > 0 - assert lfm.call_count == 1 - assert result['pair'] == 'UNITTEST/BTC' - assert result['strategy'] == CURRENT_TEST_STRATEGY - assert result['data_start'] == '2018-01-11 00:00:00+00:00' - assert result['data_start_ts'] == 1515628800000 - assert result['data_stop'] == '2018-01-12 00:00:00+00:00' - assert result['data_stop_ts'] == 1515715200000 + assert_response(rc, 200) + result = rc.json() + assert result["length"] == 289 + assert len(result["data"]) == result["length"] + assert "columns" in result + assert "data" in result + data = result["data"] + assert len(data) == 289 + col_count = 30 if call == "get" else 18 + # analyzed DF has 30 columns + assert len(result["columns"]) == col_count + assert len(result["all_columns"]) == 25 + assert len(data[0]) == col_count + date_col_idx = [idx for idx, c in enumerate(result["columns"]) if c == "date"][0] + rsi_col_idx = [idx for idx, c in enumerate(result["columns"]) if c == "rsi"][0] - # No data found - rc = client_get(client, - f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" - f"&timerange=20200111-20200112&strategy={CURRENT_TEST_STRATEGY}") - assert_response(rc, 502) - assert rc.json()['detail'] == ("No data for UNITTEST/BTC, 5m in 20200111-20200112 found.") + assert data[0][date_col_idx] == "2018-01-11T00:00:00Z" + assert data[0][rsi_col_idx] is not None + assert data[0][rsi_col_idx] > 0 
+ assert lfm.call_count == 1 + assert result["pair"] == "UNITTEST/BTC" + assert result["strategy"] == CURRENT_TEST_STRATEGY + assert result["data_start"] == "2018-01-11 00:00:00+00:00" + assert result["data_start_ts"] == 1515628800000 + assert result["data_stop"] == "2018-01-12 00:00:00+00:00" + assert result["data_stop_ts"] == 1515715200000 + lfm.reset_mock() + + # No data found + if call == "get": + rc = client_get( + client, + f"{BASE_URI}/pair_history?pair=UNITTEST%2FBTC&timeframe={timeframe}" + f"&timerange=20200111-20200112&strategy={CURRENT_TEST_STRATEGY}", + ) + else: + rc = client_post( + client, + f"{BASE_URI}/pair_history", + data={ + "pair": "UNITTEST/BTC", + "timeframe": timeframe, + "timerange": "20200111-20200112", + "strategy": CURRENT_TEST_STRATEGY, + "columns": ["rsi", "fastd", "fastk"], + }, + ) + assert_response(rc, 502) + assert rc.json()["detail"] == ("No data for UNITTEST/BTC, 5m in 20200111-20200112 found.") def test_api_plot_config(botclient, mocker, tmp_path): ftbot, client = botclient - ftbot.config['user_data_dir'] = tmp_path + ftbot.config["user_data_dir"] = tmp_path rc = client_get(client, f"{BASE_URI}/plot_config") assert_response(rc) assert rc.json() == {} ftbot.strategy.plot_config = { - 'main_plot': {'sma': {}}, - 'subplots': {'RSI': {'rsi': {'color': 'red'}}} + "main_plot": {"sma": {}}, + "subplots": {"RSI": {"rsi": {"color": "red"}}}, } rc = client_get(client, f"{BASE_URI}/plot_config") assert_response(rc) assert rc.json() == ftbot.strategy.plot_config - assert isinstance(rc.json()['main_plot'], dict) - assert isinstance(rc.json()['subplots'], dict) + assert isinstance(rc.json()["main_plot"], dict) + assert isinstance(rc.json()["subplots"], dict) - ftbot.strategy.plot_config = {'main_plot': {'sma': {}}} + ftbot.strategy.plot_config = {"main_plot": {"sma": {}}} rc = client_get(client, f"{BASE_URI}/plot_config") assert_response(rc) - assert isinstance(rc.json()['main_plot'], dict) - assert isinstance(rc.json()['subplots'], dict) + assert isinstance(rc.json()["main_plot"], dict) + assert isinstance(rc.json()["subplots"], dict) rc = client_get(client, f"{BASE_URI}/plot_config?strategy=freqai_test_classifier") assert_response(rc) res = rc.json() - assert 'target_roi' in res['subplots'] - assert 'do_predict' in res['subplots'] + assert "target_roi" in res["subplots"] + assert "do_predict" in res["subplots"] rc = client_get(client, f"{BASE_URI}/plot_config?strategy=HyperoptableStrategy") assert_response(rc) - assert rc.json()['subplots'] == {} + assert rc.json()["subplots"] == {} rc = client_get(client, f"{BASE_URI}/plot_config?strategy=NotAStrategy") assert_response(rc, 502) - assert rc.json()['detail'] is not None + assert rc.json()["detail"] is not None - mocker.patch('freqtrade.rpc.api_server.api_v1.get_rpc_optional', return_value=None) + mocker.patch("freqtrade.rpc.api_server.api_v1.get_rpc_optional", return_value=None) rc = client_get(client, f"{BASE_URI}/plot_config") assert_response(rc) @@ -1697,40 +2088,42 @@ def test_api_plot_config(botclient, mocker, tmp_path): def test_api_strategies(botclient, tmp_path): ftbot, client = botclient - ftbot.config['user_data_dir'] = tmp_path + ftbot.config["user_data_dir"] = tmp_path rc = client_get(client, f"{BASE_URI}/strategies") assert_response(rc) - assert rc.json() == {'strategies': [ - 'HyperoptableStrategy', - 'HyperoptableStrategyV2', - 'InformativeDecoratorTest', - 'StrategyTestV2', - 'StrategyTestV3', - 'StrategyTestV3CustomEntryPrice', - 'StrategyTestV3Futures', - 'freqai_rl_test_strat', - 
'freqai_test_classifier', - 'freqai_test_multimodel_classifier_strat', - 'freqai_test_multimodel_strat', - 'freqai_test_strat', - 'strategy_test_v3_recursive_issue' - ]} + assert rc.json() == { + "strategies": [ + "HyperoptableStrategy", + "HyperoptableStrategyV2", + "InformativeDecoratorTest", + "StrategyTestV2", + "StrategyTestV3", + "StrategyTestV3CustomEntryPrice", + "StrategyTestV3Futures", + "freqai_rl_test_strat", + "freqai_test_classifier", + "freqai_test_multimodel_classifier_strat", + "freqai_test_multimodel_strat", + "freqai_test_strat", + "strategy_test_v3_recursive_issue", + ] + } -def test_api_strategy(botclient, tmp_path): +def test_api_strategy(botclient, tmp_path, mocker): _ftbot, client = botclient - _ftbot.config['user_data_dir'] = tmp_path + _ftbot.config["user_data_dir"] = tmp_path rc = client_get(client, f"{BASE_URI}/strategy/{CURRENT_TEST_STRATEGY}") assert_response(rc) - assert rc.json()['strategy'] == CURRENT_TEST_STRATEGY + assert rc.json()["strategy"] == CURRENT_TEST_STRATEGY data = (Path(__file__).parents[1] / "strategy/strats/strategy_test_v3.py").read_text() - assert rc.json()['code'] == data + assert rc.json()["code"] == data rc = client_get(client, f"{BASE_URI}/strategy/NoStrat") assert_response(rc, 404) @@ -1738,6 +2131,13 @@ def test_api_strategy(botclient, tmp_path): # Disallow base64 strategies rc = client_get(client, f"{BASE_URI}/strategy/xx:cHJpbnQoImhlbGxvIHdvcmxkIik=") assert_response(rc, 500) + mocker.patch( + "freqtrade.resolvers.strategy_resolver.StrategyResolver._load_strategy", + side_effect=Exception("Test"), + ) + + rc = client_get(client, f"{BASE_URI}/strategy/NoStrat") + assert_response(rc, 502) def test_api_exchanges(botclient): @@ -1746,183 +2146,190 @@ def test_api_exchanges(botclient): rc = client_get(client, f"{BASE_URI}/exchanges") assert_response(rc) response = rc.json() - assert isinstance(response['exchanges'], list) - assert len(response['exchanges']) > 20 - okx = [x for x in response['exchanges'] if x['name'] == 'okx'][0] + assert isinstance(response["exchanges"], list) + assert len(response["exchanges"]) > 20 + okx = [x for x in response["exchanges"] if x["name"] == "okx"][0] assert okx == { "name": "okx", "valid": True, "supported": True, "comment": "", "trade_modes": [ - { - "trading_mode": "spot", - "margin_mode": "" - }, - { - "trading_mode": "futures", - "margin_mode": "isolated" - } - ] + {"trading_mode": "spot", "margin_mode": ""}, + {"trading_mode": "futures", "margin_mode": "isolated"}, + ], } - mexc = [x for x in response['exchanges'] if x['name'] == 'mexc'][0] + mexc = [x for x in response["exchanges"] if x["name"] == "mexc"][0] assert mexc == { "name": "mexc", "valid": True, "supported": False, "comment": "", - "trade_modes": [ - { - "trading_mode": "spot", - "margin_mode": "" - } - ] + "trade_modes": [{"trading_mode": "spot", "margin_mode": ""}], } def test_api_freqaimodels(botclient, tmp_path, mocker): ftbot, client = botclient - ftbot.config['user_data_dir'] = tmp_path + ftbot.config["user_data_dir"] = tmp_path mocker.patch( "freqtrade.resolvers.freqaimodel_resolver.FreqaiModelResolver.search_all_objects", return_value=[ - {'name': 'LightGBMClassifier'}, - {'name': 'LightGBMClassifierMultiTarget'}, - {'name': 'LightGBMRegressor'}, - {'name': 'LightGBMRegressorMultiTarget'}, - {'name': 'ReinforcementLearner'}, - {'name': 'ReinforcementLearner_multiproc'}, - {'name': 'SKlearnRandomForestClassifier'}, - {'name': 'XGBoostClassifier'}, - {'name': 'XGBoostRFClassifier'}, - {'name': 'XGBoostRFRegressor'}, - {'name': 
'XGBoostRegressor'}, - {'name': 'XGBoostRegressorMultiTarget'}, - ]) + {"name": "LightGBMClassifier"}, + {"name": "LightGBMClassifierMultiTarget"}, + {"name": "LightGBMRegressor"}, + {"name": "LightGBMRegressorMultiTarget"}, + {"name": "ReinforcementLearner"}, + {"name": "ReinforcementLearner_multiproc"}, + {"name": "SKlearnRandomForestClassifier"}, + {"name": "XGBoostClassifier"}, + {"name": "XGBoostRFClassifier"}, + {"name": "XGBoostRFRegressor"}, + {"name": "XGBoostRegressor"}, + {"name": "XGBoostRegressorMultiTarget"}, + ], + ) rc = client_get(client, f"{BASE_URI}/freqaimodels") assert_response(rc) - assert rc.json() == {'freqaimodels': [ - 'LightGBMClassifier', - 'LightGBMClassifierMultiTarget', - 'LightGBMRegressor', - 'LightGBMRegressorMultiTarget', - 'ReinforcementLearner', - 'ReinforcementLearner_multiproc', - 'SKlearnRandomForestClassifier', - 'XGBoostClassifier', - 'XGBoostRFClassifier', - 'XGBoostRFRegressor', - 'XGBoostRegressor', - 'XGBoostRegressorMultiTarget' - ]} + assert rc.json() == { + "freqaimodels": [ + "LightGBMClassifier", + "LightGBMClassifierMultiTarget", + "LightGBMRegressor", + "LightGBMRegressorMultiTarget", + "ReinforcementLearner", + "ReinforcementLearner_multiproc", + "SKlearnRandomForestClassifier", + "XGBoostClassifier", + "XGBoostRFClassifier", + "XGBoostRFRegressor", + "XGBoostRegressor", + "XGBoostRegressorMultiTarget", + ] + } def test_api_pairlists_available(botclient, tmp_path): ftbot, client = botclient - ftbot.config['user_data_dir'] = tmp_path + ftbot.config["user_data_dir"] = tmp_path rc = client_get(client, f"{BASE_URI}/pairlists/available") assert_response(rc, 503) - assert rc.json()['detail'] == 'Bot is not in the correct state.' + assert rc.json()["detail"] == "Bot is not in the correct state." - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["runmode"] = RunMode.WEBSERVER rc = client_get(client, f"{BASE_URI}/pairlists/available") assert_response(rc) response = rc.json() - assert isinstance(response['pairlists'], list) - assert len(response['pairlists']) > 0 + assert isinstance(response["pairlists"], list) + assert len(response["pairlists"]) > 0 - assert len([r for r in response['pairlists'] if r['name'] == 'AgeFilter']) == 1 - assert len([r for r in response['pairlists'] if r['name'] == 'VolumePairList']) == 1 - assert len([r for r in response['pairlists'] if r['name'] == 'StaticPairList']) == 1 + assert len([r for r in response["pairlists"] if r["name"] == "AgeFilter"]) == 1 + assert len([r for r in response["pairlists"] if r["name"] == "VolumePairList"]) == 1 + assert len([r for r in response["pairlists"] if r["name"] == "StaticPairList"]) == 1 - volumepl = [r for r in response['pairlists'] if r['name'] == 'VolumePairList'][0] - assert volumepl['is_pairlist_generator'] is True - assert len(volumepl['params']) > 1 - age_pl = [r for r in response['pairlists'] if r['name'] == 'AgeFilter'][0] - assert age_pl['is_pairlist_generator'] is False - assert len(volumepl['params']) > 2 + volumepl = [r for r in response["pairlists"] if r["name"] == "VolumePairList"][0] + assert volumepl["is_pairlist_generator"] is True + assert len(volumepl["params"]) > 1 + age_pl = [r for r in response["pairlists"] if r["name"] == "AgeFilter"][0] + assert age_pl["is_pairlist_generator"] is False + assert len(volumepl["params"]) > 2 def test_api_pairlists_evaluate(botclient, tmp_path, mocker): ftbot, client = botclient - ftbot.config['user_data_dir'] = tmp_path + ftbot.config["user_data_dir"] = tmp_path rc = client_get(client, 
f"{BASE_URI}/pairlists/evaluate/randomJob") assert_response(rc, 503) - assert rc.json()['detail'] == 'Bot is not in the correct state.' + assert rc.json()["detail"] == "Bot is not in the correct state." - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["runmode"] = RunMode.WEBSERVER rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/randomJob") assert_response(rc, 404) - assert rc.json()['detail'] == 'Job not found.' + assert rc.json()["detail"] == "Job not found." body = { "pairlists": [ - {"method": "StaticPairList", }, + { + "method": "StaticPairList", + }, ], - "blacklist": [ - ], - "stake_currency": "BTC" + "blacklist": [], + "stake_currency": "BTC", } # Fail, already running ApiBG.pairlist_running = True rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) assert_response(rc, 400) - assert rc.json()['detail'] == 'Pairlist evaluation is already running.' + assert rc.json()["detail"] == "Pairlist evaluation is already running." # should start the run ApiBG.pairlist_running = False rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) assert_response(rc) - assert rc.json()['status'] == 'Pairlist evaluation started in background.' - job_id = rc.json()['job_id'] + assert rc.json()["status"] == "Pairlist evaluation started in background." + job_id = rc.json()["job_id"] rc = client_get(client, f"{BASE_URI}/background/RandomJob") assert_response(rc, 404) - assert rc.json()['detail'] == 'Job not found.' + assert rc.json()["detail"] == "Job not found." + # Background list + rc = client_get(client, f"{BASE_URI}/background") + assert_response(rc) + response = rc.json() + assert isinstance(response, list) + assert len(response) == 1 + assert response[0]["job_id"] == job_id + + # Get individual job rc = client_get(client, f"{BASE_URI}/background/{job_id}") assert_response(rc) response = rc.json() - assert response['job_id'] == job_id - assert response['job_category'] == 'pairlist' + assert response["job_id"] == job_id + assert response["job_category"] == "pairlist" rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/{job_id}") assert_response(rc) response = rc.json() - assert response['result']['whitelist'] == ['ETH/BTC', 'LTC/BTC', 'XRP/BTC', 'NEO/BTC'] - assert response['result']['length'] == 4 + assert response["result"]["whitelist"] == ["ETH/BTC", "LTC/BTC", "XRP/BTC", "NEO/BTC"] + assert response["result"]["length"] == 4 # Restart with additional filter, reducing the list to 2 - body['pairlists'].append({"method": "OffsetFilter", "number_assets": 2}) + body["pairlists"].append({"method": "OffsetFilter", "number_assets": 2}) rc = client_post(client, f"{BASE_URI}/pairlists/evaluate", body) assert_response(rc) - assert rc.json()['status'] == 'Pairlist evaluation started in background.' - job_id = rc.json()['job_id'] + assert rc.json()["status"] == "Pairlist evaluation started in background." 
+ job_id = rc.json()["job_id"] rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/{job_id}") assert_response(rc) response = rc.json() - assert response['result']['whitelist'] == ['ETH/BTC', 'LTC/BTC', ] - assert response['result']['length'] == 2 + assert response["result"]["whitelist"] == [ + "ETH/BTC", + "LTC/BTC", + ] + assert response["result"]["length"] == 2 # Patch __run_pairlists - plm = mocker.patch('freqtrade.rpc.api_server.api_background_tasks.__run_pairlist', - return_value=None) + plm = mocker.patch( + "freqtrade.rpc.api_server.api_background_tasks.__run_pairlist", return_value=None + ) body = { "pairlists": [ - {"method": "StaticPairList", }, - ], - "blacklist": [ + { + "method": "StaticPairList", + }, ], + "blacklist": [], "stake_currency": "BTC", "exchange": "randomExchange", "trading_mode": "futures", @@ -1932,9 +2339,9 @@ def test_api_pairlists_evaluate(botclient, tmp_path, mocker): assert_response(rc) assert plm.call_count == 1 call_config = plm.call_args_list[0][0][1] - assert call_config['exchange']['name'] == 'randomExchange' - assert call_config['trading_mode'] == 'futures' - assert call_config['margin_mode'] == 'isolated' + assert call_config["exchange"]["name"] == "randomExchange" + assert call_config["trading_mode"] == "futures" + assert call_config["margin_mode"] == "isolated" def test_list_available_pairs(botclient): @@ -1943,38 +2350,36 @@ def test_list_available_pairs(botclient): rc = client_get(client, f"{BASE_URI}/available_pairs") assert_response(rc) - assert rc.json()['length'] == 12 - assert isinstance(rc.json()['pairs'], list) + assert rc.json()["length"] == 12 + assert isinstance(rc.json()["pairs"], list) rc = client_get(client, f"{BASE_URI}/available_pairs?timeframe=5m") assert_response(rc) - assert rc.json()['length'] == 12 + assert rc.json()["length"] == 12 rc = client_get(client, f"{BASE_URI}/available_pairs?stake_currency=ETH") assert_response(rc) - assert rc.json()['length'] == 1 - assert rc.json()['pairs'] == ['XRP/ETH'] - assert len(rc.json()['pair_interval']) == 2 + assert rc.json()["length"] == 1 + assert rc.json()["pairs"] == ["XRP/ETH"] + assert len(rc.json()["pair_interval"]) == 2 rc = client_get(client, f"{BASE_URI}/available_pairs?stake_currency=ETH&timeframe=5m") assert_response(rc) - assert rc.json()['length'] == 1 - assert rc.json()['pairs'] == ['XRP/ETH'] - assert len(rc.json()['pair_interval']) == 1 + assert rc.json()["length"] == 1 + assert rc.json()["pairs"] == ["XRP/ETH"] + assert len(rc.json()["pair_interval"]) == 1 - ftbot.config['trading_mode'] = 'futures' - rc = client_get( - client, f"{BASE_URI}/available_pairs?timeframe=1h") + ftbot.config["trading_mode"] = "futures" + rc = client_get(client, f"{BASE_URI}/available_pairs?timeframe=1h") assert_response(rc) - assert rc.json()['length'] == 1 - assert rc.json()['pairs'] == ['XRP/USDT:USDT'] + assert rc.json()["length"] == 1 + assert rc.json()["pairs"] == ["XRP/USDT:USDT"] - rc = client_get( - client, f"{BASE_URI}/available_pairs?timeframe=1h&candletype=mark") + rc = client_get(client, f"{BASE_URI}/available_pairs?timeframe=1h&candletype=mark") assert_response(rc) - assert rc.json()['length'] == 2 - assert rc.json()['pairs'] == ['UNITTEST/USDT:USDT', 'XRP/USDT:USDT'] - assert len(rc.json()['pair_interval']) == 2 + assert rc.json()["length"] == 2 + assert rc.json()["pairs"] == ["UNITTEST/USDT:USDT", "XRP/USDT:USDT"] + assert len(rc.json()["pair_interval"]) == 2 def test_sysinfo(botclient): @@ -1983,43 +2388,43 @@ def test_sysinfo(botclient): rc = client_get(client, 
f"{BASE_URI}/sysinfo") assert_response(rc) result = rc.json() - assert 'cpu_pct' in result - assert 'ram_pct' in result + assert "cpu_pct" in result + assert "ram_pct" in result def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path): try: ftbot, client = botclient - mocker.patch(f'{EXMS}.get_fee', fee) + mocker.patch(f"{EXMS}.get_fee", fee) rc = client_get(client, f"{BASE_URI}/backtest") # Backtest prevented in default mode assert_response(rc, 503) - assert rc.json()['detail'] == 'Bot is not in the correct state.' + assert rc.json()["detail"] == "Bot is not in the correct state." - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["runmode"] = RunMode.WEBSERVER # Backtesting not started yet rc = client_get(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'not_started' - assert not result['running'] - assert result['status_msg'] == 'Backtest not yet executed' - assert result['progress'] == 0 + assert result["status"] == "not_started" + assert not result["running"] + assert result["status_msg"] == "Backtest not yet executed" + assert result["progress"] == 0 # Reset backtesting rc = client_delete(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'reset' - assert not result['running'] - assert result['status_msg'] == 'Backtest reset' - ftbot.config['export'] = 'trades' - ftbot.config['backtest_cache'] = 'day' - ftbot.config['user_data_dir'] = tmp_path - ftbot.config['exportfilename'] = tmp_path / "backtest_results" - ftbot.config['exportfilename'].mkdir() + assert result["status"] == "reset" + assert not result["running"] + assert result["status_msg"] == "Backtest reset" + ftbot.config["export"] = "trades" + ftbot.config["backtest_cache"] = "day" + ftbot.config["user_data_dir"] = tmp_path + ftbot.config["exportfilename"] = tmp_path / "backtest_results" + ftbot.config["exportfilename"].mkdir() # start backtesting data = { @@ -2029,63 +2434,63 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path): "max_open_trades": 3, "stake_amount": 100, "dry_run_wallet": 1000, - "enable_protections": False + "enable_protections": False, } rc = client_post(client, f"{BASE_URI}/backtest", data=data) assert_response(rc) result = rc.json() - assert result['status'] == 'running' - assert result['progress'] == 0 - assert result['running'] - assert result['status_msg'] == 'Backtest started' + assert result["status"] == "running" + assert result["progress"] == 0 + assert result["running"] + assert result["status_msg"] == "Backtest started" rc = client_get(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'ended' - assert not result['running'] - assert result['status_msg'] == 'Backtest ended' - assert result['progress'] == 1 - assert result['backtest_result'] + assert result["status"] == "ended" + assert not result["running"] + assert result["status_msg"] == "Backtest ended" + assert result["progress"] == 1 + assert result["backtest_result"] rc = client_get(client, f"{BASE_URI}/backtest/abort") assert_response(rc) result = rc.json() - assert result['status'] == 'not_running' - assert not result['running'] - assert result['status_msg'] == 'Backtest ended' + assert result["status"] == "not_running" + assert not result["running"] + assert result["status_msg"] == "Backtest ended" # Simulate running backtest ApiBG.bgtask_running = True rc = client_get(client, f"{BASE_URI}/backtest/abort") assert_response(rc) result = rc.json() - assert 
result['status'] == 'stopping' - assert not result['running'] - assert result['status_msg'] == 'Backtest ended' + assert result["status"] == "stopping" + assert not result["running"] + assert result["status_msg"] == "Backtest ended" # Get running backtest... rc = client_get(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'running' - assert result['running'] - assert result['step'] == "backtest" - assert result['status_msg'] == "Backtest running" + assert result["status"] == "running" + assert result["running"] + assert result["step"] == "backtest" + assert result["status_msg"] == "Backtest running" # Try delete with task still running rc = client_delete(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'running' + assert result["status"] == "running" # Post to backtest that's still running rc = client_post(client, f"{BASE_URI}/backtest", data=data) assert_response(rc, 502) result = rc.json() - assert 'Bot Background task already running' in result['error'] + assert "Bot Background task already running" in result["error"] ApiBG.bgtask_running = False @@ -2093,32 +2498,34 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path): rc = client_post(client, f"{BASE_URI}/backtest", data=data) assert_response(rc) result = rc.json() - assert log_has_re('Reusing result of previous backtest.*', caplog) + assert log_has_re("Reusing result of previous backtest.*", caplog) - data['stake_amount'] = 101 + data["stake_amount"] = 101 - mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest_one_strategy', - side_effect=DependencyException('DeadBeef')) + mocker.patch( + "freqtrade.optimize.backtesting.Backtesting.backtest_one_strategy", + side_effect=DependencyException("DeadBeef"), + ) rc = client_post(client, f"{BASE_URI}/backtest", data=data) assert log_has("Backtesting caused an error: DeadBeef", caplog) rc = client_get(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'error' - assert 'Backtest failed' in result['status_msg'] + assert result["status"] == "error" + assert "Backtest failed" in result["status_msg"] # Delete backtesting to avoid leakage since the backtest-object may stick around. 
rc = client_delete(client, f"{BASE_URI}/backtest") assert_response(rc) result = rc.json() - assert result['status'] == 'reset' - assert not result['running'] - assert result['status_msg'] == 'Backtest reset' + assert result["status"] == "reset" + assert not result["running"] + assert result["status_msg"] == "Backtest reset" # Disallow base64 strategies - data['strategy'] = "xx:cHJpbnQoImhlbGxvIHdvcmxkIik=" + data["strategy"] = "xx:cHJpbnQoImhlbGxvIHdvcmxkIik=" rc = client_post(client, f"{BASE_URI}/backtest", data=data) assert_response(rc, 500) finally: @@ -2127,37 +2534,39 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path): def test_api_backtest_history(botclient, mocker, testdatadir): ftbot, client = botclient - mocker.patch('freqtrade.data.btanalysis._get_backtest_files', - return_value=[ - testdatadir / 'backtest_results/backtest-result_multistrat.json', - testdatadir / 'backtest_results/backtest-result.json' - ]) + mocker.patch( + "freqtrade.data.btanalysis._get_backtest_files", + return_value=[ + testdatadir / "backtest_results/backtest-result_multistrat.json", + testdatadir / "backtest_results/backtest-result.json", + ], + ) rc = client_get(client, f"{BASE_URI}/backtest/history") assert_response(rc, 503) - assert rc.json()['detail'] == 'Bot is not in the correct state.' + assert rc.json()["detail"] == "Bot is not in the correct state." - ftbot.config['user_data_dir'] = testdatadir - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["user_data_dir"] = testdatadir + ftbot.config["runmode"] = RunMode.WEBSERVER rc = client_get(client, f"{BASE_URI}/backtest/history") assert_response(rc) result = rc.json() assert len(result) == 3 - fn = result[0]['filename'] + fn = result[0]["filename"] assert fn == "backtest-result_multistrat" - assert result[0]['notes'] == '' - strategy = result[0]['strategy'] + assert result[0]["notes"] == "" + strategy = result[0]["strategy"] rc = client_get(client, f"{BASE_URI}/backtest/history/result?filename={fn}&strategy={strategy}") assert_response(rc) result2 = rc.json() assert result2 - assert result2['status'] == 'ended' - assert not result2['running'] - assert result2['progress'] == 1 + assert result2["status"] == "ended" + assert not result2["running"] + assert result2["progress"] == 1 # Only one strategy loaded - even though we use multiresult - assert len(result2['backtest_result']['strategy']) == 1 - assert result2['backtest_result']['strategy'][strategy] + assert len(result2["backtest_result"]["strategy"]) == 1 + assert result2["backtest_result"]["strategy"][strategy] def test_api_delete_backtest_history_entry(botclient, tmp_path: Path): @@ -2168,18 +2577,18 @@ def test_api_delete_backtest_history_entry(botclient, tmp_path: Path): bt_results_base.mkdir() file_path = bt_results_base / "test.json" file_path.touch() - meta_path = file_path.with_suffix('.meta.json') + meta_path = file_path.with_suffix(".meta.json") meta_path.touch() rc = client_delete(client, f"{BASE_URI}/backtest/history/randomFile.json") assert_response(rc, 503) - assert rc.json()['detail'] == 'Bot is not in the correct state.' + assert rc.json()["detail"] == "Bot is not in the correct state." - ftbot.config['user_data_dir'] = tmp_path - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["user_data_dir"] = tmp_path + ftbot.config["runmode"] = RunMode.WEBSERVER rc = client_delete(client, f"{BASE_URI}/backtest/history/randomFile.json") assert rc.status_code == 404 - assert rc.json()['detail'] == 'File not found.' 
+ assert rc.json()["detail"] == "File not found." rc = client_delete(client, f"{BASE_URI}/backtest/history/{file_path.name}") assert rc.status_code == 200 @@ -2196,65 +2605,85 @@ def test_api_patch_backtest_history_entry(botclient, tmp_path: Path): bt_results_base.mkdir() file_path = bt_results_base / "test.json" file_path.touch() - meta_path = file_path.with_suffix('.meta.json') - with meta_path.open('w') as metafile: - rapidjson.dump({ - CURRENT_TEST_STRATEGY: { - "run_id": "6e542efc8d5e62cef6e5be0ffbc29be81a6e751d", - "backtest_start_time": 1690176003} - }, metafile) + meta_path = file_path.with_suffix(".meta.json") + with meta_path.open("w") as metafile: + rapidjson.dump( + { + CURRENT_TEST_STRATEGY: { + "run_id": "6e542efc8d5e62cef6e5be0ffbc29be81a6e751d", + "backtest_start_time": 1690176003, + } + }, + metafile, + ) def read_metadata(): - with meta_path.open('r') as metafile: + with meta_path.open("r") as metafile: return rapidjson.load(metafile) rc = client_patch(client, f"{BASE_URI}/backtest/history/randomFile.json") assert_response(rc, 503) - ftbot.config['user_data_dir'] = tmp_path - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["user_data_dir"] = tmp_path + ftbot.config["runmode"] = RunMode.WEBSERVER - rc = client_patch(client, f"{BASE_URI}/backtest/history/randomFile.json", { - "strategy": CURRENT_TEST_STRATEGY, - }) + rc = client_patch( + client, + f"{BASE_URI}/backtest/history/randomFile.json", + { + "strategy": CURRENT_TEST_STRATEGY, + }, + ) assert rc.status_code == 404 # Nonexisting strategy - rc = client_patch(client, f"{BASE_URI}/backtest/history/{file_path.name}", { - "strategy": f"{CURRENT_TEST_STRATEGY}xxx", - }) + rc = client_patch( + client, + f"{BASE_URI}/backtest/history/{file_path.name}", + { + "strategy": f"{CURRENT_TEST_STRATEGY}xxx", + }, + ) assert rc.status_code == 400 - assert rc.json()['detail'] == 'Strategy not in metadata.' + assert rc.json()["detail"] == "Strategy not in metadata." 
# no Notes - rc = client_patch(client, f"{BASE_URI}/backtest/history/{file_path.name}", { - "strategy": CURRENT_TEST_STRATEGY, - }) + rc = client_patch( + client, + f"{BASE_URI}/backtest/history/{file_path.name}", + { + "strategy": CURRENT_TEST_STRATEGY, + }, + ) assert rc.status_code == 200 res = rc.json() assert isinstance(res, list) assert len(res) == 1 - assert res[0]['strategy'] == CURRENT_TEST_STRATEGY - assert res[0]['notes'] == '' + assert res[0]["strategy"] == CURRENT_TEST_STRATEGY + assert res[0]["notes"] == "" fileres = read_metadata() - assert fileres[CURRENT_TEST_STRATEGY]['run_id'] == res[0]['run_id'] - assert fileres[CURRENT_TEST_STRATEGY]['notes'] == '' + assert fileres[CURRENT_TEST_STRATEGY]["run_id"] == res[0]["run_id"] + assert fileres[CURRENT_TEST_STRATEGY]["notes"] == "" - rc = client_patch(client, f"{BASE_URI}/backtest/history/{file_path.name}", { - "strategy": CURRENT_TEST_STRATEGY, - "notes": "FooBar", - }) + rc = client_patch( + client, + f"{BASE_URI}/backtest/history/{file_path.name}", + { + "strategy": CURRENT_TEST_STRATEGY, + "notes": "FooBar", + }, + ) assert rc.status_code == 200 res = rc.json() assert isinstance(res, list) assert len(res) == 1 - assert res[0]['strategy'] == CURRENT_TEST_STRATEGY - assert res[0]['notes'] == 'FooBar' + assert res[0]["strategy"] == CURRENT_TEST_STRATEGY + assert res[0]["notes"] == "FooBar" fileres = read_metadata() - assert fileres[CURRENT_TEST_STRATEGY]['run_id'] == res[0]['run_id'] - assert fileres[CURRENT_TEST_STRATEGY]['notes'] == 'FooBar' + assert fileres[CURRENT_TEST_STRATEGY]["run_id"] == res[0]["run_id"] + assert fileres[CURRENT_TEST_STRATEGY]["notes"] == "FooBar" def test_api_patch_backtest_market_change(botclient, tmp_path: Path): @@ -2264,20 +2693,22 @@ def test_api_patch_backtest_market_change(botclient, tmp_path: Path): bt_results_base = tmp_path / "backtest_results" bt_results_base.mkdir() file_path = bt_results_base / "test_22_market_change.feather" - df = pd.DataFrame({ - 'date': ['2018-01-01T00:00:00Z', '2018-01-01T00:05:00Z'], - 'count': [2, 4], - 'mean': [2555, 2556], - 'rel_mean': [0, 0.022], - }) - df['date'] = pd.to_datetime(df['date']) - df.to_feather(file_path, compression_level=9, compression='lz4') + df = pd.DataFrame( + { + "date": ["2018-01-01T00:00:00Z", "2018-01-01T00:05:00Z"], + "count": [2, 4], + "mean": [2555, 2556], + "rel_mean": [0, 0.022], + } + ) + df["date"] = pd.to_datetime(df["date"]) + df.to_feather(file_path, compression_level=9, compression="lz4") # Nonexisting file rc = client_get(client, f"{BASE_URI}/backtest/history/randomFile.json/market_change") assert_response(rc, 503) - ftbot.config['user_data_dir'] = tmp_path - ftbot.config['runmode'] = RunMode.WEBSERVER + ftbot.config["user_data_dir"] = tmp_path + ftbot.config["runmode"] = RunMode.WEBSERVER rc = client_get(client, f"{BASE_URI}/backtest/history/randomFile.json/market_change") assert_response(rc, 404) @@ -2285,11 +2716,11 @@ def test_api_patch_backtest_market_change(botclient, tmp_path: Path): rc = client_get(client, f"{BASE_URI}/backtest/history/test_22/market_change") assert_response(rc, 200) result = rc.json() - assert result['length'] == 2 - assert result['columns'] == ['date', 'count', 'mean', 'rel_mean', '__date_ts'] - assert result['data'] == [ - ['2018-01-01T00:00:00Z', 2, 2555, 0.0, 1514764800000], - ['2018-01-01T00:05:00Z', 4, 2556, 0.022, 1514765100000] + assert result["length"] == 2 + assert result["columns"] == ["date", "count", "mean", "rel_mean", "__date_ts"] + assert result["data"] == [ + ["2018-01-01T00:00:00Z", 
2, 2555, 0.0, 1514764800000], + ["2018-01-01T00:05:00Z", 4, 2556, 0.022, 1514765100000], ] @@ -2308,17 +2739,17 @@ def test_api_ws_subscribe(botclient, mocker): _ftbot, client = botclient ws_url = f"/api/v1/message/ws?token={_TEST_WS_TOKEN}" - sub_mock = mocker.patch('freqtrade.rpc.api_server.ws.WebSocketChannel.set_subscriptions') + sub_mock = mocker.patch("freqtrade.rpc.api_server.ws.WebSocketChannel.set_subscriptions") with client.websocket_connect(ws_url) as ws: - ws.send_json({'type': 'subscribe', 'data': ['whitelist']}) + ws.send_json({"type": "subscribe", "data": ["whitelist"]}) time.sleep(0.2) # Check call count is now 1 as we sent a valid subscribe request assert sub_mock.call_count == 1 with client.websocket_connect(ws_url) as ws: - ws.send_json({'type': 'subscribe', 'data': 'whitelist'}) + ws.send_json({"type": "subscribe", "data": "whitelist"}) time.sleep(0.2) # Call count hasn't changed as the subscribe request was invalid @@ -2337,7 +2768,7 @@ def test_api_ws_requests(botclient, caplog): response = ws.receive_json() assert log_has_re(r"Request of type whitelist from.+", caplog) - assert response['type'] == "whitelist" + assert response["type"] == "whitelist" # Test analyzed_df request with client.websocket_connect(ws_url) as ws: @@ -2345,7 +2776,7 @@ def test_api_ws_requests(botclient, caplog): response = ws.receive_json() assert log_has_re(r"Request of type analyzed_df from.+", caplog) - assert response['type'] == "analyzed_df" + assert response["type"] == "analyzed_df" caplog.clear() # Test analyzed_df request with data @@ -2354,23 +2785,28 @@ def test_api_ws_requests(botclient, caplog): response = ws.receive_json() assert log_has_re(r"Request of type analyzed_df from.+", caplog) - assert response['type'] == "analyzed_df" + assert response["type"] == "analyzed_df" def test_api_ws_send_msg(default_conf, mocker, caplog): try: caplog.set_level(logging.DEBUG) - default_conf.update({"api_server": {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "CORS_origins": ['http://example.com'], - "username": _TEST_USER, - "password": _TEST_PASS, - "ws_token": _TEST_WS_TOKEN - }}) - mocker.patch('freqtrade.rpc.telegram.Telegram._init') - mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api') + default_conf.update( + { + "api_server": { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "CORS_origins": ["http://example.com"], + "username": _TEST_USER, + "password": _TEST_PASS, + "ws_token": _TEST_WS_TOKEN, + } + } + ) + mocker.patch("freqtrade.rpc.telegram.Telegram._init") + mocker.patch("freqtrade.rpc.api_server.ApiServer.start_api") apiserver = ApiServer(default_conf) apiserver.add_rpc_handler(RPC(get_patched_freqtradebot(mocker, default_conf))) diff --git a/tests/rpc/test_rpc_emc.py b/tests/rpc/test_rpc_emc.py index 4cfa3e9db..678379e68 100644 --- a/tests/rpc/test_rpc_emc.py +++ b/tests/rpc/test_rpc_emc.py @@ -1,6 +1,7 @@ """ Unit test file for rpc/external_message_consumer.py """ + import asyncio import logging from datetime import datetime, timezone @@ -21,19 +22,16 @@ _TEST_WS_PORT = 9989 @pytest.fixture def patched_emc(default_conf, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": "null", - "port": 9891, - "ws_token": _TEST_WS_TOKEN - } - ] + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + {"name": "default", "host": "null", "port": 9891, "ws_token": _TEST_WS_TOKEN} + ], + } } - }) + ) 
dataprovider = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dataprovider) @@ -81,7 +79,7 @@ def test_emc_init(patched_emc): # Parametrize this? def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): test_producer = {"name": "test", "url": "ws://test", "ws_token": "test"} - producer_name = test_producer['name'] + producer_name = test_producer["name"] invalid_msg = r"Invalid message .+" caplog.set_level(logging.DEBUG) @@ -92,7 +90,8 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): assert log_has(f"Received message of type `whitelist` from `{producer_name}`", caplog) assert log_has( - f"Consumed message from `{producer_name}` of type `RPCMessageType.WHITELIST`", caplog) + f"Consumed message from `{producer_name}` of type `RPCMessageType.WHITELIST`", caplog + ) # Test handle analyzed_df single candle message df_message = { @@ -100,8 +99,8 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): "data": { "key": ("BTC/USDT", "5m", "spot"), "df": ohlcv_history, - "la": datetime.now(timezone.utc) - } + "la": datetime.now(timezone.utc), + }, } patched_emc.handle_producer_message(test_producer, df_message) @@ -124,11 +123,7 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): malformed_message = { "type": "analyzed_df", - "data": { - "key": "BTC/USDT", - "df": ohlcv_history, - "la": datetime.now(timezone.utc) - } + "data": {"key": "BTC/USDT", "df": ohlcv_history, "la": datetime.now(timezone.utc)}, } patched_emc.handle_producer_message(test_producer, malformed_message) @@ -138,13 +133,13 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): # Empty dataframe malformed_message = { - "type": "analyzed_df", - "data": { - "key": ("BTC/USDT", "5m", "spot"), - "df": ohlcv_history.loc[ohlcv_history['open'] < 0], - "la": datetime.now(timezone.utc) - } - } + "type": "analyzed_df", + "data": { + "key": ("BTC/USDT", "5m", "spot"), + "df": ohlcv_history.loc[ohlcv_history["open"] < 0], + "la": datetime.now(timezone.utc), + }, + } patched_emc.handle_producer_message(test_producer, malformed_message) assert log_has(f"Received message of type `analyzed_df` from `{producer_name}`", caplog) @@ -166,29 +161,32 @@ def test_emc_handle_producer_message(patched_emc, caplog, ohlcv_history): async def test_emc_create_connection_success(default_conf, caplog, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 60, - "ping_timeout": 60, - "sleep_timeout": 60 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 60, + "ping_timeout": 60, + "sleep_timeout": 60, + } } - }) + ) - mocker.patch('freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start', - MagicMock()) + mocker.patch( + "freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start", MagicMock() + ) dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) - test_producer = default_conf['external_message_consumer']['producers'][0] + test_producer = default_conf["external_message_consumer"]["producers"][0] lock = asyncio.Lock() emc._running = True @@ -205,27 +203,27 @@ async def 
test_emc_create_connection_success(default_conf, caplog, mocker): emc.shutdown() -@pytest.mark.parametrize('host,port', [ - (_TEST_WS_HOST, -1), - ("10000.1241..2121/", _TEST_WS_PORT), -]) +@pytest.mark.parametrize( + "host,port", + [ + (_TEST_WS_HOST, -1), + ("10000.1241..2121/", _TEST_WS_PORT), + ], +) async def test_emc_create_connection_invalid_url(default_conf, caplog, mocker, host, port): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": host, - "port": port, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 60, - "ping_timeout": 60, - "sleep_timeout": 60 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + {"name": "default", "host": host, "port": port, "ws_token": _TEST_WS_TOKEN} + ], + "wait_timeout": 60, + "ping_timeout": 60, + "sleep_timeout": 60, + } } - }) + ) dp = DataProvider(default_conf, None, None, None) # Handle start explicitly to avoid messing with threading in tests @@ -242,25 +240,27 @@ async def test_emc_create_connection_invalid_url(default_conf, caplog, mocker, h async def test_emc_create_connection_error(default_conf, caplog, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 60, - "ping_timeout": 60, - "sleep_timeout": 60 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 60, + "ping_timeout": 60, + "sleep_timeout": 60, + } } - }) + ) # Test unexpected error - mocker.patch('websockets.connect', side_effect=RuntimeError) + mocker.patch("websockets.connect", side_effect=RuntimeError) dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) @@ -275,28 +275,31 @@ async def test_emc_create_connection_error(default_conf, caplog, mocker): async def test_emc_receive_messages_valid(default_conf, caplog, mocker): caplog.set_level(logging.DEBUG) - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 1, - "ping_timeout": 60, - "sleep_time": 60 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 1, + "ping_timeout": 60, + "sleep_time": 60, + } } - }) + ) - mocker.patch('freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start', - MagicMock()) + mocker.patch( + "freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start", MagicMock() + ) lock = asyncio.Lock() - test_producer = default_conf['external_message_consumer']['producers'][0] + test_producer = default_conf["external_message_consumer"]["producers"][0] dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) @@ -319,28 +322,31 @@ async def test_emc_receive_messages_valid(default_conf, caplog, mocker): async def test_emc_receive_messages_invalid(default_conf, caplog, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - 
"host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 1, - "ping_timeout": 60, - "sleep_time": 60 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 1, + "ping_timeout": 60, + "sleep_time": 60, + } } - }) + ) - mocker.patch('freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start', - MagicMock()) + mocker.patch( + "freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start", MagicMock() + ) lock = asyncio.Lock() - test_producer = default_conf['external_message_consumer']['producers'][0] + test_producer = default_conf["external_message_consumer"]["producers"][0] dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) @@ -363,28 +369,31 @@ async def test_emc_receive_messages_invalid(default_conf, caplog, mocker): async def test_emc_receive_messages_timeout(default_conf, caplog, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 0.1, - "ping_timeout": 1, - "sleep_time": 1 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 0.1, + "ping_timeout": 1, + "sleep_time": 1, + } } - }) + ) - mocker.patch('freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start', - MagicMock()) + mocker.patch( + "freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start", MagicMock() + ) lock = asyncio.Lock() - test_producer = default_conf['external_message_consumer']['producers'][0] + test_producer = default_conf["external_message_consumer"]["producers"][0] dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) @@ -411,28 +420,31 @@ async def test_emc_receive_messages_timeout(default_conf, caplog, mocker): async def test_emc_receive_messages_handle_error(default_conf, caplog, mocker): - default_conf.update({ - "external_message_consumer": { - "enabled": True, - "producers": [ - { - "name": "default", - "host": _TEST_WS_HOST, - "port": _TEST_WS_PORT, - "ws_token": _TEST_WS_TOKEN - } - ], - "wait_timeout": 1, - "ping_timeout": 1, - "sleep_time": 1 + default_conf.update( + { + "external_message_consumer": { + "enabled": True, + "producers": [ + { + "name": "default", + "host": _TEST_WS_HOST, + "port": _TEST_WS_PORT, + "ws_token": _TEST_WS_TOKEN, + } + ], + "wait_timeout": 1, + "ping_timeout": 1, + "sleep_time": 1, + } } - }) + ) - mocker.patch('freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start', - MagicMock()) + mocker.patch( + "freqtrade.rpc.external_message_consumer.ExternalMessageConsumer.start", MagicMock() + ) lock = asyncio.Lock() - test_producer = default_conf['external_message_consumer']['producers'][0] + test_producer = default_conf["external_message_consumer"]["producers"][0] dp = DataProvider(default_conf, None, None, None) emc = ExternalMessageConsumer(default_conf, dp) diff --git a/tests/rpc/test_rpc_manager.py b/tests/rpc/test_rpc_manager.py index f0bb72fc9..2792fd082 100644 --- a/tests/rpc/test_rpc_manager.py +++ b/tests/rpc/test_rpc_manager.py @@ -11,7 +11,7 @@ from 
tests.conftest import get_patched_freqtradebot, log_has def test__init__(mocker, default_conf) -> None: - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) assert rpc_manager.registered_modules == [] @@ -19,97 +19,91 @@ def test__init__(mocker, default_conf) -> None: def test_init_telegram_disabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert not log_has('Enabling rpc.telegram ...', caplog) + assert not log_has("Enabling rpc.telegram ...", caplog) assert rpc_manager.registered_modules == [] def test_init_telegram_enabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = True - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) + default_conf["telegram"]["enabled"] = True + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert log_has('Enabling rpc.telegram ...', caplog) + assert log_has("Enabling rpc.telegram ...", caplog) len_modules = len(rpc_manager.registered_modules) assert len_modules == 1 - assert 'telegram' in [mod.name for mod in rpc_manager.registered_modules] + assert "telegram" in [mod.name for mod in rpc_manager.registered_modules] def test_cleanup_telegram_disabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.cleanup', MagicMock()) - default_conf['telegram']['enabled'] = False + telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.cleanup", MagicMock()) + default_conf["telegram"]["enabled"] = False freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) rpc_manager.cleanup() - assert not log_has('Cleaning up rpc.telegram ...', caplog) + assert not log_has("Cleaning up rpc.telegram ...", caplog) assert telegram_mock.call_count == 0 def test_cleanup_telegram_enabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = True - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.cleanup', MagicMock()) + default_conf["telegram"]["enabled"] = True + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) + telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.cleanup", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) # Check we have Telegram as a registered modules - assert 'telegram' in [mod.name for mod in rpc_manager.registered_modules] + assert "telegram" in [mod.name for mod in rpc_manager.registered_modules] rpc_manager.cleanup() - assert log_has('Cleaning up rpc.telegram ...', caplog) - assert 'telegram' not in [mod.name for mod in rpc_manager.registered_modules] + assert log_has("Cleaning up rpc.telegram ...", caplog) + assert "telegram" not in [mod.name for mod in rpc_manager.registered_modules] assert telegram_mock.call_count == 1 def test_send_msg_telegram_disabled(mocker, default_conf, caplog) -> None: - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) - default_conf['telegram']['enabled'] = False + 
telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", MagicMock()) + default_conf["telegram"]["enabled"] = False freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) - rpc_manager.send_msg({ - 'type': RPCMessageType.STATUS, - 'status': 'test' - }) + rpc_manager.send_msg({"type": RPCMessageType.STATUS, "status": "test"}) assert log_has("Sending rpc message: {'type': status, 'status': 'test'}", caplog) assert telegram_mock.call_count == 0 def test_send_msg_telegram_error(mocker, default_conf, caplog) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', side_effect=ValueError()) - default_conf['telegram']['enabled'] = True + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", side_effect=ValueError()) + default_conf["telegram"]["enabled"] = True freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) - rpc_manager.send_msg({ - 'type': RPCMessageType.STATUS, - 'status': 'test' - }) + rpc_manager.send_msg({"type": RPCMessageType.STATUS, "status": "test"}) assert log_has("Sending rpc message: {'type': status, 'status': 'test'}", caplog) assert log_has("Exception occurred within RPC module telegram", caplog) def test_process_msg_queue(mocker, default_conf, caplog) -> None: - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg') - default_conf['telegram']['enabled'] = True - default_conf['telegram']['allow_custom_messages'] = True - mocker.patch('freqtrade.rpc.telegram.Telegram._init') + telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg") + default_conf["telegram"]["enabled"] = True + default_conf["telegram"]["allow_custom_messages"] = True + mocker.patch("freqtrade.rpc.telegram.Telegram._init") freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) queue = deque() - queue.append('Test message') - queue.append('Test message 2') + queue.append("Test message") + queue.append("Test message 2") rpc_manager.process_msg_queue(queue) assert log_has("Sending rpc strategy_msg: Test message", caplog) @@ -118,15 +112,12 @@ def test_process_msg_queue(mocker, default_conf, caplog) -> None: def test_send_msg_telegram_enabled(mocker, default_conf, caplog) -> None: - default_conf['telegram']['enabled'] = True - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg') - mocker.patch('freqtrade.rpc.telegram.Telegram._init') + default_conf["telegram"]["enabled"] = True + telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg") + mocker.patch("freqtrade.rpc.telegram.Telegram._init") freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) - rpc_manager.send_msg({ - 'type': RPCMessageType.STATUS, - 'status': 'test' - }) + rpc_manager.send_msg({"type": RPCMessageType.STATUS, "status": "test"}) assert log_has("Sending rpc message: {'type': status, 'status': 'test'}", caplog) assert telegram_mock.call_count == 1 @@ -134,76 +125,73 @@ def test_send_msg_telegram_enabled(mocker, default_conf, caplog) -> None: def test_init_webhook_disabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = False - default_conf['webhook'] = {'enabled': False} + default_conf["telegram"]["enabled"] = False + default_conf["webhook"] = {"enabled": False} 
rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert not log_has('Enabling rpc.webhook ...', caplog) + assert not log_has("Enabling rpc.webhook ...", caplog) assert rpc_manager.registered_modules == [] def test_init_webhook_enabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = False - default_conf['webhook'] = {'enabled': True, 'url': "https://DEADBEEF.com"} + default_conf["telegram"]["enabled"] = False + default_conf["webhook"] = {"enabled": True, "url": "https://DEADBEEF.com"} rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert log_has('Enabling rpc.webhook ...', caplog) + assert log_has("Enabling rpc.webhook ...", caplog) assert len(rpc_manager.registered_modules) == 1 - assert 'webhook' in [mod.name for mod in rpc_manager.registered_modules] + assert "webhook" in [mod.name for mod in rpc_manager.registered_modules] def test_send_msg_webhook_CustomMessagetype(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = False - default_conf['webhook'] = {'enabled': True, 'url': "https://DEADBEEF.com"} - mocker.patch('freqtrade.rpc.webhook.Webhook.send_msg', - MagicMock(side_effect=NotImplementedError)) + default_conf["telegram"]["enabled"] = False + default_conf["webhook"] = {"enabled": True, "url": "https://DEADBEEF.com"} + mocker.patch( + "freqtrade.rpc.webhook.Webhook.send_msg", MagicMock(side_effect=NotImplementedError) + ) rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert 'webhook' in [mod.name for mod in rpc_manager.registered_modules] - rpc_manager.send_msg({'type': RPCMessageType.STARTUP, - 'status': 'TestMessage'}) - assert log_has( - "Message type 'startup' not implemented by handler webhook.", - caplog) + assert "webhook" in [mod.name for mod in rpc_manager.registered_modules] + rpc_manager.send_msg({"type": RPCMessageType.STARTUP, "status": "TestMessage"}) + assert log_has("Message type 'startup' not implemented by handler webhook.", caplog) def test_startupmessages_telegram_enabled(mocker, default_conf) -> None: - default_conf['telegram']['enabled'] = True - telegram_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) + default_conf["telegram"]["enabled"] = True + telegram_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) freqtradebot = get_patched_freqtradebot(mocker, default_conf) rpc_manager = RPCManager(freqtradebot) rpc_manager.startup_messages(default_conf, freqtradebot.pairlists, freqtradebot.protections) assert telegram_mock.call_count == 3 - assert "*Exchange:* `binance`" in telegram_mock.call_args_list[1][0][0]['status'] + assert "*Exchange:* `binance`" in telegram_mock.call_args_list[1][0][0]["status"] telegram_mock.reset_mock() - default_conf['dry_run'] = True - default_conf['whitelist'] = {'method': 'VolumePairList', - 'config': {'number_assets': 20} - } - default_conf['protections'] = [{"method": "StoplossGuard", - "lookback_period": 60, "trade_limit": 2, "stop_duration": 60}] + default_conf["dry_run"] = True + default_conf["whitelist"] = {"method": "VolumePairList", "config": {"number_assets": 20}} + default_conf["protections"] = [ + {"method": "StoplossGuard", "lookback_period": 60, "trade_limit": 2, "stop_duration": 60} + ] freqtradebot = 
get_patched_freqtradebot(mocker, default_conf) - rpc_manager.startup_messages(default_conf, freqtradebot.pairlists, freqtradebot.protections) + rpc_manager.startup_messages(default_conf, freqtradebot.pairlists, freqtradebot.protections) assert telegram_mock.call_count == 4 - assert "Dry run is enabled." in telegram_mock.call_args_list[0][0][0]['status'] - assert 'StoplossGuard' in telegram_mock.call_args_list[-1][0][0]['status'] + assert "Dry run is enabled." in telegram_mock.call_args_list[0][0][0]["status"] + assert "StoplossGuard" in telegram_mock.call_args_list[-1][0][0]["status"] def test_init_apiserver_disabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) run_mock = MagicMock() - mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api', run_mock) - default_conf['telegram']['enabled'] = False + mocker.patch("freqtrade.rpc.api_server.ApiServer.start_api", run_mock) + default_conf["telegram"]["enabled"] = False rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) - assert not log_has('Enabling rpc.api_server', caplog) + assert not log_has("Enabling rpc.api_server", caplog) assert rpc_manager.registered_modules == [] assert run_mock.call_count == 0 @@ -211,21 +199,22 @@ def test_init_apiserver_disabled(mocker, default_conf, caplog) -> None: def test_init_apiserver_enabled(mocker, default_conf, caplog) -> None: caplog.set_level(logging.DEBUG) run_mock = MagicMock() - mocker.patch('freqtrade.rpc.api_server.ApiServer.start_api', run_mock) + mocker.patch("freqtrade.rpc.api_server.ApiServer.start_api", run_mock) default_conf["telegram"]["enabled"] = False - default_conf["api_server"] = {"enabled": True, - "listen_ip_address": "127.0.0.1", - "listen_port": 8080, - "username": "TestUser", - "password": "TestPass", - } + default_conf["api_server"] = { + "enabled": True, + "listen_ip_address": "127.0.0.1", + "listen_port": 8080, + "username": "TestUser", + "password": "TestPass", + } rpc_manager = RPCManager(get_patched_freqtradebot(mocker, default_conf)) # Sleep to allow the thread to start time.sleep(0.5) - assert log_has('Enabling rpc.api_server', caplog) + assert log_has("Enabling rpc.api_server", caplog) assert len(rpc_manager.registered_modules) == 1 - assert 'apiserver' in [mod.name for mod in rpc_manager.registered_modules] + assert "apiserver" in [mod.name for mod in rpc_manager.registered_modules] assert run_mock.call_count == 1 ApiServer.shutdown() diff --git a/tests/rpc/test_rpc_telegram.py b/tests/rpc/test_rpc_telegram.py index 9a3b713e2..6e4aa3384 100644 --- a/tests/rpc/test_rpc_telegram.py +++ b/tests/rpc/test_rpc_telegram.py @@ -22,8 +22,14 @@ from telegram.error import BadRequest, NetworkError, TelegramError from freqtrade import __version__ from freqtrade.constants import CANCEL_REASON from freqtrade.edge import PairInfo -from freqtrade.enums import (ExitType, MarketDirection, RPCMessageType, RunMode, SignalDirection, - State) +from freqtrade.enums import ( + ExitType, + MarketDirection, + RPCMessageType, + RunMode, + SignalDirection, + State, +) from freqtrade.exceptions import OperationalException from freqtrade.freqtradebot import FreqtradeBot from freqtrade.loggers import setup_logging @@ -33,20 +39,29 @@ from freqtrade.rpc import RPC from freqtrade.rpc.rpc import RPCException from freqtrade.rpc.telegram import Telegram, authorized_only from freqtrade.util.datetime_helpers import dt_now -from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, create_mock_trades, - create_mock_trades_usdt, get_patched_freqtradebot, log_has, 
log_has_re, - patch_exchange, patch_get_signal, patch_whitelist) +from tests.conftest import ( + CURRENT_TEST_STRATEGY, + EXMS, + create_mock_trades, + create_mock_trades_usdt, + get_patched_freqtradebot, + log_has, + log_has_re, + patch_exchange, + patch_get_signal, + patch_whitelist, +) @pytest.fixture(autouse=True) def mock_exchange_loop(mocker): - mocker.patch('freqtrade.exchange.exchange.Exchange._init_async_loop') + mocker.patch("freqtrade.exchange.exchange.Exchange._init_async_loop") @pytest.fixture def default_conf(default_conf) -> dict: # Telegram is enabled by default - default_conf['telegram']['enabled'] = True + default_conf["telegram"]["enabled"] = True return default_conf @@ -66,6 +81,7 @@ def patch_eventloop_threading(telegrambot): telegrambot._loop = asyncio.new_event_loop() is_init = True telegrambot._loop.run_forever() + x = threading.Thread(target=thread_fuck, daemon=True) x.start() while not is_init: @@ -79,7 +95,7 @@ class DummyCls(Telegram): def __init__(self, rpc: RPC, config) -> None: super().__init__(rpc, config) - self.state = {'called': False} + self.state = {"called": False} def _init(self): pass @@ -89,21 +105,21 @@ class DummyCls(Telegram): """ Fake method that only change the state of the object """ - self.state['called'] = True + self.state["called"] = True @authorized_only async def dummy_exception(self, *args, **kwargs) -> None: """ Fake method that throw an exception """ - raise Exception('test') + raise Exception("test") def get_telegram_testobject(mocker, default_conf, mock=True, ftbot=None): msg_mock = AsyncMock() if mock: mocker.patch.multiple( - 'freqtrade.rpc.telegram.Telegram', + "freqtrade.rpc.telegram.Telegram", _init=MagicMock(), _send_msg=msg_mock, _start_thread=MagicMock(), @@ -119,7 +135,7 @@ def get_telegram_testobject(mocker, default_conf, mock=True, ftbot=None): def test_telegram__init__(default_conf, mocker) -> None: - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) telegram, _, _ = get_telegram_testobject(mocker, default_conf) assert telegram._config == default_conf @@ -127,9 +143,9 @@ def test_telegram__init__(default_conf, mocker) -> None: def test_telegram_init(default_conf, mocker, caplog) -> None: app_mock = MagicMock() - mocker.patch('freqtrade.rpc.telegram.Telegram._start_thread', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram._init_telegram_app', return_value=app_mock) - mocker.patch('freqtrade.rpc.telegram.Telegram._startup_telegram', AsyncMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._start_thread", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init_telegram_app", return_value=app_mock) + mocker.patch("freqtrade.rpc.telegram.Telegram._startup_telegram", AsyncMock()) telegram, _, _ = get_telegram_testobject(mocker, default_conf, mock=False) telegram._init() @@ -139,18 +155,20 @@ def test_telegram_init(default_conf, mocker, caplog) -> None: assert app_mock.add_handler.call_count > 0 # assert start_polling.start_polling.call_count == 1 - message_str = ("rpc.telegram is listening for following commands: [['status'], ['profit'], " - "['balance'], ['start'], ['stop'], " - "['forceexit', 'forcesell', 'fx'], ['forcebuy', 'forcelong'], ['forceshort'], " - "['reload_trade'], ['trades'], ['delete'], ['cancel_open_order', 'coo'], " - "['performance'], ['buys', 'entries'], ['exits', 'sells'], ['mix_tags'], " - "['stats'], ['daily'], ['weekly'], ['monthly'], " - "['count'], ['locks'], ['delete_locks', 'unlock'], " - 
"['reload_conf', 'reload_config'], ['show_conf', 'show_config'], " - "['stopbuy', 'stopentry'], ['whitelist'], ['blacklist'], " - "['bl_delete', 'blacklist_delete'], " - "['logs'], ['edge'], ['health'], ['help'], ['version'], ['marketdir'], " - "['order'], ['list_custom_data']]") + message_str = ( + "rpc.telegram is listening for following commands: [['status'], ['profit'], " + "['balance'], ['start'], ['stop'], " + "['forceexit', 'forcesell', 'fx'], ['forcebuy', 'forcelong'], ['forceshort'], " + "['reload_trade'], ['trades'], ['delete'], ['cancel_open_order', 'coo'], " + "['performance'], ['buys', 'entries'], ['exits', 'sells'], ['mix_tags'], " + "['stats'], ['daily'], ['weekly'], ['monthly'], " + "['count'], ['locks'], ['delete_locks', 'unlock'], " + "['reload_conf', 'reload_config'], ['show_conf', 'show_config'], " + "['stopbuy', 'stopentry'], ['whitelist'], ['blacklist'], " + "['bl_delete', 'blacklist_delete'], " + "['logs'], ['edge'], ['health'], ['help'], ['version'], ['marketdir'], " + "['order'], ['list_custom_data']]" + ) assert log_has(message_str, caplog) @@ -161,7 +179,7 @@ async def test_telegram_startup(default_conf, mocker) -> None: app_mock.start = AsyncMock() app_mock.updater.start_polling = AsyncMock() app_mock.updater.running = False - sleep_mock = mocker.patch('freqtrade.rpc.telegram.asyncio.sleep', AsyncMock()) + sleep_mock = mocker.patch("freqtrade.rpc.telegram.asyncio.sleep", AsyncMock()) telegram, _, _ = get_telegram_testobject(mocker, default_conf) telegram._app = app_mock @@ -172,7 +190,10 @@ async def test_telegram_startup(default_conf, mocker) -> None: assert sleep_mock.call_count == 1 -async def test_telegram_cleanup(default_conf, mocker, ) -> None: +async def test_telegram_cleanup( + default_conf, + mocker, +) -> None: app_mock = MagicMock() app_mock.stop = AsyncMock() app_mock.initialize = AsyncMock() @@ -195,43 +216,43 @@ async def test_telegram_cleanup(default_conf, mocker, ) -> None: async def test_authorized_only(default_conf, mocker, caplog, update) -> None: patch_exchange(mocker) caplog.set_level(logging.DEBUG) - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) dummy = DummyCls(rpc, default_conf) patch_get_signal(bot) await dummy.dummy_handler(update=update, context=MagicMock()) - assert dummy.state['called'] is True - assert log_has('Executing handler: dummy_handler for chat_id: 0', caplog) - assert not log_has('Rejected unauthorized message from: 0', caplog) - assert not log_has('Exception occurred within Telegram module', caplog) + assert dummy.state["called"] is True + assert log_has("Executing handler: dummy_handler for chat_id: 0", caplog) + assert not log_has("Rejected unauthorized message from: 0", caplog) + assert not log_has("Exception occurred within Telegram module", caplog) async def test_authorized_only_unauthorized(default_conf, mocker, caplog) -> None: patch_exchange(mocker) caplog.set_level(logging.DEBUG) - chat = Chat(0xdeadbeef, 0) + chat = Chat(0xDEADBEEF, 0) message = Message(randint(1, 100), datetime.now(timezone.utc), chat) update = Update(randint(1, 100), message=message) - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) dummy = DummyCls(rpc, default_conf) patch_get_signal(bot) await dummy.dummy_handler(update=update, context=MagicMock()) - assert dummy.state['called'] is False - assert not log_has('Executing handler: dummy_handler for chat_id: 
3735928559', caplog) - assert log_has('Rejected unauthorized message from: 3735928559', caplog) - assert not log_has('Exception occurred within Telegram module', caplog) + assert dummy.state["called"] is False + assert not log_has("Executing handler: dummy_handler for chat_id: 3735928559", caplog) + assert log_has("Rejected unauthorized message from: 3735928559", caplog) + assert not log_has("Exception occurred within Telegram module", caplog) async def test_authorized_only_exception(default_conf, mocker, caplog, update) -> None: patch_exchange(mocker) - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False bot = FreqtradeBot(default_conf) rpc = RPC(bot) @@ -239,54 +260,58 @@ async def test_authorized_only_exception(default_conf, mocker, caplog, update) - patch_get_signal(bot) await dummy.dummy_exception(update=update, context=MagicMock()) - assert dummy.state['called'] is False - assert not log_has('Executing handler: dummy_handler for chat_id: 0', caplog) - assert not log_has('Rejected unauthorized message from: 0', caplog) - assert log_has('Exception occurred within Telegram module', caplog) + assert dummy.state["called"] is False + assert not log_has("Executing handler: dummy_handler for chat_id: 0", caplog) + assert not log_has("Rejected unauthorized message from: 0", caplog) + assert log_has("Exception occurred within Telegram module", caplog) async def test_telegram_status(default_conf, update, mocker) -> None: - default_conf['telegram']['enabled'] = False + default_conf["telegram"]["enabled"] = False status_table = MagicMock() - mocker.patch('freqtrade.rpc.telegram.Telegram._status_table', status_table) + mocker.patch("freqtrade.rpc.telegram.Telegram._status_table", status_table) mocker.patch.multiple( - 'freqtrade.rpc.rpc.RPC', - _rpc_trade_status=MagicMock(return_value=[{ - 'trade_id': 1, - 'pair': 'ETH/BTC', - 'base_currency': 'ETH', - 'quote_currency': 'BTC', - 'open_date': dt_now(), - 'close_date': None, - 'open_rate': 1.099e-05, - 'close_rate': None, - 'current_rate': 1.098e-05, - 'amount': 90.99181074, - 'stake_amount': 90.99181074, - 'max_stake_amount': 90.99181074, - 'buy_tag': None, - 'enter_tag': None, - 'close_profit_ratio': None, - 'profit': -0.0059, - 'profit_ratio': -0.0059, - 'profit_abs': -0.225, - 'realized_profit': 0.0, - 'total_profit_abs': -0.225, - 'initial_stop_loss_abs': 1.098e-05, - 'stop_loss_abs': 1.099e-05, - 'exit_order_status': None, - 'initial_stop_loss_ratio': -0.0005, - 'stoploss_current_dist': 1e-08, - 'stoploss_current_dist_ratio': -0.0002, - 'stop_loss_ratio': -0.0001, - 'open_order': '(limit buy rem=0.00000000)', - 'is_open': True, - 'is_short': False, - 'filled_entry_orders': [], - 'orders': [] - }]), + "freqtrade.rpc.rpc.RPC", + _rpc_trade_status=MagicMock( + return_value=[ + { + "trade_id": 1, + "pair": "ETH/BTC", + "base_currency": "ETH", + "quote_currency": "BTC", + "open_date": dt_now(), + "close_date": None, + "open_rate": 1.099e-05, + "close_rate": None, + "current_rate": 1.098e-05, + "amount": 90.99181074, + "stake_amount": 90.99181074, + "max_stake_amount": 90.99181074, + "buy_tag": None, + "enter_tag": None, + "close_profit_ratio": None, + "profit": -0.0059, + "profit_ratio": -0.0059, + "profit_abs": -0.225, + "realized_profit": 0.0, + "total_profit_abs": -0.225, + "initial_stop_loss_abs": 1.098e-05, + "stop_loss_abs": 1.099e-05, + "exit_order_status": None, + "initial_stop_loss_ratio": -0.0005, + "stoploss_current_dist": 1e-08, + "stoploss_current_dist_ratio": -0.0002, + "stop_loss_ratio": -0.0001, 
+ "open_order": "(limit buy rem=0.00000000)", + "is_open": True, + "is_short": False, + "filled_entry_orders": [], + "orders": [], + } + ] + ), ) telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) @@ -303,8 +328,8 @@ async def test_telegram_status(default_conf, update, mocker) -> None: @pytest.mark.usefixtures("init_persistence") async def test_telegram_status_multi_entry(default_conf, update, mocker, fee) -> None: - default_conf['telegram']['enabled'] = False - default_conf['position_adjustment_enable'] = True + default_conf["telegram"]["enabled"] = False + default_conf["position_adjustment_enable"] = True mocker.patch.multiple( EXMS, fetch_order=MagicMock(return_value=None), @@ -318,25 +343,26 @@ async def test_telegram_status_multi_entry(default_conf, update, mocker, fee) -> trade = trades[3] # Average may be empty on some exchanges trade.orders[0].average = 0 - trade.orders.append(Order( - order_id='5412vbb', - ft_order_side='buy', - ft_pair=trade.pair, - ft_is_open=False, - ft_amount=trade.amount, - ft_price=trade.open_rate, - status="closed", - symbol=trade.pair, - order_type="market", - side="buy", - price=trade.open_rate * 0.95, - average=0, - filled=trade.amount, - remaining=0, - cost=trade.amount, - order_date=trade.open_date, - order_filled_date=trade.open_date, - ) + trade.orders.append( + Order( + order_id="5412vbb", + ft_order_side="buy", + ft_pair=trade.pair, + ft_is_open=False, + ft_amount=trade.amount, + ft_price=trade.open_rate, + status="closed", + symbol=trade.pair, + order_type="market", + side="buy", + price=trade.open_rate * 0.95, + average=0, + filled=trade.amount, + remaining=0, + cost=trade.amount, + order_date=trade.open_date, + order_filled_date=trade.open_date, + ) ) trade.recalc_trade_from_orders() Trade.commit() @@ -344,15 +370,15 @@ async def test_telegram_status_multi_entry(default_conf, update, mocker, fee) -> await telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 4 msg = msg_mock.call_args_list[3][0][0] - assert re.search(r'Number of Entries.*2', msg) - assert re.search(r'Number of Exits.*1', msg) - assert re.search(r'Close Date:', msg) is None - assert re.search(r'Close Profit:', msg) is None + assert re.search(r"Number of Entries.*2", msg) + assert re.search(r"Number of Exits.*1", msg) + assert re.search(r"Close Date:", msg) is None + assert re.search(r"Close Profit:", msg) is None @pytest.mark.usefixtures("init_persistence") async def test_telegram_status_closed_trade(default_conf, update, mocker, fee) -> None: - default_conf['position_adjustment_enable'] = True + default_conf["position_adjustment_enable"] = True mocker.patch.multiple( EXMS, fetch_order=MagicMock(return_value=None), @@ -368,12 +394,12 @@ async def test_telegram_status_closed_trade(default_conf, update, mocker, fee) - await telegram._status(update=update, context=context) assert msg_mock.call_count == 1 msg = msg_mock.call_args_list[0][0][0] - assert re.search(r'Close Date:', msg) - assert re.search(r'Close Profit:', msg) + assert re.search(r"Close Date:", msg) + assert re.search(r"Close Profit:", msg) async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None: - default_conf['max_open_trades'] = 3 + default_conf["max_open_trades"] = 3 mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -382,7 +408,7 @@ async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None: ) status_table = MagicMock() mocker.patch.multiple( - 'freqtrade.rpc.telegram.Telegram', + "freqtrade.rpc.telegram.Telegram", 
_status_table=status_table, ) @@ -396,7 +422,7 @@ async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None: # Create some test data freqtradebot.enter_positions() - mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 500) + mocker.patch("freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH", 500) msg_mock.reset_mock() context = MagicMock() @@ -407,10 +433,10 @@ async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None: msg1 = msg_mock.call_args_list[0][0][0] - assert 'Order List for Trade #*`2`' in msg1 + assert "Order List for Trade #*`2`" in msg1 msg_mock.reset_mock() - mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 50) + mocker.patch("freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH", 50) context = MagicMock() context.args = ["2"] await telegram._order(update=update, context=context) @@ -420,14 +446,14 @@ async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None: msg1 = msg_mock.call_args_list[0][0][0] msg2 = msg_mock.call_args_list[1][0][0] - assert 'Order List for Trade #*`2`' in msg1 - assert '*Order List for Trade #*`2` - continued' in msg2 + assert "Order List for Trade #*`2`" in msg1 + assert "*Order List for Trade #*`2` - continued" in msg2 @pytest.mark.usefixtures("init_persistence") async def test_telegram_order_multi_entry(default_conf, update, mocker, fee) -> None: - default_conf['telegram']['enabled'] = False - default_conf['position_adjustment_enable'] = True + default_conf["telegram"]["enabled"] = False + default_conf["position_adjustment_enable"] = True mocker.patch.multiple( EXMS, fetch_order=MagicMock(return_value=None), @@ -441,25 +467,26 @@ async def test_telegram_order_multi_entry(default_conf, update, mocker, fee) -> trade = trades[3] # Average may be empty on some exchanges trade.orders[0].average = 0 - trade.orders.append(Order( - order_id='5412vbb', - ft_order_side='buy', - ft_pair=trade.pair, - ft_is_open=False, - ft_amount=trade.amount, - ft_price=trade.open_rate, - status="closed", - symbol=trade.pair, - order_type="market", - side="buy", - price=trade.open_rate * 0.95, - average=0, - filled=trade.amount, - remaining=0, - cost=trade.amount, - order_date=trade.open_date, - order_filled_date=trade.open_date, - ) + trade.orders.append( + Order( + order_id="5412vbb", + ft_order_side="buy", + ft_pair=trade.pair, + ft_is_open=False, + ft_amount=trade.amount, + ft_price=trade.open_rate, + status="closed", + symbol=trade.pair, + order_type="market", + side="buy", + price=trade.open_rate * 0.95, + average=0, + filled=trade.amount, + remaining=0, + cost=trade.amount, + order_date=trade.open_date, + order_filled_date=trade.open_date, + ) ) trade.recalc_trade_from_orders() Trade.commit() @@ -467,12 +494,12 @@ async def test_telegram_order_multi_entry(default_conf, update, mocker, fee) -> await telegram._order(update=update, context=MagicMock()) assert msg_mock.call_count == 4 msg = msg_mock.call_args_list[3][0][0] - assert re.search(r'from 1st entry rate', msg) - assert re.search(r'Order Filled', msg) + assert re.search(r"from 1st entry rate", msg) + assert re.search(r"Order Filled", msg) async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: - default_conf['max_open_trades'] = 3 + default_conf["max_open_trades"] = 3 mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -481,7 +508,7 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: ) status_table = MagicMock() mocker.patch.multiple( - 'freqtrade.rpc.telegram.Telegram', + 
"freqtrade.rpc.telegram.Telegram", _status_table=status_table, ) @@ -493,13 +520,13 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: # Status is also enabled when stopped await telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'no active trade' in msg_mock.call_args_list[0][0][0] + assert "no active trade" in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() freqtradebot.state = State.RUNNING await telegram._status(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'no active trade' in msg_mock.call_args_list[0][0][0] + assert "no active trade" in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data @@ -509,14 +536,14 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: # close_rate should not be included in the message as the trade is not closed # and no line should be empty - lines = msg_mock.call_args_list[0][0][0].split('\n') - assert '' not in lines[:-1] - assert 'Close Rate' not in ''.join(lines) - assert 'Close Profit' not in ''.join(lines) + lines = msg_mock.call_args_list[0][0][0].split("\n") + assert "" not in lines[:-1] + assert "Close Rate" not in "".join(lines) + assert "Close Profit" not in "".join(lines) assert msg_mock.call_count == 3 - assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0] - assert 'LTC/BTC' in msg_mock.call_args_list[1][0][0] + assert "ETH/BTC" in msg_mock.call_args_list[0][0][0] + assert "LTC/BTC" in msg_mock.call_args_list[1][0][0] msg_mock.reset_mock() context = MagicMock() @@ -524,15 +551,15 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: await telegram._status(update=update, context=context) - lines = msg_mock.call_args_list[0][0][0].split('\n') - assert '' not in lines[:-1] - assert 'Close Rate' not in ''.join(lines) - assert 'Close Profit' not in ''.join(lines) + lines = msg_mock.call_args_list[0][0][0].split("\n") + assert "" not in lines[:-1] + assert "Close Rate" not in "".join(lines) + assert "Close Profit" not in "".join(lines) assert msg_mock.call_count == 2 - assert 'LTC/BTC' in msg_mock.call_args_list[0][0][0] + assert "LTC/BTC" in msg_mock.call_args_list[0][0][0] - mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 500) + mocker.patch("freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH", 500) msg_mock.reset_mock() context = MagicMock() @@ -543,8 +570,8 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: msg1 = msg_mock.call_args_list[0][0][0] - assert 'Close Rate' not in msg1 - assert 'Trade ID:* `2`' in msg1 + assert "Close Rate" not in msg1 + assert "Trade ID:* `2`" in msg1 async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> None: @@ -554,7 +581,7 @@ async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> get_fee=fee, ) - default_conf['stake_amount'] = 15.0 + default_conf["stake_amount"] = 15.0 telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) @@ -564,13 +591,13 @@ async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> # Status table is also enabled when stopped await telegram._status_table(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'no active trade' in msg_mock.call_args_list[0][0][0] + assert "no active trade" in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() freqtradebot.state = State.RUNNING await telegram._status_table(update=update, context=MagicMock()) 
assert msg_mock.call_count == 1 - assert 'no active trade' in msg_mock.call_args_list[0][0][0] + assert "no active trade" in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data @@ -578,21 +605,18 @@ async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> await telegram._status_table(update=update, context=MagicMock()) - text = re.sub('', '', msg_mock.call_args_list[-1][0][0]) + text = re.sub("", "", msg_mock.call_args_list[-1][0][0]) line = text.split("\n") - fields = re.sub('[ ]+', ' ', line[2].strip()).split(' ') + fields = re.sub("[ ]+", " ", line[2].strip()).split(" ") assert int(fields[0]) == 1 # assert 'L' in fields[1] - assert 'ETH/BTC' in fields[1] + assert "ETH/BTC" in fields[1] assert msg_mock.call_count == 1 async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time_machine) -> None: - mocker.patch( - 'freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', - return_value=1.1 - ) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.1) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -602,7 +626,7 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time telegram, _freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt) # Move date to within day - time_machine.move_to('2022-06-11 08:00:00+00:00') + time_machine.move_to("2022-06-11 08:00:00+00:00") # Create some test data create_mock_trades_usdt(fee) @@ -613,13 +637,13 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time await telegram._daily(update=update, context=context) assert msg_mock.call_count == 1 assert "Daily Profit over the last 2 days:" in msg_mock.call_args_list[0][0][0] - assert 'Day ' in msg_mock.call_args_list[0][0][0] + assert "Day " in msg_mock.call_args_list[0][0][0] assert str(datetime.now(timezone.utc).date()) in msg_mock.call_args_list[0][0][0] - assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0] - assert '(2)' in msg_mock.call_args_list[0][0][0] - assert '(2) 6.83 USDT 7.51 USD 0.64%' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert " 6.83 USDT" in msg_mock.call_args_list[0][0][0] + assert " 7.51 USD" in msg_mock.call_args_list[0][0][0] + assert "(2)" in msg_mock.call_args_list[0][0][0] + assert "(2) 6.83 USDT 7.51 USD 0.64%" in msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() @@ -628,13 +652,15 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time assert msg_mock.call_count == 1 assert "Daily Profit over the last 7 days:" in msg_mock.call_args_list[0][0][0] assert str(datetime.now(timezone.utc).date()) in msg_mock.call_args_list[0][0][0] - assert str((datetime.now(timezone.utc) - timedelta(days=5)).date() - ) in msg_mock.call_args_list[0][0][0] - assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0] - assert '(2)' in msg_mock.call_args_list[0][0][0] - assert '(1)' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert ( + str((datetime.now(timezone.utc) - timedelta(days=5)).date()) + in msg_mock.call_args_list[0][0][0] + ) + assert " 6.83 USDT" in msg_mock.call_args_list[0][0][0] + assert " 7.51 USD" in msg_mock.call_args_list[0][0][0] + assert "(2)" in msg_mock.call_args_list[0][0][0] + assert "(1)" in 
msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() @@ -643,16 +669,13 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time context = MagicMock() context.args = ["1"] await telegram._daily(update=update, context=context) - assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0] - assert '(2)' in msg_mock.call_args_list[0][0][0] + assert " 6.83 USDT" in msg_mock.call_args_list[0][0][0] + assert " 7.51 USD" in msg_mock.call_args_list[0][0][0] + assert "(2)" in msg_mock.call_args_list[0][0][0] async def test_daily_wrong_input(default_conf, update, ticker, mocker) -> None: - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -665,7 +688,7 @@ async def test_daily_wrong_input(default_conf, update, ticker, mocker) -> None: context.args = ["-2"] await telegram._daily(update=update, context=context) assert msg_mock.call_count == 1 - assert 'must be an integer greater than 0' in msg_mock.call_args_list[0][0][0] + assert "must be an integer greater than 0" in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() @@ -674,15 +697,12 @@ async def test_daily_wrong_input(default_conf, update, ticker, mocker) -> None: context = MagicMock() context.args = ["today"] await telegram._daily(update=update, context=context) - assert 'Daily Profit over the last 7 days:' in msg_mock.call_args_list[0][0][0] + assert "Daily Profit over the last 7 days:" in msg_mock.call_args_list[0][0][0] async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, time_machine) -> None: - default_conf_usdt['max_open_trades'] = 1 - mocker.patch( - 'freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', - return_value=1.1 - ) + default_conf_usdt["max_open_trades"] = 1 + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.1) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -691,7 +711,7 @@ async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, tim telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt) # Move to saturday - so all trades are within that week - time_machine.move_to('2022-06-11') + time_machine.move_to("2022-06-11") create_mock_trades_usdt(fee) # Try valid data @@ -700,29 +720,33 @@ async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, tim context.args = ["2"] await telegram._weekly(update=update, context=context) assert msg_mock.call_count == 1 - assert "Weekly Profit over the last 2 weeks (starting from Monday):" \ - in msg_mock.call_args_list[0][0][0] - assert 'Monday ' in msg_mock.call_args_list[0][0][0] + assert ( + "Weekly Profit over the last 2 weeks (starting from Monday):" + in msg_mock.call_args_list[0][0][0] + ) + assert "Monday " in msg_mock.call_args_list[0][0][0] today = datetime.now(timezone.utc).date() first_iso_day_of_current_week = today - timedelta(days=today.weekday()) assert str(first_iso_day_of_current_week) in msg_mock.call_args_list[0][0][0] - assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0] - assert '(3)' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert " 2.74 USDT" in msg_mock.call_args_list[0][0][0] + assert " 3.01 USD" 
in msg_mock.call_args_list[0][0][0] + assert "(3)" in msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() context.args = [] await telegram._weekly(update=update, context=context) assert msg_mock.call_count == 1 - assert "Weekly Profit over the last 8 weeks (starting from Monday):" \ - in msg_mock.call_args_list[0][0][0] - assert 'Weekly' in msg_mock.call_args_list[0][0][0] - assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0] - assert '(3)' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert ( + "Weekly Profit over the last 8 weeks (starting from Monday):" + in msg_mock.call_args_list[0][0][0] + ) + assert "Weekly" in msg_mock.call_args_list[0][0][0] + assert " 2.74 USDT" in msg_mock.call_args_list[0][0][0] + assert " 3.01 USD" in msg_mock.call_args_list[0][0][0] + assert "(3)" in msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() @@ -732,7 +756,7 @@ async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, tim context.args = ["-3"] await telegram._weekly(update=update, context=context) assert msg_mock.call_count == 1 - assert 'must be an integer greater than 0' in msg_mock.call_args_list[0][0][0] + assert "must be an integer greater than 0" in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() @@ -742,17 +766,14 @@ async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, tim context.args = ["this week"] await telegram._weekly(update=update, context=context) assert ( - 'Weekly Profit over the last 8 weeks (starting from Monday):' + "Weekly Profit over the last 8 weeks (starting from Monday):" in msg_mock.call_args_list[0][0][0] ) async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, time_machine) -> None: - default_conf_usdt['max_open_trades'] = 1 - mocker.patch( - 'freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', - return_value=1.1 - ) + default_conf_usdt["max_open_trades"] = 1 + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.1) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -761,7 +782,7 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt) # Move to day within the month so all mock trades fall into this week. 
- time_machine.move_to('2022-06-11') + time_machine.move_to("2022-06-11") create_mock_trades_usdt(fee) # Try valid data @@ -770,15 +791,15 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti context.args = ["2"] await telegram._monthly(update=update, context=context) assert msg_mock.call_count == 1 - assert 'Monthly Profit over the last 2 months:' in msg_mock.call_args_list[0][0][0] - assert 'Month ' in msg_mock.call_args_list[0][0][0] + assert "Monthly Profit over the last 2 months:" in msg_mock.call_args_list[0][0][0] + assert "Month " in msg_mock.call_args_list[0][0][0] today = datetime.now(timezone.utc).date() current_month = f"{today.year}-{today.month:02} " assert current_month in msg_mock.call_args_list[0][0][0] - assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0] - assert '(3)' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert " 2.74 USDT" in msg_mock.call_args_list[0][0][0] + assert " 3.01 USD" in msg_mock.call_args_list[0][0][0] + assert "(3)" in msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() @@ -786,13 +807,13 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti await telegram._monthly(update=update, context=context) assert msg_mock.call_count == 1 # Default to 6 months - assert 'Monthly Profit over the last 6 months:' in msg_mock.call_args_list[0][0][0] - assert 'Month ' in msg_mock.call_args_list[0][0][0] + assert "Monthly Profit over the last 6 months:" in msg_mock.call_args_list[0][0][0] + assert "Month " in msg_mock.call_args_list[0][0][0] assert current_month in msg_mock.call_args_list[0][0][0] - assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0] - assert '(3)' in msg_mock.call_args_list[0][0][0] - assert '(0)' in msg_mock.call_args_list[0][0][0] + assert " 2.74 USDT" in msg_mock.call_args_list[0][0][0] + assert " 3.01 USD" in msg_mock.call_args_list[0][0][0] + assert "(3)" in msg_mock.call_args_list[0][0][0] + assert "(0)" in msg_mock.call_args_list[0][0][0] # Reset msg_mock msg_mock.reset_mock() @@ -802,14 +823,14 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti context.args = ["12"] await telegram._monthly(update=update, context=context) assert msg_mock.call_count == 1 - assert 'Monthly Profit over the last 12 months:' in msg_mock.call_args_list[0][0][0] - assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0] - assert ' 3.01 USD' in msg_mock.call_args_list[0][0][0] - assert '(3)' in msg_mock.call_args_list[0][0][0] + assert "Monthly Profit over the last 12 months:" in msg_mock.call_args_list[0][0][0] + assert " 2.74 USDT" in msg_mock.call_args_list[0][0][0] + assert " 3.01 USD" in msg_mock.call_args_list[0][0][0] + assert "(3)" in msg_mock.call_args_list[0][0][0] # The one-digit months should contain a zero, Eg: September 2021 = "2021-09" # Since we loaded the last 12 months, any month should appear - assert '-09' in msg_mock.call_args_list[0][0][0] + assert "-09" in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() @@ -819,7 +840,7 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti context.args = ["-3"] await telegram._monthly(update=update, context=context) assert msg_mock.call_count == 1 - assert 'must be an integer greater than 0' in msg_mock.call_args_list[0][0][0] + 
assert "must be an integer greater than 0" in msg_mock.call_args_list[0][0][0] # Try invalid data msg_mock.reset_mock() @@ -828,13 +849,13 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti context = MagicMock() context.args = ["february"] await telegram._monthly(update=update, context=context) - assert 'Monthly Profit over the last 6 months:' in msg_mock.call_args_list[0][0][0] + assert "Monthly Profit over the last 6 months:" in msg_mock.call_args_list[0][0][0] async def test_telegram_profit_handle( - default_conf_usdt, update, ticker_usdt, ticker_sell_up, fee, - limit_sell_order_usdt, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=1.1) + default_conf_usdt, update, ticker_usdt, ticker_sell_up, fee, limit_sell_order_usdt, mocker +) -> None: + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=1.1) mocker.patch.multiple( EXMS, fetch_ticker=ticker_usdt, @@ -846,7 +867,7 @@ async def test_telegram_profit_handle( await telegram._profit(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'No trades yet.' in msg_mock.call_args_list[0][0][0] + assert "No trades yet." in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data @@ -858,19 +879,22 @@ async def test_telegram_profit_handle( context.args = ["aaa"] await telegram._profit(update=update, context=context) assert msg_mock.call_count == 1 - assert 'No closed trade' in msg_mock.call_args_list[-1][0][0] - assert '*ROI:* All trades' in msg_mock.call_args_list[-1][0][0] - mocker.patch('freqtrade.wallets.Wallets.get_starting_balance', return_value=1000) - assert ('∙ `0.298 USDT (0.50%) (0.03 \N{GREEK CAPITAL LETTER SIGMA}%)`' - in msg_mock.call_args_list[-1][0][0]) + assert "No closed trade" in msg_mock.call_args_list[-1][0][0] + assert "*ROI:* All trades" in msg_mock.call_args_list[-1][0][0] + mocker.patch("freqtrade.wallets.Wallets.get_starting_balance", return_value=1000) + assert ( + "∙ `0.298 USDT (0.50%) (0.03 \N{GREEK CAPITAL LETTER SIGMA}%)`" + in msg_mock.call_args_list[-1][0][0] + ) msg_mock.reset_mock() # Update the ticker with a market going up - mocker.patch(f'{EXMS}.fetch_ticker', ticker_sell_up) + mocker.patch(f"{EXMS}.fetch_ticker", ticker_sell_up) # Simulate fulfilled LIMIT_SELL order for trade trade = Trade.session.scalars(select(Trade)).first() oobj = Order.parse_from_ccxt_object( - limit_sell_order_usdt, limit_sell_order_usdt['symbol'], 'sell') + limit_sell_order_usdt, limit_sell_order_usdt["symbol"], "sell" + ) trade.orders.append(oobj) trade.update_trade(oobj) @@ -881,26 +905,30 @@ async def test_telegram_profit_handle( context.args = [3] await telegram._profit(update=update, context=context) assert msg_mock.call_count == 1 - assert '*ROI:* Closed trades' in msg_mock.call_args_list[-1][0][0] - assert ('∙ `5.685 USDT (9.45%) (0.57 \N{GREEK CAPITAL LETTER SIGMA}%)`' - in msg_mock.call_args_list[-1][0][0]) - assert '∙ `6.253 USD`' in msg_mock.call_args_list[-1][0][0] - assert '*ROI:* All trades' in msg_mock.call_args_list[-1][0][0] - assert ('∙ `5.685 USDT (9.45%) (0.57 \N{GREEK CAPITAL LETTER SIGMA}%)`' - in msg_mock.call_args_list[-1][0][0]) - assert '∙ `6.253 USD`' in msg_mock.call_args_list[-1][0][0] + assert "*ROI:* Closed trades" in msg_mock.call_args_list[-1][0][0] + assert ( + "∙ `5.685 USDT (9.45%) (0.57 \N{GREEK CAPITAL LETTER SIGMA}%)`" + in msg_mock.call_args_list[-1][0][0] + ) + assert "∙ `6.253 USD`" in msg_mock.call_args_list[-1][0][0] + assert "*ROI:* 
All trades" in msg_mock.call_args_list[-1][0][0] + assert ( + "∙ `5.685 USDT (9.45%) (0.57 \N{GREEK CAPITAL LETTER SIGMA}%)`" + in msg_mock.call_args_list[-1][0][0] + ) + assert "∙ `6.253 USD`" in msg_mock.call_args_list[-1][0][0] - assert '*Best Performing:* `ETH/USDT: 9.45%`' in msg_mock.call_args_list[-1][0][0] - assert '*Max Drawdown:*' in msg_mock.call_args_list[-1][0][0] - assert '*Profit factor:*' in msg_mock.call_args_list[-1][0][0] - assert '*Winrate:*' in msg_mock.call_args_list[-1][0][0] - assert '*Expectancy (Ratio):*' in msg_mock.call_args_list[-1][0][0] - assert '*Trading volume:* `126 USDT`' in msg_mock.call_args_list[-1][0][0] + assert "*Best Performing:* `ETH/USDT: 9.45%`" in msg_mock.call_args_list[-1][0][0] + assert "*Max Drawdown:*" in msg_mock.call_args_list[-1][0][0] + assert "*Profit factor:*" in msg_mock.call_args_list[-1][0][0] + assert "*Winrate:*" in msg_mock.call_args_list[-1][0][0] + assert "*Expectancy (Ratio):*" in msg_mock.call_args_list[-1][0][0] + assert "*Trading volume:* `126 USDT`" in msg_mock.call_args_list[-1][0][0] -@pytest.mark.parametrize('is_short', [True, False]) +@pytest.mark.parametrize("is_short", [True, False]) async def test_telegram_stats(default_conf, update, ticker, fee, mocker, is_short) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) mocker.patch.multiple( EXMS, fetch_ticker=ticker, @@ -911,7 +939,7 @@ async def test_telegram_stats(default_conf, update, ticker, fee, mocker, is_shor await telegram._stats(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'No trades yet.' in msg_mock.call_args_list[0][0][0] + assert "No trades yet." in msg_mock.call_args_list[0][0][0] msg_mock.reset_mock() # Create some test data @@ -919,20 +947,20 @@ async def test_telegram_stats(default_conf, update, ticker, fee, mocker, is_shor await telegram._stats(update=update, context=MagicMock()) assert msg_mock.call_count == 1 - assert 'Exit Reason' in msg_mock.call_args_list[-1][0][0] - assert 'ROI' in msg_mock.call_args_list[-1][0][0] - assert 'Avg. Duration' in msg_mock.call_args_list[-1][0][0] + assert "Exit Reason" in msg_mock.call_args_list[-1][0][0] + assert "ROI" in msg_mock.call_args_list[-1][0][0] + assert "Avg. 
Duration" in msg_mock.call_args_list[-1][0][0] # Duration is not only N/A - assert '0:19:00' in msg_mock.call_args_list[-1][0][0] - assert 'N/A' in msg_mock.call_args_list[-1][0][0] + assert "0:19:00" in msg_mock.call_args_list[-1][0][0] + assert "N/A" in msg_mock.call_args_list[-1][0][0] msg_mock.reset_mock() async def test_telegram_balance_handle(default_conf, update, mocker, rpc_balance, tickers) -> None: - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.get_balances', return_value=rpc_balance) - mocker.patch(f'{EXMS}.get_tickers', tickers) - mocker.patch(f'{EXMS}.get_valid_pair_combination', side_effect=lambda a, b: f"{a}/{b}") + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.get_balances", return_value=rpc_balance) + mocker.patch(f"{EXMS}.get_tickers", tickers) + mocker.patch(f"{EXMS}.get_valid_pair_combination", side_effect=lambda a, b: f"{a}/{b}") telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -944,40 +972,40 @@ async def test_telegram_balance_handle(default_conf, update, mocker, rpc_balance result = msg_mock.call_args_list[0][0][0] result_full = msg_mock.call_args_list[1][0][0] assert msg_mock.call_count == 2 - assert '*BTC:*' in result - assert '*ETH:*' not in result - assert '*USDT:*' not in result - assert '*EUR:*' not in result - assert '*LTC:*' not in result + assert "*BTC:*" in result + assert "*ETH:*" not in result + assert "*USDT:*" not in result + assert "*EUR:*" not in result + assert "*LTC:*" not in result - assert '*LTC:*' in result_full - assert '*XRP:*' not in result - assert 'Balance:' in result - assert 'Est. BTC:' in result - assert 'BTC: 11' in result - assert 'BTC: 12' in result_full + assert "*LTC:*" in result_full + assert "*XRP:*" not in result + assert "Balance:" in result + assert "Est. 
BTC:" in result + assert "BTC: 11" in result + assert "BTC: 12" in result_full assert "*3 Other Currencies (< 0.0001 BTC):*" in result - assert 'BTC: 0.00000309' in result - assert '*Estimated Value*:' in result_full - assert '*Estimated Value (Bot managed assets only)*:' in result + assert "BTC: 0.00000309" in result + assert "*Estimated Value*:" in result_full + assert "*Estimated Value (Bot managed assets only)*:" in result async def test_balance_handle_empty_response(default_conf, update, mocker) -> None: - default_conf['dry_run'] = False - mocker.patch(f'{EXMS}.get_balances', return_value={}) + default_conf["dry_run"] = False + mocker.patch(f"{EXMS}.get_balances", return_value={}) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) - freqtradebot.config['dry_run'] = False + freqtradebot.config["dry_run"] = False await telegram._balance(update=update, context=MagicMock()) result = msg_mock.call_args_list[0][0][0] assert msg_mock.call_count == 1 - assert 'Starting capital: `0 BTC' in result + assert "Starting capital: `0 BTC" in result async def test_balance_handle_empty_response_dry(default_conf, update, mocker) -> None: - mocker.patch(f'{EXMS}.get_balances', return_value={}) + mocker.patch(f"{EXMS}.get_balances", return_value={}) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -993,31 +1021,36 @@ async def test_balance_handle_too_large_response(default_conf, update, mocker) - balances = [] for i in range(100): curr = choice(ascii_uppercase) + choice(ascii_uppercase) + choice(ascii_uppercase) - balances.append({ - 'currency': curr, - 'free': 1.0, - 'used': 0.5, - 'balance': i, - 'bot_owned': 0.5, - 'est_stake': 1, - 'est_stake_bot': 1, - 'stake': 'BTC', - 'is_position': False, - 'leverage': 1.0, - 'position': 0.0, - 'side': 'long', - 'is_bot_managed': True, - }) - mocker.patch('freqtrade.rpc.rpc.RPC._rpc_balance', return_value={ - 'currencies': balances, - 'total': 100.0, - 'total_bot': 100.0, - 'symbol': 100.0, - 'value': 1000.0, - 'value_bot': 1000.0, - 'starting_capital': 1000, - 'starting_capital_fiat': 1000, - }) + balances.append( + { + "currency": curr, + "free": 1.0, + "used": 0.5, + "balance": i, + "bot_owned": 0.5, + "est_stake": 1, + "est_stake_bot": 1, + "stake": "BTC", + "is_position": False, + "leverage": 1.0, + "position": 0.0, + "side": "long", + "is_bot_managed": True, + } + ) + mocker.patch( + "freqtrade.rpc.rpc.RPC._rpc_balance", + return_value={ + "currencies": balances, + "total": 100.0, + "total_bot": 100.0, + "symbol": 100.0, + "value": 1000.0, + "value_bot": 1000.0, + "starting_capital": 1000, + "starting_capital_fiat": 1000, + }, + ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -1032,7 +1065,6 @@ async def test_balance_handle_too_large_response(default_conf, update, mocker) - async def test_start_handle(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.STOPPED @@ -1043,7 +1075,6 @@ async def test_start_handle(default_conf, update, mocker) -> None: async def test_start_handle_already_running(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.RUNNING @@ -1051,11 +1082,10 @@ async def test_start_handle_already_running(default_conf, update, mocker) -> Non await 
telegram._start(update=update, context=MagicMock()) assert freqtradebot.state == State.RUNNING assert msg_mock.call_count == 1 - assert 'already running' in msg_mock.call_args_list[0][0][0] + assert "already running" in msg_mock.call_args_list[0][0][0] async def test_stop_handle(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.RUNNING @@ -1063,11 +1093,10 @@ async def test_stop_handle(default_conf, update, mocker) -> None: await telegram._stop(update=update, context=MagicMock()) assert freqtradebot.state == State.STOPPED assert msg_mock.call_count == 1 - assert 'stopping trader' in msg_mock.call_args_list[0][0][0] + assert "stopping trader" in msg_mock.call_args_list[0][0][0] async def test_stop_handle_already_stopped(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.STOPPED @@ -1075,23 +1104,23 @@ async def test_stop_handle_already_stopped(default_conf, update, mocker) -> None await telegram._stop(update=update, context=MagicMock()) assert freqtradebot.state == State.STOPPED assert msg_mock.call_count == 1 - assert 'already stopped' in msg_mock.call_args_list[0][0][0] + assert "already stopped" in msg_mock.call_args_list[0][0][0] async def test_stopbuy_handle(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) - assert freqtradebot.config['max_open_trades'] != 0 + assert freqtradebot.config["max_open_trades"] != 0 await telegram._stopentry(update=update, context=MagicMock()) - assert freqtradebot.config['max_open_trades'] == 0 + assert freqtradebot.config["max_open_trades"] == 0 assert msg_mock.call_count == 1 - assert 'No more entries will occur from now. Run /reload_config to reset.' \ + assert ( + "No more entries will occur from now. Run /reload_config to reset." 
in msg_mock.call_args_list[0][0][0] + ) async def test_reload_config_handle(default_conf, update, mocker) -> None: - telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) freqtradebot.state = State.RUNNING @@ -1099,14 +1128,15 @@ async def test_reload_config_handle(default_conf, update, mocker) -> None: await telegram._reload_config(update=update, context=MagicMock()) assert freqtradebot.state == State.RELOAD_CONFIG assert msg_mock.call_count == 1 - assert 'Reloading config' in msg_mock.call_args_list[0][0][0] + assert "Reloading config" in msg_mock.call_args_list[0][0][0] -async def test_telegram_forceexit_handle(default_conf, update, ticker, fee, - ticker_sell_up, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) - msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) +async def test_telegram_forceexit_handle( + default_conf, update, ticker, fee, ticker_sell_up, mocker +) -> None: + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) + msg_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) patch_exchange(mocker) patch_whitelist(mocker, default_conf) mocker.patch.multiple( @@ -1128,7 +1158,7 @@ async def test_telegram_forceexit_handle(default_conf, update, ticker, fee, assert trade # Increase the price and sell it - mocker.patch(f'{EXMS}.fetch_ticker', ticker_sell_up) + mocker.patch(f"{EXMS}.fetch_ticker", ticker_sell_up) # /forceexit 1 context = MagicMock() @@ -1138,45 +1168,47 @@ async def test_telegram_forceexit_handle(default_conf, update, ticker, fee, assert msg_mock.call_count == 4 last_msg = msg_mock.call_args_list[-2][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/BTC', - 'gain': 'profit', - 'leverage': 1.0, - 'limit': 1.173e-05, - 'order_rate': 1.173e-05, - 'amount': 91.07468123, - 'order_type': 'limit', - 'open_rate': 1.098e-05, - 'current_rate': 1.173e-05, - 'direction': 'Long', - 'profit_amount': 6.314e-05, - 'profit_ratio': 0.0629778, - 'stake_currency': 'BTC', - 'quote_currency': 'BTC', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'buy_tag': ANY, - 'enter_tag': ANY, - 'exit_reason': ExitType.FORCE_EXIT.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'stake_amount': 0.0009999999999054, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/BTC", + "gain": "profit", + "leverage": 1.0, + "limit": 1.173e-05, + "order_rate": 1.173e-05, + "amount": 91.07468123, + "order_type": "limit", + "open_rate": 1.098e-05, + "current_rate": 1.173e-05, + "direction": "Long", + "profit_amount": 6.314e-05, + "profit_ratio": 0.0629778, + "stake_currency": "BTC", + "quote_currency": "BTC", + "base_currency": "ETH", + "fiat_currency": "USD", + "buy_tag": ANY, + "enter_tag": ANY, + "exit_reason": ExitType.FORCE_EXIT.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "stake_amount": 0.0009999999999054, + "sub_trade": False, + "cumulative_profit": 0.0, + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg -async def test_telegram_force_exit_down_handle(default_conf, update, ticker, fee, - ticker_sell_down, mocker) -> None: - 
mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=15000.0) - msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) +async def test_telegram_force_exit_down_handle( + default_conf, update, ticker, fee, ticker_sell_down, mocker +) -> None: + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=15000.0 + ) + msg_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) patch_exchange(mocker) patch_whitelist(mocker, default_conf) @@ -1196,10 +1228,7 @@ async def test_telegram_force_exit_down_handle(default_conf, update, ticker, fee freqtradebot.enter_positions() # Decrease the price and sell it - mocker.patch.multiple( - EXMS, - fetch_ticker=ticker_sell_down - ) + mocker.patch.multiple(EXMS, fetch_ticker=ticker_sell_down) trade = Trade.session.scalars(select(Trade)).first() assert trade @@ -1213,45 +1242,46 @@ async def test_telegram_force_exit_down_handle(default_conf, update, ticker, fee last_msg = msg_mock.call_args_list[-2][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/BTC', - 'gain': 'loss', - 'leverage': 1.0, - 'limit': 1.043e-05, - 'order_rate': 1.043e-05, - 'amount': 91.07468123, - 'order_type': 'limit', - 'open_rate': 1.098e-05, - 'current_rate': 1.043e-05, - 'direction': 'Long', - 'profit_amount': -5.497e-05, - 'profit_ratio': -0.05482878, - 'stake_currency': 'BTC', - 'quote_currency': 'BTC', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'buy_tag': ANY, - 'enter_tag': ANY, - 'exit_reason': ExitType.FORCE_EXIT.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'stake_amount': 0.0009999999999054, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/BTC", + "gain": "loss", + "leverage": 1.0, + "limit": 1.043e-05, + "order_rate": 1.043e-05, + "amount": 91.07468123, + "order_type": "limit", + "open_rate": 1.098e-05, + "current_rate": 1.043e-05, + "direction": "Long", + "profit_amount": -5.497e-05, + "profit_ratio": -0.05482878, + "stake_currency": "BTC", + "quote_currency": "BTC", + "base_currency": "ETH", + "fiat_currency": "USD", + "buy_tag": ANY, + "enter_tag": ANY, + "exit_reason": ExitType.FORCE_EXIT.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "stake_amount": 0.0009999999999054, + "sub_trade": False, + "cumulative_profit": 0.0, + "is_final_exit": False, + "final_profit_ratio": None, } == last_msg async def test_forceexit_all_handle(default_conf, update, ticker, fee, mocker) -> None: patch_exchange(mocker) - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=15000.0) - msg_mock = mocker.patch('freqtrade.rpc.telegram.Telegram.send_msg', MagicMock()) - mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock()) + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=15000.0 + ) + msg_mock = mocker.patch("freqtrade.rpc.telegram.Telegram.send_msg", MagicMock()) + mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock()) patch_whitelist(mocker, default_conf) mocker.patch.multiple( EXMS, @@ -1259,7 +1289,7 @@ async def test_forceexit_all_handle(default_conf, update, ticker, fee, mocker) 
- get_fee=fee, _dry_is_price_crossed=MagicMock(return_value=True), ) - default_conf['max_open_trades'] = 4 + default_conf["max_open_trades"] = 4 freqtradebot = FreqtradeBot(default_conf) rpc = RPC(freqtradebot) telegram = Telegram(rpc, default_conf) @@ -1278,42 +1308,43 @@ async def test_forceexit_all_handle(default_conf, update, ticker, fee, mocker) - assert msg_mock.call_count == 8 msg = msg_mock.call_args_list[0][0][0] assert { - 'type': RPCMessageType.EXIT, - 'trade_id': 1, - 'exchange': 'Binance', - 'pair': 'ETH/BTC', - 'gain': 'loss', - 'leverage': 1.0, - 'order_rate': 1.099e-05, - 'limit': 1.099e-05, - 'amount': 91.07468123, - 'order_type': 'limit', - 'open_rate': 1.098e-05, - 'current_rate': 1.099e-05, - 'direction': 'Long', - 'profit_amount': -4.09e-06, - 'profit_ratio': -0.00408133, - 'stake_currency': 'BTC', - 'quote_currency': 'BTC', - 'base_currency': 'ETH', - 'fiat_currency': 'USD', - 'buy_tag': ANY, - 'enter_tag': ANY, - 'exit_reason': ExitType.FORCE_EXIT.value, - 'open_date': ANY, - 'close_date': ANY, - 'close_rate': ANY, - 'stake_amount': 0.0009999999999054, - 'sub_trade': False, - 'cumulative_profit': 0.0, - 'is_final_exit': False, - 'final_profit_ratio': None, + "type": RPCMessageType.EXIT, + "trade_id": 1, + "exchange": "Binance", + "pair": "ETH/BTC", + "gain": "loss", + "leverage": 1.0, + "order_rate": 1.099e-05, + "limit": 1.099e-05, + "amount": 91.07468123, + "order_type": "limit", + "open_rate": 1.098e-05, + "current_rate": 1.099e-05, + "direction": "Long", + "profit_amount": -4.09e-06, + "profit_ratio": -0.00408133, + "stake_currency": "BTC", + "quote_currency": "BTC", + "base_currency": "ETH", + "fiat_currency": "USD", + "buy_tag": ANY, + "enter_tag": ANY, + "exit_reason": ExitType.FORCE_EXIT.value, + "open_date": ANY, + "close_date": ANY, + "close_rate": ANY, + "stake_amount": 0.0009999999999054, + "sub_trade": False, + "cumulative_profit": 0.0, + "is_final_exit": False, + "final_profit_ratio": None, } == msg async def test_forceexit_handle_invalid(default_conf, update, mocker) -> None: - mocker.patch('freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price', - return_value=15000.0) + mocker.patch( + "freqtrade.rpc.fiat_convert.CryptoToFiatConverter._find_price", return_value=15000.0 + ) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -1325,7 +1356,7 @@ async def test_forceexit_handle_invalid(default_conf, update, mocker) -> None: context.args = ["1"] await telegram._force_exit(update=update, context=context) assert msg_mock.call_count == 1 - assert 'not running' in msg_mock.call_args_list[0][0][0] + assert "not running" in msg_mock.call_args_list[0][0][0] # Invalid argument msg_mock.reset_mock() @@ -1335,18 +1366,18 @@ async def test_forceexit_handle_invalid(default_conf, update, mocker) -> None: context.args = ["123456"] await telegram._force_exit(update=update, context=context) assert msg_mock.call_count == 1 - assert 'invalid argument' in msg_mock.call_args_list[0][0][0] + assert "invalid argument" in msg_mock.call_args_list[0][0][0] async def test_force_exit_no_pair(default_conf, update, ticker, fee, mocker) -> None: - default_conf['max_open_trades'] = 4 + default_conf["max_open_trades"] = 4 mocker.patch.multiple( EXMS, fetch_ticker=ticker, get_fee=fee, _dry_is_price_crossed=MagicMock(return_value=True), ) - femock = mocker.patch('freqtrade.rpc.rpc.RPC._rpc_force_exit') + femock = mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_exit") telegram, freqtradebot, msg_mock = 
get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -1356,7 +1387,7 @@ async def test_force_exit_no_pair(default_conf, update, ticker, fee, mocker) -> context.args = [] await telegram._force_exit(update=update, context=context) # No pair - assert msg_mock.call_args_list[0][1]['msg'] == 'No open trade found.' + assert msg_mock.call_args_list[0][1]["msg"] == "No open trade found." # Create some test data freqtradebot.enter_positions() @@ -1364,12 +1395,12 @@ async def test_force_exit_no_pair(default_conf, update, ticker, fee, mocker) -> # /forceexit await telegram._force_exit(update=update, context=context) - keyboard = msg_mock.call_args_list[0][1]['keyboard'] + keyboard = msg_mock.call_args_list[0][1]["keyboard"] # 4 pairs + cancel assert reduce(lambda acc, x: acc + len(x), keyboard, 0) == 5 assert keyboard[-1][0].text == "Cancel" - assert keyboard[1][0].callback_data == 'force_exit__2 ' + assert keyboard[1][0].callback_data == "force_exit__2 " update = MagicMock() update.callback_query = AsyncMock() update.callback_query.data = keyboard[1][0].callback_data @@ -1377,7 +1408,7 @@ async def test_force_exit_no_pair(default_conf, update, ticker, fee, mocker) -> assert update.callback_query.answer.call_count == 1 assert update.callback_query.edit_message_text.call_count == 1 assert femock.call_count == 1 - assert femock.call_args_list[0][0][0] == '2' + assert femock.call_args_list[0][0][0] == "2" # Retry exiting - but cancel instead update.callback_query.reset_mock() @@ -1388,14 +1419,14 @@ async def test_force_exit_no_pair(default_conf, update, ticker, fee, mocker) -> query = update.callback_query assert query.answer.call_count == 1 assert query.edit_message_text.call_count == 1 - assert query.edit_message_text.call_args_list[-1][1]['text'] == "Force exit canceled." + assert query.edit_message_text.call_args_list[-1][1]["text"] == "Force exit canceled." 
async def test_force_enter_handle(default_conf, update, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) fbuy_mock = MagicMock(return_value=None) - mocker.patch('freqtrade.rpc.rpc.RPC._rpc_force_entry', fbuy_mock) + mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_entry", fbuy_mock) telegram, freqtradebot, _ = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -1406,26 +1437,26 @@ async def test_force_enter_handle(default_conf, update, mocker) -> None: await telegram._force_enter(update=update, context=context, order_side=SignalDirection.LONG) assert fbuy_mock.call_count == 1 - assert fbuy_mock.call_args_list[0][0][0] == 'ETH/BTC' + assert fbuy_mock.call_args_list[0][0][0] == "ETH/BTC" assert fbuy_mock.call_args_list[0][0][1] is None - assert fbuy_mock.call_args_list[0][1]['order_side'] == SignalDirection.LONG + assert fbuy_mock.call_args_list[0][1]["order_side"] == SignalDirection.LONG # Reset and retry with specified price fbuy_mock = MagicMock(return_value=None) - mocker.patch('freqtrade.rpc.rpc.RPC._rpc_force_entry', fbuy_mock) + mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_entry", fbuy_mock) # /forcelong ETH/BTC 0.055 context = MagicMock() context.args = ["ETH/BTC", "0.055"] await telegram._force_enter(update=update, context=context, order_side=SignalDirection.LONG) assert fbuy_mock.call_count == 1 - assert fbuy_mock.call_args_list[0][0][0] == 'ETH/BTC' + assert fbuy_mock.call_args_list[0][0][0] == "ETH/BTC" assert isinstance(fbuy_mock.call_args_list[0][0][1], float) assert fbuy_mock.call_args_list[0][0][1] == 0.055 async def test_force_enter_handle_exception(default_conf, update, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) patch_get_signal(freqtradebot) @@ -1433,14 +1464,14 @@ async def test_force_enter_handle_exception(default_conf, update, mocker) -> Non await telegram._force_enter(update=update, context=MagicMock(), order_side=SignalDirection.LONG) assert msg_mock.call_count == 1 - assert msg_mock.call_args_list[0][0][0] == 'Force_entry not enabled.' + assert msg_mock.call_args_list[0][0][0] == "Force_entry not enabled." async def test_force_enter_no_pair(default_conf, update, mocker) -> None: - mocker.patch('freqtrade.rpc.rpc.CryptoToFiatConverter._find_price', return_value=15000.0) + mocker.patch("freqtrade.rpc.rpc.CryptoToFiatConverter._find_price", return_value=15000.0) fbuy_mock = MagicMock(return_value=None) - mocker.patch('freqtrade.rpc.rpc.RPC._rpc_force_entry', fbuy_mock) + mocker.patch("freqtrade.rpc.rpc.RPC._rpc_force_entry", fbuy_mock) telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) @@ -1452,29 +1483,28 @@ async def test_force_enter_no_pair(default_conf, update, mocker) -> None: assert fbuy_mock.call_count == 0 assert msg_mock.call_count == 1 - assert msg_mock.call_args_list[0][1]['msg'] == 'Which pair?' + assert msg_mock.call_args_list[0][1]["msg"] == "Which pair?" 
     # assert msg_mock.call_args_list[0][1]['callback_query_handler'] == 'forcebuy'
-    keyboard = msg_mock.call_args_list[0][1]['keyboard']
+    keyboard = msg_mock.call_args_list[0][1]["keyboard"]
     # One additional button - cancel
     assert reduce(lambda acc, x: acc + len(x), keyboard, 0) == 5

     update = MagicMock()
     update.callback_query = AsyncMock()
-    update.callback_query.data = 'force_enter__XRP/USDT_||_long'
+    update.callback_query.data = "force_enter__XRP/USDT_||_long"
     await telegram._force_enter_inline(update, None)
     assert fbuy_mock.call_count == 1

     fbuy_mock.reset_mock()
     update.callback_query = AsyncMock()
-    update.callback_query.data = 'force_enter__cancel'
+    update.callback_query.data = "force_enter__cancel"
     await telegram._force_enter_inline(update, None)
     assert fbuy_mock.call_count == 0
     query = update.callback_query
     assert query.edit_message_text.call_count == 1
-    assert query.edit_message_text.call_args_list[-1][1]['text'] == "Force enter canceled."
+    assert query.edit_message_text.call_args_list[-1][1]["text"] == "Force enter canceled."


 async def test_telegram_performance_handle(default_conf_usdt, update, ticker, fee, mocker) -> None:
-
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -1487,12 +1517,13 @@ async def test_telegram_performance_handle(default_conf_usdt, update, ticker, fe
     await telegram._performance(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert 'Performance' in msg_mock.call_args_list[0][0][0]
-    assert 'XRP/USDT\t2.842 USDT (10.00%) (1)' in msg_mock.call_args_list[0][0][0]
+    assert "Performance" in msg_mock.call_args_list[0][0][0]
+    assert "XRP/USDT\t2.842 USDT (10.00%) (1)" in msg_mock.call_args_list[0][0][0]


 async def test_telegram_entry_tag_performance_handle(
-        default_conf_usdt, update, ticker, fee, mocker) -> None:
+    default_conf_usdt, update, ticker, fee, mocker
+) -> None:
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -1506,16 +1537,17 @@ async def test_telegram_entry_tag_performance_handle(
     context = MagicMock()
     await telegram._enter_tag_performance(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert 'Entry Tag Performance' in msg_mock.call_args_list[0][0][0]
-    assert '`TEST1\t3.987 USDT (5.00%) (1)`' in msg_mock.call_args_list[0][0][0]
+    assert "Entry Tag Performance" in msg_mock.call_args_list[0][0][0]
+    assert "`TEST1\t3.987 USDT (5.00%) (1)`" in msg_mock.call_args_list[0][0][0]

-    context.args = ['XRP/USDT']
+    context.args = ["XRP/USDT"]
     await telegram._enter_tag_performance(update=update, context=context)
     assert msg_mock.call_count == 2

     msg_mock.reset_mock()
-    mocker.patch('freqtrade.rpc.rpc.RPC._rpc_enter_tag_performance',
-                 side_effect=RPCException('Error'))
+    mocker.patch(
+        "freqtrade.rpc.rpc.RPC._rpc_enter_tag_performance", side_effect=RPCException("Error")
+    )
     await telegram._enter_tag_performance(update=update, context=MagicMock())

     assert msg_mock.call_count == 1
@@ -1523,7 +1555,8 @@ async def test_telegram_entry_tag_performance_handle(


 async def test_telegram_exit_reason_performance_handle(
-        default_conf_usdt, update, ticker, fee, mocker) -> None:
+    default_conf_usdt, update, ticker, fee, mocker
+) -> None:
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -1537,24 +1570,26 @@ async def test_telegram_exit_reason_performance_handle(
     context = MagicMock()
     await telegram._exit_reason_performance(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert 'Exit Reason Performance' in msg_mock.call_args_list[0][0][0]
-    assert '`roi\t2.842 USDT (10.00%) (1)`' in msg_mock.call_args_list[0][0][0]
-    context.args = ['XRP/USDT']
+    assert "Exit Reason Performance" in msg_mock.call_args_list[0][0][0]
+    assert "`roi\t2.842 USDT (10.00%) (1)`" in msg_mock.call_args_list[0][0][0]
+    context.args = ["XRP/USDT"]
     await telegram._exit_reason_performance(update=update, context=context)
     assert msg_mock.call_count == 2

     msg_mock.reset_mock()
-    mocker.patch('freqtrade.rpc.rpc.RPC._rpc_exit_reason_performance',
-                 side_effect=RPCException('Error'))
+    mocker.patch(
+        "freqtrade.rpc.rpc.RPC._rpc_exit_reason_performance", side_effect=RPCException("Error")
+    )
     await telegram._exit_reason_performance(update=update, context=MagicMock())

     assert msg_mock.call_count == 1
     assert "Error" in msg_mock.call_args_list[0][0][0]


-async def test_telegram_mix_tag_performance_handle(default_conf_usdt, update, ticker, fee,
-                                                   mocker) -> None:
+async def test_telegram_mix_tag_performance_handle(
+    default_conf_usdt, update, ticker, fee, mocker
+) -> None:
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -1569,17 +1604,17 @@ async def test_telegram_mix_tag_performance_handle(default_conf_usdt, update, ti
     context = MagicMock()
     await telegram._mix_tag_performance(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert 'Mix Tag Performance' in msg_mock.call_args_list[0][0][0]
-    assert ('`TEST3 roi\t2.842 USDT (10.00%) (1)`'
-            in msg_mock.call_args_list[0][0][0])
+    assert "Mix Tag Performance" in msg_mock.call_args_list[0][0][0]
+    assert "`TEST3 roi\t2.842 USDT (10.00%) (1)`" in msg_mock.call_args_list[0][0][0]

-    context.args = ['XRP/USDT']
+    context.args = ["XRP/USDT"]
     await telegram._mix_tag_performance(update=update, context=context)
     assert msg_mock.call_count == 2

     msg_mock.reset_mock()
-    mocker.patch('freqtrade.rpc.rpc.RPC._rpc_mix_tag_performance',
-                 side_effect=RPCException('Error'))
+    mocker.patch(
+        "freqtrade.rpc.rpc.RPC._rpc_mix_tag_performance", side_effect=RPCException("Error")
+    )
     await telegram._mix_tag_performance(update=update, context=MagicMock())

     assert msg_mock.call_count == 1
@@ -1598,7 +1633,7 @@ async def test_count_handle(default_conf, update, ticker, fee, mocker) -> None:
     freqtradebot.state = State.STOPPED
     await telegram._count(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert 'not running' in msg_mock.call_args_list[0][0][0]
+    assert "not running" in msg_mock.call_args_list[0][0][0]

     msg_mock.reset_mock()
     freqtradebot.state = State.RUNNING
@@ -1607,11 +1642,10 @@ async def test_count_handle(default_conf, update, ticker, fee, mocker) -> None:
     msg_mock.reset_mock()

     await telegram._count(update=update, context=MagicMock())
-    msg = ('<pre>  current    max    total stake\n---------  -----  -------------\n'
-           '        1      {}          {}</pre>').format(
-        default_conf['max_open_trades'],
-        default_conf['stake_amount']
-    )
+    msg = (
+        "<pre>  current    max    total stake\n---------  -----  -------------\n"
+        "        1      {}          {}</pre>"
+    ).format(default_conf["max_open_trades"], default_conf["stake_amount"])

     assert msg in msg_mock.call_args_list[0][0][0]
@@ -1625,107 +1659,118 @@ async def test_telegram_lock_handle(default_conf, update, ticker, fee, mocker) -
     patch_get_signal(freqtradebot)

     await telegram._locks(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert 'No active locks.' in msg_mock.call_args_list[0][0][0]
+    assert "No active locks." in msg_mock.call_args_list[0][0][0]

     msg_mock.reset_mock()

-    PairLocks.lock_pair('ETH/BTC', dt_now() + timedelta(minutes=4), 'randreason')
-    PairLocks.lock_pair('XRP/BTC', dt_now() + timedelta(minutes=20), 'deadbeef')
+    PairLocks.lock_pair("ETH/BTC", dt_now() + timedelta(minutes=4), "randreason")
+    PairLocks.lock_pair("XRP/BTC", dt_now() + timedelta(minutes=20), "deadbeef")

     await telegram._locks(update=update, context=MagicMock())

-    assert 'Pair' in msg_mock.call_args_list[0][0][0]
-    assert 'Until' in msg_mock.call_args_list[0][0][0]
-    assert 'Reason\n' in msg_mock.call_args_list[0][0][0]
-    assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0]
-    assert 'XRP/BTC' in msg_mock.call_args_list[0][0][0]
-    assert 'deadbeef' in msg_mock.call_args_list[0][0][0]
-    assert 'randreason' in msg_mock.call_args_list[0][0][0]
+    assert "Pair" in msg_mock.call_args_list[0][0][0]
+    assert "Until" in msg_mock.call_args_list[0][0][0]
+    assert "Reason\n" in msg_mock.call_args_list[0][0][0]
+    assert "ETH/BTC" in msg_mock.call_args_list[0][0][0]
+    assert "XRP/BTC" in msg_mock.call_args_list[0][0][0]
+    assert "deadbeef" in msg_mock.call_args_list[0][0][0]
+    assert "randreason" in msg_mock.call_args_list[0][0][0]

     context = MagicMock()
-    context.args = ['XRP/BTC']
+    context.args = ["XRP/BTC"]
     msg_mock.reset_mock()
     await telegram._delete_locks(update=update, context=context)

-    assert 'ETH/BTC' in msg_mock.call_args_list[0][0][0]
-    assert 'randreason' in msg_mock.call_args_list[0][0][0]
-    assert 'XRP/BTC' not in msg_mock.call_args_list[0][0][0]
-    assert 'deadbeef' not in msg_mock.call_args_list[0][0][0]
+    assert "ETH/BTC" in msg_mock.call_args_list[0][0][0]
+    assert "randreason" in msg_mock.call_args_list[0][0][0]
+    assert "XRP/BTC" not in msg_mock.call_args_list[0][0][0]
+    assert "deadbeef" not in msg_mock.call_args_list[0][0][0]


 async def test_whitelist_static(default_conf, update, mocker) -> None:
-
     telegram, _freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)

     await telegram._whitelist(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert ("Using whitelist `['StaticPairList']` with 4 pairs\n"
-            "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['StaticPairList']` with 4 pairs\n"
+        "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['sorted']
+    context.args = ["sorted"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['StaticPairList']` with 4 pairs\n"
-            "`ETH/BTC, LTC/BTC, NEO/BTC, XRP/BTC`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['StaticPairList']` with 4 pairs\n"
+        "`ETH/BTC, LTC/BTC, NEO/BTC, XRP/BTC`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['baseonly']
+    context.args = ["baseonly"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['StaticPairList']` with 4 pairs\n"
-            "`ETH, LTC, XRP, NEO`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['StaticPairList']` with 4 pairs\n"
+        "`ETH, LTC, XRP, NEO`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['baseonly', 'sorted']
+    context.args = ["baseonly", "sorted"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['StaticPairList']` with 4 pairs\n"
-            "`ETH, LTC, NEO, XRP`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['StaticPairList']` with 4 pairs\n"
+        "`ETH, LTC, NEO, XRP`" in msg_mock.call_args_list[0][0][0]
+    )


 async def test_whitelist_dynamic(default_conf, update, mocker) -> None:
-    mocker.patch(f'{EXMS}.exchange_has', return_value=True)
-    default_conf['pairlists'] = [{'method': 'VolumePairList',
-                                  'number_assets': 4
-                                  }]
+    mocker.patch(f"{EXMS}.exchange_has", return_value=True)
+    default_conf["pairlists"] = [{"method": "VolumePairList", "number_assets": 4}]
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)

     await telegram._whitelist(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert ("Using whitelist `['VolumePairList']` with 4 pairs\n"
-            "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['VolumePairList']` with 4 pairs\n"
+        "`ETH/BTC, LTC/BTC, XRP/BTC, NEO/BTC`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['sorted']
+    context.args = ["sorted"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['VolumePairList']` with 4 pairs\n"
-            "`ETH/BTC, LTC/BTC, NEO/BTC, XRP/BTC`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['VolumePairList']` with 4 pairs\n"
+        "`ETH/BTC, LTC/BTC, NEO/BTC, XRP/BTC`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['baseonly']
+    context.args = ["baseonly"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['VolumePairList']` with 4 pairs\n"
-            "`ETH, LTC, XRP, NEO`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['VolumePairList']` with 4 pairs\n"
+        "`ETH, LTC, XRP, NEO`" in msg_mock.call_args_list[0][0][0]
+    )

     context = MagicMock()
-    context.args = ['baseonly', 'sorted']
+    context.args = ["baseonly", "sorted"]
     msg_mock.reset_mock()
     await telegram._whitelist(update=update, context=context)
-    assert ("Using whitelist `['VolumePairList']` with 4 pairs\n"
-            "`ETH, LTC, NEO, XRP`" in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Using whitelist `['VolumePairList']` with 4 pairs\n"
+        "`ETH, LTC, NEO, XRP`" in msg_mock.call_args_list[0][0][0]
+    )


 async def test_blacklist_static(default_conf, update, mocker) -> None:
-
     telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)

     await telegram._blacklist(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert ("Blacklist contains 2 pairs\n`DOGE/BTC, HOT/BTC`"
-            in msg_mock.call_args_list[0][0][0])
+    assert "Blacklist contains 2 pairs\n`DOGE/BTC, HOT/BTC`" in msg_mock.call_args_list[0][0][0]

     msg_mock.reset_mock()
@@ -1734,8 +1779,10 @@ async def test_blacklist_static(default_conf, update, mocker) -> None:
     context.args = ["ETH/BTC"]
     await telegram._blacklist(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert ("Blacklist contains 3 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC`"
-            in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Blacklist contains 3 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC`"
+        in msg_mock.call_args_list[0][0][0]
+    )
     assert freqtradebot.pairlists.blacklist == ["DOGE/BTC", "HOT/BTC", "ETH/BTC"]

     msg_mock.reset_mock()
@@ -1744,21 +1791,24 @@ async def test_blacklist_static(default_conf, update, mocker) -> None:
     await telegram._blacklist(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert ("Blacklist contains 4 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC, XRP/.*`"
-            in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Blacklist contains 4 pairs\n`DOGE/BTC, HOT/BTC, ETH/BTC, XRP/.*`"
+        in msg_mock.call_args_list[0][0][0]
+    )
     assert freqtradebot.pairlists.blacklist == ["DOGE/BTC", "HOT/BTC", "ETH/BTC", "XRP/.*"]

     msg_mock.reset_mock()
     context.args = ["DOGE/BTC"]
     await telegram._blacklist_delete(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert ("Blacklist contains 3 pairs\n`HOT/BTC, ETH/BTC, XRP/.*`"
-            in msg_mock.call_args_list[0][0][0])
+    assert (
+        "Blacklist contains 3 pairs\n`HOT/BTC, ETH/BTC, XRP/.*`" in msg_mock.call_args_list[0][0][0]
+    )


 async def test_telegram_logs(default_conf, update, mocker) -> None:
     mocker.patch.multiple(
-        'freqtrade.rpc.telegram.Telegram',
+        "freqtrade.rpc.telegram.Telegram",
         _init=MagicMock(),
     )
     setup_logging(default_conf)
@@ -1778,7 +1828,7 @@ async def test_telegram_logs(default_conf, update, mocker) -> None:
     msg_mock.reset_mock()

     # Test with changed MaxMessageLength
-    mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 200)
+    mocker.patch("freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH", 200)
     context = MagicMock()
     context.args = []
     await telegram._logs(update=update, context=context)
@@ -1788,7 +1838,6 @@ async def test_telegram_logs(default_conf, update, mocker) -> None:


 async def test_edge_disabled(default_conf, update, mocker) -> None:
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)

     await telegram._edge(update=update, context=MagicMock())
@@ -1797,34 +1846,36 @@ async def test_edge_disabled(default_conf, update, mocker) -> None:


 async def test_edge_enabled(edge_conf, update, mocker) -> None:
-    mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock(
-        return_value={
-            'E/F': PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60),
-        }
-    ))
+    mocker.patch(
+        "freqtrade.edge.Edge._cached_pairs",
+        mocker.PropertyMock(
+            return_value={
+                "E/F": PairInfo(-0.01, 0.66, 3.71, 0.50, 1.71, 10, 60),
+            }
+        ),
+    )

     telegram, _, msg_mock = get_telegram_testobject(mocker, edge_conf)

     await telegram._edge(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert 'Edge only validated following pairs:\n<pre>' in msg_mock.call_args_list[0][0][0]
-    assert 'Pair      Winrate    Expectancy    Stoploss' in msg_mock.call_args_list[0][0][0]
+    assert "Edge only validated following pairs:\n
" in msg_mock.call_args_list[0][0][0]
+    assert "Pair      Winrate    Expectancy    Stoploss" in msg_mock.call_args_list[0][0][0]
 
     msg_mock.reset_mock()
 
-    mocker.patch('freqtrade.edge.Edge._cached_pairs', mocker.PropertyMock(
-        return_value={}))
+    mocker.patch("freqtrade.edge.Edge._cached_pairs", mocker.PropertyMock(return_value={}))
     await telegram._edge(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert 'Edge only validated following pairs:' in msg_mock.call_args_list[0][0][0]
-    assert 'Winrate' not in msg_mock.call_args_list[0][0][0]
+    assert "Edge only validated following pairs:" in msg_mock.call_args_list[0][0][0]
+    assert "Winrate" not in msg_mock.call_args_list[0][0][0]
 
 
-@pytest.mark.parametrize('is_short,regex_pattern',
-                         [(True, r"now[ ]*XRP\/BTC \(#3\)  -1.00% \("),
-                          (False, r"now[ ]*XRP\/BTC \(#3\)  1.00% \(")])
+@pytest.mark.parametrize(
+    "is_short,regex_pattern",
+    [(True, r"now[ ]*XRP\/BTC \(#3\)  -1.00% \("), (False, r"now[ ]*XRP\/BTC \(#3\)  1.00% \(")],
+)
 async def test_telegram_trades(mocker, update, default_conf, fee, is_short, regex_pattern):
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     context = MagicMock()
@@ -1835,7 +1886,7 @@ async def test_telegram_trades(mocker, update, default_conf, fee, is_short, rege
     assert "
" not in msg_mock.call_args_list[0][0][0]
     msg_mock.reset_mock()
 
-    context.args = ['hello']
+    context.args = ["hello"]
     await telegram._trades(update=update, context=context)
     assert "0 recent trades:" in msg_mock.call_args_list[0][0][0]
     assert "
" not in msg_mock.call_args_list[0][0][0]
@@ -1854,9 +1905,8 @@ async def test_telegram_trades(mocker, update, default_conf, fee, is_short, rege
     assert bool(re.search(regex_pattern, msg_mock.call_args_list[0][0][0]))
 
 
-@pytest.mark.parametrize('is_short', [True, False])
+@pytest.mark.parametrize("is_short", [True, False])
 async def test_telegram_delete_trade(mocker, update, default_conf, fee, is_short):
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
     context = MagicMock()
     context.args = []
@@ -1875,9 +1925,8 @@ async def test_telegram_delete_trade(mocker, update, default_conf, fee, is_short
     assert "Please make sure to take care of this asset" in msg_mock.call_args_list[0][0][0]
 
 
-@pytest.mark.parametrize('is_short', [True, False])
+@pytest.mark.parametrize("is_short", [True, False])
 async def test_telegram_reload_trade_from_exchange(mocker, update, default_conf, fee, is_short):
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
     context = MagicMock()
     context.args = []
@@ -1894,9 +1943,8 @@ async def test_telegram_reload_trade_from_exchange(mocker, update, default_conf,
     assert "Status: `Reloaded from orders from exchange`" in msg_mock.call_args_list[0][0][0]
 
 
-@pytest.mark.parametrize('is_short', [True, False])
+@pytest.mark.parametrize("is_short", [True, False])
 async def test_telegram_delete_open_order(mocker, update, default_conf, fee, is_short, ticker):
-
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -1919,7 +1967,7 @@ async def test_telegram_delete_open_order(mocker, update, default_conf, fee, is_
     msg_mock.reset_mock()
 
     trade = Trade.get_trades([Trade.id == 6]).first()
-    mocker.patch(f'{EXMS}.fetch_order', return_value=trade.orders[-1].to_ccxt_object())
+    mocker.patch(f"{EXMS}.fetch_order", return_value=trade.orders[-1].to_ccxt_object())
     context = MagicMock()
     context.args = [6]
     await telegram._cancel_open_order(update=update, context=context)
@@ -1932,611 +1980,659 @@ async def test_help_handle(default_conf, update, mocker) -> None:
 
     await telegram._help(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert '*/help:* `This help message`' in msg_mock.call_args_list[0][0][0]
+    assert "*/help:* `This help message`" in msg_mock.call_args_list[0][0][0]
 
 
 async def test_version_handle(default_conf, update, mocker) -> None:
-
     telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     await telegram._version(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert f'*Version:* `{__version__}`' in msg_mock.call_args_list[0][0][0]
+    assert f"*Version:* `{__version__}`" in msg_mock.call_args_list[0][0][0]
 
     msg_mock.reset_mock()
-    freqtradebot.strategy.version = lambda: '1.1.1'
+    freqtradebot.strategy.version = lambda: "1.1.1"
 
     await telegram._version(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert f'*Version:* `{__version__}`' in msg_mock.call_args_list[0][0][0]
-    assert '*Strategy version: * `1.1.1`' in msg_mock.call_args_list[0][0][0]
+    assert f"*Version:* `{__version__}`" in msg_mock.call_args_list[0][0][0]
+    assert "*Strategy version: * `1.1.1`" in msg_mock.call_args_list[0][0][0]
 
 
 async def test_show_config_handle(default_conf, update, mocker) -> None:
-
-    default_conf['runmode'] = RunMode.DRY_RUN
+    default_conf["runmode"] = RunMode.DRY_RUN
 
     telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     await telegram._show_config(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert '*Mode:* `{}`'.format('Dry-run') in msg_mock.call_args_list[0][0][0]
-    assert '*Exchange:* `binance`' in msg_mock.call_args_list[0][0][0]
-    assert f'*Strategy:* `{CURRENT_TEST_STRATEGY}`' in msg_mock.call_args_list[0][0][0]
-    assert '*Stoploss:* `-0.1`' in msg_mock.call_args_list[0][0][0]
+    assert "*Mode:* `{}`".format("Dry-run") in msg_mock.call_args_list[0][0][0]
+    assert "*Exchange:* `binance`" in msg_mock.call_args_list[0][0][0]
+    assert f"*Strategy:* `{CURRENT_TEST_STRATEGY}`" in msg_mock.call_args_list[0][0][0]
+    assert "*Stoploss:* `-0.1`" in msg_mock.call_args_list[0][0][0]
 
     msg_mock.reset_mock()
-    freqtradebot.config['trailing_stop'] = True
+    freqtradebot.config["trailing_stop"] = True
     await telegram._show_config(update=update, context=MagicMock())
     assert msg_mock.call_count == 1
-    assert '*Mode:* `{}`'.format('Dry-run') in msg_mock.call_args_list[0][0][0]
-    assert '*Exchange:* `binance`' in msg_mock.call_args_list[0][0][0]
-    assert f'*Strategy:* `{CURRENT_TEST_STRATEGY}`' in msg_mock.call_args_list[0][0][0]
-    assert '*Initial Stoploss:* `-0.1`' in msg_mock.call_args_list[0][0][0]
+    assert "*Mode:* `{}`".format("Dry-run") in msg_mock.call_args_list[0][0][0]
+    assert "*Exchange:* `binance`" in msg_mock.call_args_list[0][0][0]
+    assert f"*Strategy:* `{CURRENT_TEST_STRATEGY}`" in msg_mock.call_args_list[0][0][0]
+    assert "*Initial Stoploss:* `-0.1`" in msg_mock.call_args_list[0][0][0]
 
 
-@pytest.mark.parametrize('message_type,enter,enter_signal,leverage', [
-    (RPCMessageType.ENTRY, 'Long', 'long_signal_01', None),
-    (RPCMessageType.ENTRY, 'Long', 'long_signal_01', 1.0),
-    (RPCMessageType.ENTRY, 'Long', 'long_signal_01', 5.0),
-    (RPCMessageType.ENTRY, 'Short', 'short_signal_01', 2.0)])
-def test_send_msg_enter_notification(default_conf, mocker, caplog, message_type,
-                                     enter, enter_signal, leverage) -> None:
-    default_conf['telegram']['notification_settings']['show_candle'] = 'ohlc'
-    df = DataFrame({
-        'open': [1.1],
-        'high': [2.2],
-        'low': [1.0],
-        'close': [1.5],
-    })
-    mocker.patch('freqtrade.data.dataprovider.DataProvider.get_analyzed_dataframe',
-                 return_value=(df, 1))
+@pytest.mark.parametrize(
+    "message_type,enter,enter_signal,leverage",
+    [
+        (RPCMessageType.ENTRY, "Long", "long_signal_01", None),
+        (RPCMessageType.ENTRY, "Long", "long_signal_01", 1.0),
+        (RPCMessageType.ENTRY, "Long", "long_signal_01", 5.0),
+        (RPCMessageType.ENTRY, "Short", "short_signal_01", 2.0),
+    ],
+)
+def test_send_msg_enter_notification(
+    default_conf, mocker, caplog, message_type, enter, enter_signal, leverage
+) -> None:
+    default_conf["telegram"]["notification_settings"]["show_candle"] = "ohlc"
+    df = DataFrame(
+        {
+            "open": [1.1],
+            "high": [2.2],
+            "low": [1.0],
+            "close": [1.5],
+        }
+    )
+    mocker.patch(
+        "freqtrade.data.dataprovider.DataProvider.get_analyzed_dataframe", return_value=(df, 1)
+    )
 
     msg = {
-        'type': message_type,
-        'trade_id': 1,
-        'enter_tag': enter_signal,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': leverage,
-        'open_rate': 1.099e-05,
-        'order_type': 'limit',
-        'direction': enter,
-        'stake_amount': 0.01465333,
-        'stake_amount_fiat': 0.0,
-        'stake_currency': 'BTC',
-        'quote_currency': 'BTC',
-        'base_currency': 'ETH',
-        'fiat_currency': 'USD',
-        'sub_trade': False,
-        'current_rate': 1.099e-05,
-        'amount': 1333.3333333333335,
-        'analyzed_candle': {'open': 1.1, 'high': 2.2, 'low': 1.0, 'close': 1.5},
-        'open_date': dt_now() + timedelta(hours=-1)
+        "type": message_type,
+        "trade_id": 1,
+        "enter_tag": enter_signal,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": leverage,
+        "open_rate": 1.099e-05,
+        "order_type": "limit",
+        "direction": enter,
+        "stake_amount": 0.01465333,
+        "stake_amount_fiat": 0.0,
+        "stake_currency": "BTC",
+        "quote_currency": "BTC",
+        "base_currency": "ETH",
+        "fiat_currency": "USD",
+        "sub_trade": False,
+        "current_rate": 1.099e-05,
+        "amount": 1333.3333333333335,
+        "analyzed_candle": {"open": 1.1, "high": 2.2, "low": 1.0, "close": 1.5},
+        "open_date": dt_now() + timedelta(hours=-1),
     }
     telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     telegram.send_msg(msg)
-    leverage_text = f' ({leverage:.3g}x)' if leverage and leverage != 1.0 else ''
+    leverage_text = f" ({leverage:.3g}x)" if leverage and leverage != 1.0 else ""
 
     assert msg_mock.call_args[0][0] == (
-        f'\N{LARGE BLUE CIRCLE} *Binance (dry):* New Trade (#1)\n'
-        f'*Pair:* `ETH/BTC`\n'
-        '*Candle OHLC*: `1.1, 2.2, 1.0, 1.5`\n'
-        f'*Enter Tag:* `{enter_signal}`\n'
-        '*Amount:* `1333.33333333`\n'
-        f'*Direction:* `{enter}'
-        f'{leverage_text}`\n'
-        '*Open Rate:* `0.00001099 BTC`\n'
-        '*Current Rate:* `0.00001099 BTC`\n'
-        '*Total:* `0.01465333 BTC / 180.895 USD`'
+        f"\N{LARGE BLUE CIRCLE} *Binance (dry):* New Trade (#1)\n"
+        f"*Pair:* `ETH/BTC`\n"
+        "*Candle OHLC*: `1.1, 2.2, 1.0, 1.5`\n"
+        f"*Enter Tag:* `{enter_signal}`\n"
+        "*Amount:* `1333.33333333`\n"
+        f"*Direction:* `{enter}"
+        f"{leverage_text}`\n"
+        "*Open Rate:* `0.00001099 BTC`\n"
+        "*Current Rate:* `0.00001099 BTC`\n"
+        "*Total:* `0.01465333 BTC / 180.895 USD`"
     )
 
-    freqtradebot.config['telegram']['notification_settings'] = {'entry': 'off'}
+    freqtradebot.config["telegram"]["notification_settings"] = {"entry": "off"}
     caplog.clear()
     msg_mock.reset_mock()
     telegram.send_msg(msg)
     assert msg_mock.call_count == 0
     assert log_has("Notification 'entry' not sent.", caplog)
 
-    freqtradebot.config['telegram']['notification_settings'] = {'entry': 'silent'}
+    freqtradebot.config["telegram"]["notification_settings"] = {"entry": "silent"}
     caplog.clear()
     msg_mock.reset_mock()
 
     telegram.send_msg(msg)
     assert msg_mock.call_count == 1
-    assert msg_mock.call_args_list[0][1]['disable_notification'] is True
+    assert msg_mock.call_args_list[0][1]["disable_notification"] is True
 
 
-@pytest.mark.parametrize('message_type,enter_signal', [
-    (RPCMessageType.ENTRY_CANCEL, 'long_signal_01'),
-    (RPCMessageType.ENTRY_CANCEL, 'short_signal_01')])
+@pytest.mark.parametrize(
+    "message_type,enter_signal",
+    [
+        (RPCMessageType.ENTRY_CANCEL, "long_signal_01"),
+        (RPCMessageType.ENTRY_CANCEL, "short_signal_01"),
+    ],
+)
 def test_send_msg_enter_cancel_notification(
-        default_conf, mocker, message_type, enter_signal) -> None:
-
+    default_conf, mocker, message_type, enter_signal
+) -> None:
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
-    telegram.send_msg({
-        'type': message_type,
-        'enter_tag': enter_signal,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'reason': CANCEL_REASON['TIMEOUT']
-    })
-    assert (msg_mock.call_args[0][0] == '\N{WARNING SIGN} *Binance (dry):* '
-            'Cancelling enter Order for ETH/BTC (#1). '
-            'Reason: cancelled due to timeout.')
+    telegram.send_msg(
+        {
+            "type": message_type,
+            "enter_tag": enter_signal,
+            "trade_id": 1,
+            "exchange": "Binance",
+            "pair": "ETH/BTC",
+            "reason": CANCEL_REASON["TIMEOUT"],
+        }
+    )
+    assert (
+        msg_mock.call_args[0][0] == "\N{WARNING SIGN} *Binance (dry):* "
+        "Cancelling enter Order for ETH/BTC (#1). "
+        "Reason: cancelled due to timeout."
+    )
 
 
 def test_send_msg_protection_notification(default_conf, mocker, time_machine) -> None:
-
-    default_conf['telegram']['notification_settings']['protection_trigger'] = 'on'
+    default_conf["telegram"]["notification_settings"]["protection_trigger"] = "on"
 
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
     time_machine.move_to("2021-09-01 05:00:00 +00:00")
-    lock = PairLocks.lock_pair('ETH/BTC', dt_now() + timedelta(minutes=6), 'randreason')
+    lock = PairLocks.lock_pair("ETH/BTC", dt_now() + timedelta(minutes=6), "randreason")
     msg = {
-        'type': RPCMessageType.PROTECTION_TRIGGER,
+        "type": RPCMessageType.PROTECTION_TRIGGER,
     }
     msg.update(lock.to_json())
     telegram.send_msg(msg)
-    assert (msg_mock.call_args[0][0] == "*Protection* triggered due to randreason. "
-            "`ETH/BTC` will be locked until `2021-09-01 05:10:00`.")
+    assert (
+        msg_mock.call_args[0][0] == "*Protection* triggered due to randreason. "
+        "`ETH/BTC` will be locked until `2021-09-01 05:10:00`."
+    )
 
     msg_mock.reset_mock()
     # Test global protection
 
     msg = {
-        'type': RPCMessageType.PROTECTION_TRIGGER_GLOBAL,
+        "type": RPCMessageType.PROTECTION_TRIGGER_GLOBAL,
     }
-    lock = PairLocks.lock_pair('*', dt_now() + timedelta(minutes=100), 'randreason')
+    lock = PairLocks.lock_pair("*", dt_now() + timedelta(minutes=100), "randreason")
     msg.update(lock.to_json())
     telegram.send_msg(msg)
-    assert (msg_mock.call_args[0][0] == "*Protection* triggered due to randreason. "
-            "*All pairs* will be locked until `2021-09-01 06:45:00`.")
+    assert (
+        msg_mock.call_args[0][0] == "*Protection* triggered due to randreason. "
+        "*All pairs* will be locked until `2021-09-01 06:45:00`."
+    )
 
 
-@pytest.mark.parametrize('message_type,entered,enter_signal,leverage', [
-    (RPCMessageType.ENTRY_FILL, 'Long', 'long_signal_01', 1.0),
-    (RPCMessageType.ENTRY_FILL, 'Long', 'long_signal_02', 2.0),
-    (RPCMessageType.ENTRY_FILL, 'Short', 'short_signal_01', 2.0),
-])
-def test_send_msg_entry_fill_notification(default_conf, mocker, message_type, entered,
-                                          enter_signal, leverage) -> None:
-
-    default_conf['telegram']['notification_settings']['entry_fill'] = 'on'
+@pytest.mark.parametrize(
+    "message_type,entered,enter_signal,leverage",
+    [
+        (RPCMessageType.ENTRY_FILL, "Long", "long_signal_01", 1.0),
+        (RPCMessageType.ENTRY_FILL, "Long", "long_signal_02", 2.0),
+        (RPCMessageType.ENTRY_FILL, "Short", "short_signal_01", 2.0),
+    ],
+)
+def test_send_msg_entry_fill_notification(
+    default_conf, mocker, message_type, entered, enter_signal, leverage
+) -> None:
+    default_conf["telegram"]["notification_settings"]["entry_fill"] = "on"
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
-    telegram.send_msg({
-        'type': message_type,
-        'trade_id': 1,
-        'enter_tag': enter_signal,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': leverage,
-        'stake_amount': 0.01465333,
-        'direction': entered,
-        'sub_trade': False,
-        'stake_currency': 'BTC',
-        'quote_currency': 'BTC',
-        'base_currency': 'ETH',
-        'fiat_currency': 'USD',
-        'open_rate': 1.099e-05,
-        'amount': 1333.3333333333335,
-        'open_date': dt_now() - timedelta(hours=1)
-    })
-    leverage_text = f' ({leverage:.3g}x)' if leverage != 1.0 else ''
+    telegram.send_msg(
+        {
+            "type": message_type,
+            "trade_id": 1,
+            "enter_tag": enter_signal,
+            "exchange": "Binance",
+            "pair": "ETH/BTC",
+            "leverage": leverage,
+            "stake_amount": 0.01465333,
+            "direction": entered,
+            "sub_trade": False,
+            "stake_currency": "BTC",
+            "quote_currency": "BTC",
+            "base_currency": "ETH",
+            "fiat_currency": "USD",
+            "open_rate": 1.099e-05,
+            "amount": 1333.3333333333335,
+            "open_date": dt_now() - timedelta(hours=1),
+        }
+    )
+    leverage_text = f" ({leverage:.3g}x)" if leverage != 1.0 else ""
     assert msg_mock.call_args[0][0] == (
-        f'\N{CHECK MARK} *Binance (dry):* New Trade filled (#1)\n'
-        f'*Pair:* `ETH/BTC`\n'
-        f'*Enter Tag:* `{enter_signal}`\n'
-        '*Amount:* `1333.33333333`\n'
-        f'*Direction:* `{entered}'
+        f"\N{CHECK MARK} *Binance (dry):* New Trade filled (#1)\n"
+        f"*Pair:* `ETH/BTC`\n"
+        f"*Enter Tag:* `{enter_signal}`\n"
+        "*Amount:* `1333.33333333`\n"
+        f"*Direction:* `{entered}"
         f"{leverage_text}`\n"
-        '*Open Rate:* `0.00001099 BTC`\n'
-        '*Total:* `0.01465333 BTC / 180.895 USD`'
+        "*Open Rate:* `0.00001099 BTC`\n"
+        "*Total:* `0.01465333 BTC / 180.895 USD`"
     )
 
     msg_mock.reset_mock()
-    telegram.send_msg({
-        'type': message_type,
-        'trade_id': 1,
-        'enter_tag': enter_signal,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': leverage,
-        'stake_amount': 0.01465333,
-        'sub_trade': True,
-        'direction': entered,
-        'stake_currency': 'BTC',
-        'quote_currency': 'BTC',
-        'base_currency': 'ETH',
-        'fiat_currency': 'USD',
-        'open_rate': 1.099e-05,
-        'amount': 1333.3333333333335,
-        'open_date': dt_now() - timedelta(hours=1)
-    })
+    telegram.send_msg(
+        {
+            "type": message_type,
+            "trade_id": 1,
+            "enter_tag": enter_signal,
+            "exchange": "Binance",
+            "pair": "ETH/BTC",
+            "leverage": leverage,
+            "stake_amount": 0.01465333,
+            "sub_trade": True,
+            "direction": entered,
+            "stake_currency": "BTC",
+            "quote_currency": "BTC",
+            "base_currency": "ETH",
+            "fiat_currency": "USD",
+            "open_rate": 1.099e-05,
+            "amount": 1333.3333333333335,
+            "open_date": dt_now() - timedelta(hours=1),
+        }
+    )
 
     assert msg_mock.call_args[0][0] == (
-        f'\N{CHECK MARK} *Binance (dry):* Position increase filled (#1)\n'
-        f'*Pair:* `ETH/BTC`\n'
-        f'*Enter Tag:* `{enter_signal}`\n'
-        '*Amount:* `1333.33333333`\n'
-        f'*Direction:* `{entered}'
+        f"\N{CHECK MARK} *Binance (dry):* Position increase filled (#1)\n"
+        f"*Pair:* `ETH/BTC`\n"
+        f"*Enter Tag:* `{enter_signal}`\n"
+        "*Amount:* `1333.33333333`\n"
+        f"*Direction:* `{entered}"
         f"{leverage_text}`\n"
-        '*Open Rate:* `0.00001099 BTC`\n'
-        '*New Total:* `0.01465333 BTC / 180.895 USD`'
+        "*Open Rate:* `0.00001099 BTC`\n"
+        "*New Total:* `0.01465333 BTC / 180.895 USD`"
     )
 
 
 def test_send_msg_exit_notification(default_conf, mocker) -> None:
-
     with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False):
         telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
         old_convamount = telegram._rpc._fiat_converter.convert_amount
         telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812
-        telegram.send_msg({
-            'type': RPCMessageType.EXIT,
-            'trade_id': 1,
-            'exchange': 'Binance',
-            'pair': 'KEY/ETH',
-            'leverage': 1.0,
-            'direction': 'Long',
-            'gain': 'loss',
-            'order_rate': 3.201e-04,
-            'amount': 1333.3333333333335,
-            'order_type': 'market',
-            'open_rate': 7.5e-04,
-            'current_rate': 3.201e-04,
-            'profit_amount': -0.05746268,
-            'profit_ratio': -0.57405275,
-            'stake_currency': 'ETH',
-            'quote_currency': 'ETH',
-            'base_currency': 'KEY',
-            'fiat_currency': 'USD',
-            'enter_tag': 'buy_signal1',
-            'exit_reason': ExitType.STOP_LOSS.value,
-            'open_date': dt_now() - timedelta(hours=1),
-            'close_date': dt_now(),
-        })
+        telegram.send_msg(
+            {
+                "type": RPCMessageType.EXIT,
+                "trade_id": 1,
+                "exchange": "Binance",
+                "pair": "KEY/ETH",
+                "leverage": 1.0,
+                "direction": "Long",
+                "gain": "loss",
+                "order_rate": 3.201e-04,
+                "amount": 1333.3333333333335,
+                "order_type": "market",
+                "open_rate": 7.5e-04,
+                "current_rate": 3.201e-04,
+                "profit_amount": -0.05746268,
+                "profit_ratio": -0.57405275,
+                "stake_currency": "ETH",
+                "quote_currency": "ETH",
+                "base_currency": "KEY",
+                "fiat_currency": "USD",
+                "enter_tag": "buy_signal1",
+                "exit_reason": ExitType.STOP_LOSS.value,
+                "open_date": dt_now() - timedelta(hours=1),
+                "close_date": dt_now(),
+            }
+        )
         assert msg_mock.call_args[0][0] == (
-            '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
-            '*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH / -24.812 USD)`\n'
-            '*Enter Tag:* `buy_signal1`\n'
-            '*Exit Reason:* `stop_loss`\n'
-            '*Direction:* `Long`\n'
-            '*Amount:* `1333.33333333`\n'
-            '*Open Rate:* `0.00075 ETH`\n'
-            '*Current Rate:* `0.00032 ETH`\n'
-            '*Exit Rate:* `0.00032 ETH`\n'
-            '*Duration:* `1:00:00 (60.0 min)`'
+            "\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n"
+            "*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH / -24.812 USD)`\n"
+            "*Enter Tag:* `buy_signal1`\n"
+            "*Exit Reason:* `stop_loss`\n"
+            "*Direction:* `Long`\n"
+            "*Amount:* `1333.33333333`\n"
+            "*Open Rate:* `0.00075 ETH`\n"
+            "*Current Rate:* `0.00032 ETH`\n"
+            "*Exit Rate:* `0.00032 ETH`\n"
+            "*Duration:* `1:00:00 (60.0 min)`"
         )
 
         msg_mock.reset_mock()
-        telegram.send_msg({
-            'type': RPCMessageType.EXIT,
-            'trade_id': 1,
-            'exchange': 'Binance',
-            'pair': 'KEY/ETH',
-            'direction': 'Long',
-            'gain': 'loss',
-            'order_rate': 3.201e-04,
-            'amount': 1333.3333333333335,
-            'order_type': 'market',
-            'open_rate': 7.5e-04,
-            'current_rate': 3.201e-04,
-            'cumulative_profit': -0.15746268,
-            'profit_amount': -0.05746268,
-            'profit_ratio': -0.57405275,
-            'stake_currency': 'ETH',
-            'quote_currency': 'ETH',
-            'base_currency': 'KEY',
-            'fiat_currency': 'USD',
-            'enter_tag': 'buy_signal1',
-            'exit_reason': ExitType.STOP_LOSS.value,
-            'open_date': dt_now() - timedelta(days=1, hours=2, minutes=30),
-            'close_date': dt_now(),
-            'stake_amount': 0.01,
-            'sub_trade': True,
-        })
+        telegram.send_msg(
+            {
+                "type": RPCMessageType.EXIT,
+                "trade_id": 1,
+                "exchange": "Binance",
+                "pair": "KEY/ETH",
+                "direction": "Long",
+                "gain": "loss",
+                "order_rate": 3.201e-04,
+                "amount": 1333.3333333333335,
+                "order_type": "market",
+                "open_rate": 7.5e-04,
+                "current_rate": 3.201e-04,
+                "cumulative_profit": -0.15746268,
+                "profit_amount": -0.05746268,
+                "profit_ratio": -0.57405275,
+                "stake_currency": "ETH",
+                "quote_currency": "ETH",
+                "base_currency": "KEY",
+                "fiat_currency": "USD",
+                "enter_tag": "buy_signal1",
+                "exit_reason": ExitType.STOP_LOSS.value,
+                "open_date": dt_now() - timedelta(days=1, hours=2, minutes=30),
+                "close_date": dt_now(),
+                "stake_amount": 0.01,
+                "sub_trade": True,
+            }
+        )
         assert msg_mock.call_args[0][0] == (
-            '\N{WARNING SIGN} *Binance (dry):* Partially exiting KEY/ETH (#1)\n'
-            '*Unrealized Sub Profit:* `-57.41% (loss: -0.05746 ETH / -24.812 USD)`\n'
-            '*Cumulative Profit:* `-0.15746 ETH / -24.812 USD`\n'
-            '*Enter Tag:* `buy_signal1`\n'
-            '*Exit Reason:* `stop_loss`\n'
-            '*Direction:* `Long`\n'
-            '*Amount:* `1333.33333333`\n'
-            '*Open Rate:* `0.00075 ETH`\n'
-            '*Current Rate:* `0.00032 ETH`\n'
-            '*Exit Rate:* `0.00032 ETH`\n'
-            '*Remaining:* `0.01 ETH / -24.812 USD`'
-            )
+            "\N{WARNING SIGN} *Binance (dry):* Partially exiting KEY/ETH (#1)\n"
+            "*Unrealized Sub Profit:* `-57.41% (loss: -0.05746 ETH / -24.812 USD)`\n"
+            "*Cumulative Profit:* `-0.15746 ETH / -24.812 USD`\n"
+            "*Enter Tag:* `buy_signal1`\n"
+            "*Exit Reason:* `stop_loss`\n"
+            "*Direction:* `Long`\n"
+            "*Amount:* `1333.33333333`\n"
+            "*Open Rate:* `0.00075 ETH`\n"
+            "*Current Rate:* `0.00032 ETH`\n"
+            "*Exit Rate:* `0.00032 ETH`\n"
+            "*Remaining:* `0.01 ETH / -24.812 USD`"
+        )
 
         msg_mock.reset_mock()
-        telegram.send_msg({
-            'type': RPCMessageType.EXIT,
-            'trade_id': 1,
-            'exchange': 'Binance',
-            'pair': 'KEY/ETH',
-            'direction': 'Long',
-            'gain': 'loss',
-            'order_rate': 3.201e-04,
-            'amount': 1333.3333333333335,
-            'order_type': 'market',
-            'open_rate': 7.5e-04,
-            'current_rate': 3.201e-04,
-            'profit_amount': -0.05746268,
-            'profit_ratio': -0.57405275,
-            'stake_currency': 'ETH',
-            'quote_currency': 'ETH',
-            'base_currency': 'KEY',
-            'fiat_currency': None,
-            'enter_tag': 'buy_signal1',
-            'exit_reason': ExitType.STOP_LOSS.value,
-            'open_date': dt_now() - timedelta(days=1, hours=2, minutes=30),
-            'close_date': dt_now(),
-        })
+        telegram.send_msg(
+            {
+                "type": RPCMessageType.EXIT,
+                "trade_id": 1,
+                "exchange": "Binance",
+                "pair": "KEY/ETH",
+                "direction": "Long",
+                "gain": "loss",
+                "order_rate": 3.201e-04,
+                "amount": 1333.3333333333335,
+                "order_type": "market",
+                "open_rate": 7.5e-04,
+                "current_rate": 3.201e-04,
+                "profit_amount": -0.05746268,
+                "profit_ratio": -0.57405275,
+                "stake_currency": "ETH",
+                "quote_currency": "ETH",
+                "base_currency": "KEY",
+                "fiat_currency": None,
+                "enter_tag": "buy_signal1",
+                "exit_reason": ExitType.STOP_LOSS.value,
+                "open_date": dt_now() - timedelta(days=1, hours=2, minutes=30),
+                "close_date": dt_now(),
+            }
+        )
         assert msg_mock.call_args[0][0] == (
-            '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
-            '*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH)`\n'
-            '*Enter Tag:* `buy_signal1`\n'
-            '*Exit Reason:* `stop_loss`\n'
-            '*Direction:* `Long`\n'
-            '*Amount:* `1333.33333333`\n'
-            '*Open Rate:* `0.00075 ETH`\n'
-            '*Current Rate:* `0.00032 ETH`\n'
-            '*Exit Rate:* `0.00032 ETH`\n'
-            '*Duration:* `1 day, 2:30:00 (1590.0 min)`'
+            "\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n"
+            "*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH)`\n"
+            "*Enter Tag:* `buy_signal1`\n"
+            "*Exit Reason:* `stop_loss`\n"
+            "*Direction:* `Long`\n"
+            "*Amount:* `1333.33333333`\n"
+            "*Open Rate:* `0.00075 ETH`\n"
+            "*Current Rate:* `0.00032 ETH`\n"
+            "*Exit Rate:* `0.00032 ETH`\n"
+            "*Duration:* `1 day, 2:30:00 (1590.0 min)`"
         )
         # Reset singleton function to avoid random breaks
         telegram._rpc._fiat_converter.convert_amount = old_convamount
 
 
 async def test_send_msg_exit_cancel_notification(default_conf, mocker) -> None:
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     old_convamount = telegram._rpc._fiat_converter.convert_amount
     telegram._rpc._fiat_converter.convert_amount = lambda a, b, c: -24.812
-    telegram.send_msg({
-        'type': RPCMessageType.EXIT_CANCEL,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'KEY/ETH',
-        'reason': 'Cancelled on exchange'
-    })
+    telegram.send_msg(
+        {
+            "type": RPCMessageType.EXIT_CANCEL,
+            "trade_id": 1,
+            "exchange": "Binance",
+            "pair": "KEY/ETH",
+            "reason": "Cancelled on exchange",
+        }
+    )
     assert msg_mock.call_args[0][0] == (
-        '\N{WARNING SIGN} *Binance (dry):* Cancelling exit Order for KEY/ETH (#1).'
-        ' Reason: Cancelled on exchange.')
+        "\N{WARNING SIGN} *Binance (dry):* Cancelling exit Order for KEY/ETH (#1)."
+        " Reason: Cancelled on exchange."
+    )
 
     msg_mock.reset_mock()
     # Test with live mode (no dry appendix)
-    telegram._config['dry_run'] = False
-    telegram.send_msg({
-        'type': RPCMessageType.EXIT_CANCEL,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'KEY/ETH',
-        'reason': 'timeout'
-    })
+    telegram._config["dry_run"] = False
+    telegram.send_msg(
+        {
+            "type": RPCMessageType.EXIT_CANCEL,
+            "trade_id": 1,
+            "exchange": "Binance",
+            "pair": "KEY/ETH",
+            "reason": "timeout",
+        }
+    )
     assert msg_mock.call_args[0][0] == (
-        '\N{WARNING SIGN} *Binance:* Cancelling exit Order for KEY/ETH (#1). Reason: timeout.')
+        "\N{WARNING SIGN} *Binance:* Cancelling exit Order for KEY/ETH (#1). Reason: timeout."
+    )
     # Reset singleton function to avoid random breaks
     telegram._rpc._fiat_converter.convert_amount = old_convamount
 
 
-@pytest.mark.parametrize('direction,enter_signal,leverage', [
-    ('Long', 'long_signal_01', None),
-    ('Long', 'long_signal_01', 1.0),
-    ('Long', 'long_signal_01', 5.0),
-    ('Short', 'short_signal_01', 2.0)])
-def test_send_msg_exit_fill_notification(default_conf, mocker, direction,
-                                         enter_signal, leverage) -> None:
-
-    default_conf['telegram']['notification_settings']['exit_fill'] = 'on'
+@pytest.mark.parametrize(
+    "direction,enter_signal,leverage",
+    [
+        ("Long", "long_signal_01", None),
+        ("Long", "long_signal_01", 1.0),
+        ("Long", "long_signal_01", 5.0),
+        ("Short", "short_signal_01", 2.0),
+    ],
+)
+def test_send_msg_exit_fill_notification(
+    default_conf, mocker, direction, enter_signal, leverage
+) -> None:
+    default_conf["telegram"]["notification_settings"]["exit_fill"] = "on"
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
     with time_machine.travel("2022-09-01 05:00:00 +00:00", tick=False):
-        telegram.send_msg({
-            'type': RPCMessageType.EXIT_FILL,
-            'trade_id': 1,
-            'exchange': 'Binance',
-            'pair': 'KEY/ETH',
-            'leverage': leverage,
-            'direction': direction,
-            'gain': 'loss',
-            'limit': 3.201e-04,
-            'amount': 1333.3333333333335,
-            'order_type': 'market',
-            'open_rate': 7.5e-04,
-            'close_rate': 3.201e-04,
-            'profit_amount': -0.05746268,
-            'profit_ratio': -0.57405275,
-            'stake_currency': 'ETH',
-            'quote_currency': 'ETH',
-            'base_currency': 'KEY',
-            'fiat_currency': None,
-            'enter_tag': enter_signal,
-            'exit_reason': ExitType.STOP_LOSS.value,
-            'open_date': dt_now() - timedelta(days=1, hours=2, minutes=30),
-            'close_date': dt_now(),
-        })
+        telegram.send_msg(
+            {
+                "type": RPCMessageType.EXIT_FILL,
+                "trade_id": 1,
+                "exchange": "Binance",
+                "pair": "KEY/ETH",
+                "leverage": leverage,
+                "direction": direction,
+                "gain": "loss",
+                "limit": 3.201e-04,
+                "amount": 1333.3333333333335,
+                "order_type": "market",
+                "open_rate": 7.5e-04,
+                "close_rate": 3.201e-04,
+                "profit_amount": -0.05746268,
+                "profit_ratio": -0.57405275,
+                "stake_currency": "ETH",
+                "quote_currency": "ETH",
+                "base_currency": "KEY",
+                "fiat_currency": None,
+                "enter_tag": enter_signal,
+                "exit_reason": ExitType.STOP_LOSS.value,
+                "open_date": dt_now() - timedelta(days=1, hours=2, minutes=30),
+                "close_date": dt_now(),
+            }
+        )
 
-        leverage_text = f' ({leverage:.3g}x)`\n' if leverage and leverage != 1.0 else '`\n'
+        leverage_text = f" ({leverage:.3g}x)`\n" if leverage and leverage != 1.0 else "`\n"
         assert msg_mock.call_args[0][0] == (
-            '\N{WARNING SIGN} *Binance (dry):* Exited KEY/ETH (#1)\n'
-            '*Profit:* `-57.41% (loss: -0.05746 ETH)`\n'
-            f'*Enter Tag:* `{enter_signal}`\n'
-            '*Exit Reason:* `stop_loss`\n'
+            "\N{WARNING SIGN} *Binance (dry):* Exited KEY/ETH (#1)\n"
+            "*Profit:* `-57.41% (loss: -0.05746 ETH)`\n"
+            f"*Enter Tag:* `{enter_signal}`\n"
+            "*Exit Reason:* `stop_loss`\n"
             f"*Direction:* `{direction}"
             f"{leverage_text}"
-            '*Amount:* `1333.33333333`\n'
-            '*Open Rate:* `0.00075 ETH`\n'
-            '*Exit Rate:* `0.00032 ETH`\n'
-            '*Duration:* `1 day, 2:30:00 (1590.0 min)`'
+            "*Amount:* `1333.33333333`\n"
+            "*Open Rate:* `0.00075 ETH`\n"
+            "*Exit Rate:* `0.00032 ETH`\n"
+            "*Duration:* `1 day, 2:30:00 (1590.0 min)`"
         )
 
 
 def test_send_msg_status_notification(default_conf, mocker) -> None:
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
-    telegram.send_msg({
-        'type': RPCMessageType.STATUS,
-        'status': 'running'
-    })
-    assert msg_mock.call_args[0][0] == '*Status:* `running`'
+    telegram.send_msg({"type": RPCMessageType.STATUS, "status": "running"})
+    assert msg_mock.call_args[0][0] == "*Status:* `running`"
 
 
 async def test_warning_notification(default_conf, mocker) -> None:
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
-    telegram.send_msg({
-        'type': RPCMessageType.WARNING,
-        'status': 'message'
-    })
-    assert msg_mock.call_args[0][0] == '\N{WARNING SIGN} *Warning:* `message`'
+    telegram.send_msg({"type": RPCMessageType.WARNING, "status": "message"})
+    assert msg_mock.call_args[0][0] == "\N{WARNING SIGN} *Warning:* `message`"
 
 
 def test_startup_notification(default_conf, mocker) -> None:
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
-    telegram.send_msg({
-        'type': RPCMessageType.STARTUP,
-        'status': '*Custom:* `Hello World`'
-    })
-    assert msg_mock.call_args[0][0] == '*Custom:* `Hello World`'
+    telegram.send_msg({"type": RPCMessageType.STARTUP, "status": "*Custom:* `Hello World`"})
+    assert msg_mock.call_args[0][0] == "*Custom:* `Hello World`"
 
 
 def test_send_msg_strategy_msg_notification(default_conf, mocker) -> None:
-
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
-    telegram.send_msg({
-        'type': RPCMessageType.STRATEGY_MSG,
-        'msg': 'hello world, Test msg'
-    })
-    assert msg_mock.call_args[0][0] == 'hello world, Test msg'
+    telegram.send_msg({"type": RPCMessageType.STRATEGY_MSG, "msg": "hello world, Test msg"})
+    assert msg_mock.call_args[0][0] == "hello world, Test msg"
 
 
 def test_send_msg_unknown_type(default_conf, mocker) -> None:
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
-    telegram.send_msg({
-        'type': None,
-    })
+    telegram.send_msg(
+        {
+            "type": None,
+        }
+    )
     assert msg_mock.call_count == 0
 
 
-@pytest.mark.parametrize('message_type,enter,enter_signal,leverage', [
-    (RPCMessageType.ENTRY, 'Long', 'long_signal_01', None),
-    (RPCMessageType.ENTRY, 'Long', 'long_signal_01', 2.0),
-    (RPCMessageType.ENTRY, 'Short', 'short_signal_01', 2.0)])
+@pytest.mark.parametrize(
+    "message_type,enter,enter_signal,leverage",
+    [
+        (RPCMessageType.ENTRY, "Long", "long_signal_01", None),
+        (RPCMessageType.ENTRY, "Long", "long_signal_01", 2.0),
+        (RPCMessageType.ENTRY, "Short", "short_signal_01", 2.0),
+    ],
+)
 def test_send_msg_buy_notification_no_fiat(
-        default_conf, mocker, message_type, enter, enter_signal, leverage) -> None:
-    del default_conf['fiat_display_currency']
-    default_conf['dry_run'] = False
+    default_conf, mocker, message_type, enter, enter_signal, leverage
+) -> None:
+    del default_conf["fiat_display_currency"]
+    default_conf["dry_run"] = False
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
-    telegram.send_msg({
-        'type': message_type,
-        'enter_tag': enter_signal,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': leverage,
-        'open_rate': 1.099e-05,
-        'order_type': 'limit',
-        'direction': enter,
-        'sub_trade': False,
-        'stake_amount': 0.01465333,
-        'stake_amount_fiat': 0.0,
-        'stake_currency': 'BTC',
-        'quote_currency': 'BTC',
-        'base_currency': 'ETH',
-        'fiat_currency': None,
-        'current_rate': 1.099e-05,
-        'amount': 1333.3333333333335,
-        'open_date': dt_now() - timedelta(hours=1)
-    })
+    telegram.send_msg(
+        {
+            "type": message_type,
+            "enter_tag": enter_signal,
+            "trade_id": 1,
+            "exchange": "Binance",
+            "pair": "ETH/BTC",
+            "leverage": leverage,
+            "open_rate": 1.099e-05,
+            "order_type": "limit",
+            "direction": enter,
+            "sub_trade": False,
+            "stake_amount": 0.01465333,
+            "stake_amount_fiat": 0.0,
+            "stake_currency": "BTC",
+            "quote_currency": "BTC",
+            "base_currency": "ETH",
+            "fiat_currency": None,
+            "current_rate": 1.099e-05,
+            "amount": 1333.3333333333335,
+            "open_date": dt_now() - timedelta(hours=1),
+        }
+    )
 
-    leverage_text = f' ({leverage:.3g}x)' if leverage and leverage != 1.0 else ''
+    leverage_text = f" ({leverage:.3g}x)" if leverage and leverage != 1.0 else ""
     assert msg_mock.call_args[0][0] == (
-        f'\N{LARGE BLUE CIRCLE} *Binance:* New Trade (#1)\n'
-        '*Pair:* `ETH/BTC`\n'
-        f'*Enter Tag:* `{enter_signal}`\n'
-        '*Amount:* `1333.33333333`\n'
-        f'*Direction:* `{enter}'
-        f'{leverage_text}`\n'
-        '*Open Rate:* `0.00001099 BTC`\n'
-        '*Current Rate:* `0.00001099 BTC`\n'
-        '*Total:* `0.01465333 BTC`'
+        f"\N{LARGE BLUE CIRCLE} *Binance:* New Trade (#1)\n"
+        "*Pair:* `ETH/BTC`\n"
+        f"*Enter Tag:* `{enter_signal}`\n"
+        "*Amount:* `1333.33333333`\n"
+        f"*Direction:* `{enter}"
+        f"{leverage_text}`\n"
+        "*Open Rate:* `0.00001099 BTC`\n"
+        "*Current Rate:* `0.00001099 BTC`\n"
+        "*Total:* `0.01465333 BTC`"
     )
 
 
-@pytest.mark.parametrize('direction,enter_signal,leverage', [
-    ('Long', 'long_signal_01', None),
-    ('Long', 'long_signal_01', 1.0),
-    ('Long', 'long_signal_01', 5.0),
-    ('Short', 'short_signal_01', 2.0),
-])
+@pytest.mark.parametrize(
+    "direction,enter_signal,leverage",
+    [
+        ("Long", "long_signal_01", None),
+        ("Long", "long_signal_01", 1.0),
+        ("Long", "long_signal_01", 5.0),
+        ("Short", "short_signal_01", 2.0),
+    ],
+)
+@pytest.mark.parametrize("fiat", ["", None])
 def test_send_msg_exit_notification_no_fiat(
-        default_conf, mocker, direction, enter_signal, leverage, time_machine) -> None:
-    del default_conf['fiat_display_currency']
-    time_machine.move_to('2022-05-02 00:00:00 +00:00', tick=False)
+    default_conf, mocker, direction, enter_signal, leverage, time_machine, fiat
+) -> None:
+    if fiat is None:
+        del default_conf["fiat_display_currency"]
+    else:
+        default_conf["fiat_display_currency"] = fiat
+    time_machine.move_to("2022-05-02 00:00:00 +00:00", tick=False)
     telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
 
-    telegram.send_msg({
-        'type': RPCMessageType.EXIT,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'KEY/ETH',
-        'gain': 'loss',
-        'leverage': leverage,
-        'direction': direction,
-        'sub_trade': False,
-        'order_rate': 3.201e-04,
-        'amount': 1333.3333333333335,
-        'order_type': 'limit',
-        'open_rate': 7.5e-04,
-        'current_rate': 3.201e-04,
-        'profit_amount': -0.05746268,
-        'profit_ratio': -0.57405275,
-        'stake_currency': 'ETH',
-        'quote_currency': 'ETH',
-        'base_currency': 'KEY',
-        'fiat_currency': 'USD',
-        'enter_tag': enter_signal,
-        'exit_reason': ExitType.STOP_LOSS.value,
-        'open_date': dt_now() - timedelta(hours=2, minutes=35, seconds=3),
-        'close_date': dt_now(),
-    })
+    telegram.send_msg(
+        {
+            "type": RPCMessageType.EXIT,
+            "trade_id": 1,
+            "exchange": "Binance",
+            "pair": "KEY/ETH",
+            "gain": "loss",
+            "leverage": leverage,
+            "direction": direction,
+            "sub_trade": False,
+            "order_rate": 3.201e-04,
+            "amount": 1333.3333333333335,
+            "order_type": "limit",
+            "open_rate": 7.5e-04,
+            "current_rate": 3.201e-04,
+            "profit_amount": -0.05746268,
+            "profit_ratio": -0.57405275,
+            "stake_currency": "ETH",
+            "quote_currency": "ETH",
+            "base_currency": "KEY",
+            "fiat_currency": "USD",
+            "enter_tag": enter_signal,
+            "exit_reason": ExitType.STOP_LOSS.value,
+            "open_date": dt_now() - timedelta(hours=2, minutes=35, seconds=3),
+            "close_date": dt_now(),
+        }
+    )
 
-    leverage_text = f' ({leverage:.3g}x)' if leverage and leverage != 1.0 else ''
+    leverage_text = f" ({leverage:.3g}x)" if leverage and leverage != 1.0 else ""
     assert msg_mock.call_args[0][0] == (
-        '\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n'
-        '*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH)`\n'
-        f'*Enter Tag:* `{enter_signal}`\n'
-        '*Exit Reason:* `stop_loss`\n'
-        f'*Direction:* `{direction}'
-        f'{leverage_text}`\n'
-        '*Amount:* `1333.33333333`\n'
-        '*Open Rate:* `0.00075 ETH`\n'
-        '*Current Rate:* `0.00032 ETH`\n'
-        '*Exit Rate:* `0.00032 ETH`\n'
-        '*Duration:* `2:35:03 (155.1 min)`'
+        "\N{WARNING SIGN} *Binance (dry):* Exiting KEY/ETH (#1)\n"
+        "*Unrealized Profit:* `-57.41% (loss: -0.05746 ETH)`\n"
+        f"*Enter Tag:* `{enter_signal}`\n"
+        "*Exit Reason:* `stop_loss`\n"
+        f"*Direction:* `{direction}"
+        f"{leverage_text}`\n"
+        "*Amount:* `1333.33333333`\n"
+        "*Open Rate:* `0.00075 ETH`\n"
+        "*Current Rate:* `0.00032 ETH`\n"
+        "*Exit Rate:* `0.00032 ETH`\n"
+        "*Duration:* `2:35:03 (155.1 min)`"
     )
 
 
-@pytest.mark.parametrize('msg,expected', [
-    ({'profit_ratio': 0.201, 'exit_reason': 'roi'}, "\N{ROCKET}"),
-    ({'profit_ratio': 0.051, 'exit_reason': 'roi'}, "\N{ROCKET}"),
-    ({'profit_ratio': 0.0256, 'exit_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"),
-    ({'profit_ratio': 0.01, 'exit_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"),
-    ({'profit_ratio': 0.0, 'exit_reason': 'roi'}, "\N{EIGHT SPOKED ASTERISK}"),
-    ({'profit_ratio': -0.05, 'exit_reason': 'stop_loss'}, "\N{WARNING SIGN}"),
-    ({'profit_ratio': -0.02, 'exit_reason': 'sell_signal'}, "\N{CROSS MARK}"),
-])
+@pytest.mark.parametrize(
+    "msg,expected",
+    [
+        ({"profit_ratio": 0.201, "exit_reason": "roi"}, "\N{ROCKET}"),
+        ({"profit_ratio": 0.051, "exit_reason": "roi"}, "\N{ROCKET}"),
+        ({"profit_ratio": 0.0256, "exit_reason": "roi"}, "\N{EIGHT SPOKED ASTERISK}"),
+        ({"profit_ratio": 0.01, "exit_reason": "roi"}, "\N{EIGHT SPOKED ASTERISK}"),
+        ({"profit_ratio": 0.0, "exit_reason": "roi"}, "\N{EIGHT SPOKED ASTERISK}"),
+        ({"profit_ratio": -0.05, "exit_reason": "stop_loss"}, "\N{WARNING SIGN}"),
+        ({"profit_ratio": -0.02, "exit_reason": "sell_signal"}, "\N{CROSS MARK}"),
+    ],
+)
 def test__exit_emoji(default_conf, mocker, msg, expected):
-    del default_conf['fiat_display_currency']
+    del default_conf["fiat_display_currency"]
 
     telegram, _, _ = get_telegram_testobject(mocker, default_conf)
 
@@ -2544,7 +2640,7 @@ def test__exit_emoji(default_conf, mocker, msg, expected):
 
 
 async def test_telegram__send_msg(default_conf, mocker, caplog) -> None:
-    mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock())
+    mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock())
     bot = MagicMock()
     bot.send_message = AsyncMock()
     bot.edit_message_text = AsyncMock()
@@ -2552,64 +2648,68 @@ async def test_telegram__send_msg(default_conf, mocker, caplog) -> None:
     telegram._app = MagicMock()
     telegram._app.bot = bot
 
-    await telegram._send_msg('test')
+    await telegram._send_msg("test")
     assert len(bot.method_calls) == 1
 
     # Test update
     query = MagicMock()
     query.edit_message_text = AsyncMock()
-    await telegram._send_msg('test', callback_path="DeadBeef", query=query, reload_able=True)
+    await telegram._send_msg("test", callback_path="DeadBeef", query=query, reload_able=True)
     assert query.edit_message_text.call_count == 1
-    assert "Updated: " in query.edit_message_text.call_args_list[0][1]['text']
+    assert "Updated: " in query.edit_message_text.call_args_list[0][1]["text"]
 
     query.edit_message_text = AsyncMock(side_effect=BadRequest("not modified"))
-    await telegram._send_msg('test', callback_path="DeadBeef", query=query)
+    await telegram._send_msg("test", callback_path="DeadBeef", query=query)
     assert query.edit_message_text.call_count == 1
     assert not log_has_re(r"TelegramError: .*", caplog)
 
     query.edit_message_text = AsyncMock(side_effect=BadRequest(""))
-    await telegram._send_msg('test2', callback_path="DeadBeef", query=query)
+    await telegram._send_msg("test2", callback_path="DeadBeef", query=query)
     assert query.edit_message_text.call_count == 1
     assert log_has_re(r"TelegramError: .*", caplog)
 
     query.edit_message_text = AsyncMock(side_effect=TelegramError("DeadBEEF"))
-    await telegram._send_msg('test3', callback_path="DeadBeef", query=query)
+    await telegram._send_msg("test3", callback_path="DeadBeef", query=query)
 
     assert log_has_re(r"TelegramError: DeadBEEF! Giving up.*", caplog)
 
 
 async def test__send_msg_network_error(default_conf, mocker, caplog) -> None:
-    mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock())
+    mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock())
     bot = MagicMock()
-    bot.send_message = MagicMock(side_effect=NetworkError('Oh snap'))
+    bot.send_message = MagicMock(side_effect=NetworkError("Oh snap"))
     telegram, _, _ = get_telegram_testobject(mocker, default_conf, mock=False)
     telegram._app = MagicMock()
     telegram._app.bot = bot
 
-    telegram._config['telegram']['enabled'] = True
-    await telegram._send_msg('test')
+    telegram._config["telegram"]["enabled"] = True
+    await telegram._send_msg("test")
 
     # Bot should've tried to send it twice
     assert len(bot.method_calls) == 2
-    assert log_has('Telegram NetworkError: Oh snap! Trying one more time.', caplog)
+    assert log_has("Telegram NetworkError: Oh snap! Trying one more time.", caplog)
 
 
 @pytest.mark.filterwarnings("ignore:.*ChatPermissions")
 async def test__send_msg_keyboard(default_conf, mocker, caplog) -> None:
-    mocker.patch('freqtrade.rpc.telegram.Telegram._init', MagicMock())
+    mocker.patch("freqtrade.rpc.telegram.Telegram._init", MagicMock())
     bot = MagicMock()
     bot.send_message = AsyncMock()
     freqtradebot = get_patched_freqtradebot(mocker, default_conf)
     rpc = RPC(freqtradebot)
 
-    invalid_keys_list = [['/not_valid', '/profit'], ['/daily'], ['/alsoinvalid']]
-    default_keys_list = [['/daily', '/profit', '/balance'],
-                         ['/status', '/status table', '/performance'],
-                         ['/count', '/start', '/stop', '/help']]
+    invalid_keys_list = [["/not_valid", "/profit"], ["/daily"], ["/alsoinvalid"]]
+    default_keys_list = [
+        ["/daily", "/profit", "/balance"],
+        ["/status", "/status table", "/performance"],
+        ["/count", "/start", "/stop", "/help"],
+    ]
     default_keyboard = ReplyKeyboardMarkup(default_keys_list)
 
-    custom_keys_list = [['/daily', '/stats', '/balance', '/profit', '/profit 5'],
-                        ['/count', '/start', '/reload_config', '/help']]
+    custom_keys_list = [
+        ["/daily", "/stats", "/balance", "/profit", "/profit 5"],
+        ["/count", "/start", "/reload_config", "/help"],
+    ]
     custom_keyboard = ReplyKeyboardMarkup(custom_keys_list)
 
     def init_telegram(freqtradebot):
@@ -2619,31 +2719,39 @@ async def test__send_msg_keyboard(default_conf, mocker, caplog) -> None:
         return telegram
 
     # no keyboard in config -> default keyboard
-    freqtradebot.config['telegram']['enabled'] = True
+    freqtradebot.config["telegram"]["enabled"] = True
     telegram = init_telegram(freqtradebot)
-    await telegram._send_msg('test')
-    used_keyboard = bot.send_message.call_args[1]['reply_markup']
+    await telegram._send_msg("test")
+    used_keyboard = bot.send_message.call_args[1]["reply_markup"]
     assert used_keyboard == default_keyboard
 
     # invalid keyboard in config -> default keyboard
-    freqtradebot.config['telegram']['enabled'] = True
-    freqtradebot.config['telegram']['keyboard'] = invalid_keys_list
-    err_msg = re.escape("config.telegram.keyboard: Invalid commands for custom "
-                        "Telegram keyboard: ['/not_valid', '/alsoinvalid']"
-                        "\nvalid commands are: ") + r"*"
+    freqtradebot.config["telegram"]["enabled"] = True
+    freqtradebot.config["telegram"]["keyboard"] = invalid_keys_list
+    err_msg = (
+        re.escape(
+            "config.telegram.keyboard: Invalid commands for custom "
+            "Telegram keyboard: ['/not_valid', '/alsoinvalid']"
+            "\nvalid commands are: "
+        )
+        + r"*"
+    )
     with pytest.raises(OperationalException, match=err_msg):
         telegram = init_telegram(freqtradebot)
 
     # valid keyboard in config -> custom keyboard
-    freqtradebot.config['telegram']['enabled'] = True
-    freqtradebot.config['telegram']['keyboard'] = custom_keys_list
+    freqtradebot.config["telegram"]["enabled"] = True
+    freqtradebot.config["telegram"]["keyboard"] = custom_keys_list
     telegram = init_telegram(freqtradebot)
-    await telegram._send_msg('test')
-    used_keyboard = bot.send_message.call_args[1]['reply_markup']
+    await telegram._send_msg("test")
+    used_keyboard = bot.send_message.call_args[1]["reply_markup"]
     assert used_keyboard == custom_keyboard
-    assert log_has("using custom keyboard from config.json: "
-                   "[['/daily', '/stats', '/balance', '/profit', '/profit 5'], ['/count', "
-                   "'/start', '/reload_config', '/help']]", caplog)
+    assert log_has(
+        "using custom keyboard from config.json: "
+        "[['/daily', '/stats', '/balance', '/profit', '/profit 5'], ['/count', "
+        "'/start', '/reload_config', '/help']]",
+        caplog,
+    )
 
 
 async def test_change_market_direction(default_conf, mocker, update) -> None:
@@ -2660,7 +2768,6 @@ async def test_change_market_direction(default_conf, mocker, update) -> None:
 
 
 async def test_telegram_list_custom_data(default_conf_usdt, update, ticker, fee, mocker) -> None:
-
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
@@ -2674,11 +2781,11 @@ async def test_telegram_list_custom_data(default_conf_usdt, update, ticker, fee,
     context = MagicMock()
     await telegram._list_custom_data(update=update, context=context)
     assert msg_mock.call_count == 1
-    assert 'Trade-id not set.' in msg_mock.call_args_list[0][0][0]
+    assert "Trade-id not set." in msg_mock.call_args_list[0][0][0]
     msg_mock.reset_mock()
 
     #
-    context.args = ['1']
+    context.args = ["1"]
     await telegram._list_custom_data(update=update, context=context)
     assert msg_mock.call_count == 1
     assert (
@@ -2688,8 +2795,8 @@ async def test_telegram_list_custom_data(default_conf_usdt, update, ticker, fee,
 
     # Add some custom data
     trade1 = Trade.get_trades_proxy()[0]
-    trade1.set_custom_data('test_int', 1)
-    trade1.set_custom_data('test_dict', {'test': 'dict'})
+    trade1.set_custom_data("test_int", 1)
+    trade1.set_custom_data("test_dict", {"test": "dict"})
     Trade.commit()
     context.args = [f"{trade1.id}"]
     await telegram._list_custom_data(update=update, context=context)
@@ -2697,9 +2804,11 @@ async def test_telegram_list_custom_data(default_conf_usdt, update, ticker, fee,
     assert "Found custom-data entries: " in msg_mock.call_args_list[0][0][0]
     assert (
         "*Key:* `test_int`\n*ID:* `1`\n*Trade ID:* `1`\n*Type:* `int`\n"
-        "*Value:* `1`\n*Create Date:*") in msg_mock.call_args_list[1][0][0]
+        "*Value:* `1`\n*Create Date:*"
+    ) in msg_mock.call_args_list[1][0][0]
     assert (
-        '*Key:* `test_dict`\n*ID:* `2`\n*Trade ID:* `1`\n*Type:* `dict`\n'
-        '*Value:* `{"test": "dict"}`\n*Create Date:* `') in msg_mock.call_args_list[2][0][0]
+        "*Key:* `test_dict`\n*ID:* `2`\n*Trade ID:* `1`\n*Type:* `dict`\n"
+        '*Value:* `{"test": "dict"}`\n*Create Date:* `'
+    ) in msg_mock.call_args_list[2][0][0]
 
     msg_mock.reset_mock()
diff --git a/tests/rpc/test_rpc_webhook.py b/tests/rpc/test_rpc_webhook.py
index 7d88056e4..dc33f965f 100644
--- a/tests/rpc/test_rpc_webhook.py
+++ b/tests/rpc/test_rpc_webhook.py
@@ -26,47 +26,43 @@ def get_webhook_dict() -> dict:
             "value2": "limit {limit:8f}",
             "value3": "{stake_amount:8f} {stake_currency}",
             "value4": "leverage {leverage:.1f}",
-            "value5": "direction {direction}"
+            "value5": "direction {direction}",
         },
         "webhookentrycancel": {
             "value1": "Cancelling Open Buy Order for {pair}",
             "value2": "limit {limit:8f}",
             "value3": "{stake_amount:8f} {stake_currency}",
             "value4": "leverage {leverage:.1f}",
-            "value5": "direction {direction}"
+            "value5": "direction {direction}",
         },
         "webhookentryfill": {
             "value1": "Buy Order for {pair} filled",
             "value2": "at {open_rate:8f}",
             "value3": "{stake_amount:8f} {stake_currency}",
             "value4": "leverage {leverage:.1f}",
-            "value5": "direction {direction}"
+            "value5": "direction {direction}",
         },
         "webhookexit": {
             "value1": "Selling {pair}",
             "value2": "limit {limit:8f}",
-            "value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})"
+            "value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})",
         },
         "webhookexitcancel": {
             "value1": "Cancelling Open Sell Order for {pair}",
             "value2": "limit {limit:8f}",
-            "value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})"
+            "value3": "profit: {profit_amount:8f} {stake_currency} ({profit_ratio})",
         },
         "webhookexitfill": {
             "value1": "Sell Order for {pair} filled",
             "value2": "at {close_rate:8f}",
-            "value3": ""
+            "value3": "",
         },
-        "webhookstatus": {
-            "value1": "Status: {status}",
-            "value2": "",
-            "value3": ""
-        }
+        "webhookstatus": {"value1": "Status: {status}", "value2": "", "value3": ""},
     }
 
 
 def test__init__(mocker, default_conf):
-    default_conf['webhook'] = {'enabled': True, 'url': "https://DEADBEEF.com"}
+    default_conf["webhook"] = {"enabled": True, "url": "https://DEADBEEF.com"}
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
     assert webhook._config == default_conf
 
@@ -80,255 +76,290 @@ def test_send_msg_webhook(default_conf, mocker):
     msg_mock = MagicMock()
     mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock)
     msg = {
-        'type': RPCMessageType.ENTRY,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 1.0,
-        'direction': 'Long',
-        'limit': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 1.0,
+        "direction": "Long",
+        "limit": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["entry"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["entry"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["entry"]["value3"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value4"] ==
-            default_conf["webhook"]["entry"]["value4"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value5"] ==
-            default_conf["webhook"]["entry"]["value5"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["entry"]["value1"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["entry"]["value2"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["entry"]["value3"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value4"] == default_conf["webhook"]["entry"]["value4"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value5"] == default_conf["webhook"]["entry"]["value5"].format(
+        **msg
+    )
     # Test short
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.ENTRY,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 2.0,
-        'direction': 'Short',
-        'limit': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 2.0,
+        "direction": "Short",
+        "limit": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["entry"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["entry"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["entry"]["value3"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value4"] ==
-            default_conf["webhook"]["entry"]["value4"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value5"] ==
-            default_conf["webhook"]["entry"]["value5"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["entry"]["value1"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["entry"]["value2"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["entry"]["value3"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value4"] == default_conf["webhook"]["entry"]["value4"].format(
+        **msg
+    )
+    assert msg_mock.call_args[0][0]["value5"] == default_conf["webhook"]["entry"]["value5"].format(
+        **msg
+    )
     # Test buy cancel
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.ENTRY_CANCEL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 1.0,
-        'direction': 'Long',
-        'limit': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY_CANCEL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 1.0,
+        "direction": "Long",
+        "limit": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value3"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookentrycancel"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookentrycancel"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookentrycancel"][
+        "value3"
+    ].format(**msg)
     # Test short cancel
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.ENTRY_CANCEL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 2.0,
-        'direction': 'Short',
-        'limit': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY_CANCEL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 2.0,
+        "direction": "Short",
+        "limit": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value3"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value4"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value4"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value5"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value5"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookentrycancel"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookentrycancel"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookentrycancel"][
+        "value3"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value4"] == default_conf["webhook"]["webhookentrycancel"][
+        "value4"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value5"] == default_conf["webhook"]["webhookentrycancel"][
+        "value5"
+    ].format(**msg)
     # Test buy fill
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.ENTRY_FILL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 1.0,
-        'direction': 'Long',
-        'open_rate': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY_FILL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 1.0,
+        "direction": "Long",
+        "open_rate": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookentryfill"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookentryfill"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookentryfill"]["value3"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value4"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value4"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value5"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value5"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookentryfill"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookentryfill"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookentryfill"][
+        "value3"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value4"] == default_conf["webhook"]["webhookentrycancel"][
+        "value4"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value5"] == default_conf["webhook"]["webhookentrycancel"][
+        "value5"
+    ].format(**msg)
     # Test short fill
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.ENTRY_FILL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'leverage': 2.0,
-        'direction': 'Short',
-        'open_rate': 0.005,
-        'stake_amount': 0.8,
-        'stake_amount_fiat': 500,
-        'stake_currency': 'BTC',
-        'fiat_currency': 'EUR'
+        "type": RPCMessageType.ENTRY_FILL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "leverage": 2.0,
+        "direction": "Short",
+        "open_rate": 0.005,
+        "stake_amount": 0.8,
+        "stake_amount_fiat": 500,
+        "stake_currency": "BTC",
+        "fiat_currency": "EUR",
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookentryfill"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookentryfill"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookentryfill"]["value3"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value4"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value4"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value5"] ==
-            default_conf["webhook"]["webhookentrycancel"]["value5"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookentryfill"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookentryfill"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookentryfill"][
+        "value3"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value4"] == default_conf["webhook"]["webhookentrycancel"][
+        "value4"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value5"] == default_conf["webhook"]["webhookentrycancel"][
+        "value5"
+    ].format(**msg)
     # Test sell
     msg_mock.reset_mock()
 
     msg = {
-        'type': RPCMessageType.EXIT,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'gain': "profit",
-        'limit': 0.005,
-        'amount': 0.8,
-        'order_type': 'limit',
-        'open_rate': 0.004,
-        'current_rate': 0.005,
-        'profit_amount': 0.001,
-        'profit_ratio': 0.20,
-        'stake_currency': 'BTC',
-        'sell_reason': ExitType.STOP_LOSS.value
+        "type": RPCMessageType.EXIT,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "gain": "profit",
+        "limit": 0.005,
+        "amount": 0.8,
+        "order_type": "limit",
+        "open_rate": 0.004,
+        "current_rate": 0.005,
+        "profit_amount": 0.001,
+        "profit_ratio": 0.20,
+        "stake_currency": "BTC",
+        "sell_reason": ExitType.STOP_LOSS.value,
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookexit"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookexit"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookexit"]["value3"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookexit"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookexit"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookexit"][
+        "value3"
+    ].format(**msg)
     # Test sell cancel
     msg_mock.reset_mock()
     msg = {
-        'type': RPCMessageType.EXIT_CANCEL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'gain': "profit",
-        'limit': 0.005,
-        'amount': 0.8,
-        'order_type': 'limit',
-        'open_rate': 0.004,
-        'current_rate': 0.005,
-        'profit_amount': 0.001,
-        'profit_ratio': 0.20,
-        'stake_currency': 'BTC',
-        'sell_reason': ExitType.STOP_LOSS.value
+        "type": RPCMessageType.EXIT_CANCEL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "gain": "profit",
+        "limit": 0.005,
+        "amount": 0.8,
+        "order_type": "limit",
+        "open_rate": 0.004,
+        "current_rate": 0.005,
+        "profit_amount": 0.001,
+        "profit_ratio": 0.20,
+        "stake_currency": "BTC",
+        "sell_reason": ExitType.STOP_LOSS.value,
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookexitcancel"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookexitcancel"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookexitcancel"]["value3"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookexitcancel"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookexitcancel"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookexitcancel"][
+        "value3"
+    ].format(**msg)
     # Test Sell fill
     msg_mock.reset_mock()
     msg = {
-        'type': RPCMessageType.EXIT_FILL,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'gain': "profit",
-        'close_rate': 0.005,
-        'amount': 0.8,
-        'order_type': 'limit',
-        'open_rate': 0.004,
-        'current_rate': 0.005,
-        'profit_amount': 0.001,
-        'profit_ratio': 0.20,
-        'stake_currency': 'BTC',
-        'sell_reason': ExitType.STOP_LOSS.value
+        "type": RPCMessageType.EXIT_FILL,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "gain": "profit",
+        "close_rate": 0.005,
+        "amount": 0.8,
+        "order_type": "limit",
+        "open_rate": 0.004,
+        "current_rate": 0.005,
+        "profit_amount": 0.001,
+        "profit_ratio": 0.20,
+        "stake_currency": "BTC",
+        "sell_reason": ExitType.STOP_LOSS.value,
     }
     webhook.send_msg(msg=msg)
     assert msg_mock.call_count == 1
-    assert (msg_mock.call_args[0][0]["value1"] ==
-            default_conf["webhook"]["webhookexitfill"]["value1"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value2"] ==
-            default_conf["webhook"]["webhookexitfill"]["value2"].format(**msg))
-    assert (msg_mock.call_args[0][0]["value3"] ==
-            default_conf["webhook"]["webhookexitfill"]["value3"].format(**msg))
+    assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookexitfill"][
+        "value1"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookexitfill"][
+        "value2"
+    ].format(**msg)
+    assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookexitfill"][
+        "value3"
+    ].format(**msg)
 
-    for msgtype in [RPCMessageType.STATUS,
-                    RPCMessageType.WARNING,
-                    RPCMessageType.STARTUP]:
+    for msgtype in [RPCMessageType.STATUS, RPCMessageType.WARNING, RPCMessageType.STARTUP]:
         # Test notification
-        msg = {
-            'type': msgtype,
-            'status': 'Unfilled sell order for BTC cancelled due to timeout'
-        }
+        msg = {"type": msgtype, "status": "Unfilled sell order for BTC cancelled due to timeout"}
         msg_mock = MagicMock()
         mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock)
         webhook.send_msg(msg)
         assert msg_mock.call_count == 1
-        assert (msg_mock.call_args[0][0]["value1"] ==
-                default_conf["webhook"]["webhookstatus"]["value1"].format(**msg))
-        assert (msg_mock.call_args[0][0]["value2"] ==
-                default_conf["webhook"]["webhookstatus"]["value2"].format(**msg))
-        assert (msg_mock.call_args[0][0]["value3"] ==
-                default_conf["webhook"]["webhookstatus"]["value3"].format(**msg))
+        assert msg_mock.call_args[0][0]["value1"] == default_conf["webhook"]["webhookstatus"][
+            "value1"
+        ].format(**msg)
+        assert msg_mock.call_args[0][0]["value2"] == default_conf["webhook"]["webhookstatus"][
+            "value2"
+        ].format(**msg)
+        assert msg_mock.call_args[0][0]["value3"] == default_conf["webhook"]["webhookstatus"][
+            "value3"
+        ].format(**msg)
 
 
 def test_exception_send_msg(default_conf, mocker, caplog):
@@ -338,9 +369,8 @@ def test_exception_send_msg(default_conf, mocker, caplog):
     del default_conf["webhook"]["webhookentry"]
 
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
-    webhook.send_msg({'type': RPCMessageType.ENTRY})
-    assert log_has(f"Message type '{RPCMessageType.ENTRY}' not configured for webhooks",
-                   caplog)
+    webhook.send_msg({"type": RPCMessageType.ENTRY})
+    assert log_has(f"Message type '{RPCMessageType.ENTRY}' not configured for webhooks", caplog)
 
     default_conf["webhook"] = get_webhook_dict()
     default_conf["webhook"]["strategy_msg"] = {"value1": "{DEADBEEF:8f}"}
@@ -348,112 +378,103 @@ def test_exception_send_msg(default_conf, mocker, caplog):
     mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock)
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
     msg = {
-        'type': RPCMessageType.STRATEGY_MSG,
-        'msg': 'hello world',
+        "type": RPCMessageType.STRATEGY_MSG,
+        "msg": "hello world",
     }
     webhook.send_msg(msg)
-    assert log_has("Problem calling Webhook. Please check your webhook configuration. "
-                   "Exception: 'DEADBEEF'", caplog)
+    assert log_has(
+        "Problem calling Webhook. Please check your webhook configuration. "
+        "Exception: 'DEADBEEF'",
+        caplog,
+    )
 
     # Test no failure for not implemented but known messagetypes
     for e in RPCMessageType:
-        msg = {
-            'type': e,
-            'status': 'whatever'
-            }
+        msg = {"type": e, "status": "whatever"}
         webhook.send_msg(msg)
 
     # Test no failure for not implemented but known messagetypes
     for e in RPCMessageType:
-        msg = {
-            'type': e,
-            'status': 'whatever'
-            }
+        msg = {"type": e, "status": "whatever"}
         webhook.send_msg(msg)
 
 
 def test__send_msg(default_conf, mocker, caplog):
     default_conf["webhook"] = get_webhook_dict()
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
-    msg = {'value1': 'DEADBEEF',
-           'value2': 'ALIVEBEEF',
-           'value3': 'FREQTRADE'}
+    msg = {"value1": "DEADBEEF", "value2": "ALIVEBEEF", "value3": "FREQTRADE"}
     post = MagicMock()
     mocker.patch("freqtrade.rpc.webhook.post", post)
     webhook._send_msg(msg)
 
     assert post.call_count == 1
-    assert post.call_args[1] == {'data': msg, 'timeout': 10}
-    assert post.call_args[0] == (default_conf['webhook']['url'], )
+    assert post.call_args[1] == {"data": msg, "timeout": 10}
+    assert post.call_args[0] == (default_conf["webhook"]["url"],)
 
     post = MagicMock(side_effect=RequestException)
     mocker.patch("freqtrade.rpc.webhook.post", post)
     webhook._send_msg(msg)
-    assert log_has('Could not call webhook url. Exception: ', caplog)
+    assert log_has("Could not call webhook url. Exception: ", caplog)
 
 
 def test__send_msg_with_json_format(default_conf, mocker, caplog):
     default_conf["webhook"] = get_webhook_dict()
     default_conf["webhook"]["format"] = "json"
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
-    msg = {'text': 'Hello'}
+    msg = {"text": "Hello"}
     post = MagicMock()
     mocker.patch("freqtrade.rpc.webhook.post", post)
     webhook._send_msg(msg)
 
-    assert post.call_args[1] == {'json': msg, 'timeout': 10}
+    assert post.call_args[1] == {"json": msg, "timeout": 10}
 
 
 def test__send_msg_with_raw_format(default_conf, mocker, caplog):
     default_conf["webhook"] = get_webhook_dict()
     default_conf["webhook"]["format"] = "raw"
     webhook = Webhook(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
-    msg = {'data': 'Hello'}
+    msg = {"data": "Hello"}
     post = MagicMock()
     mocker.patch("freqtrade.rpc.webhook.post", post)
     webhook._send_msg(msg)
 
     assert post.call_args[1] == {
-        'data': msg['data'],
-        'headers': {'Content-Type': 'text/plain'},
-        'timeout': 10
+        "data": msg["data"],
+        "headers": {"Content-Type": "text/plain"},
+        "timeout": 10,
     }
 
 
 def test_send_msg_discord(default_conf, mocker):
-
-    default_conf["discord"] = {
-        'enabled': True,
-        'webhook_url': "https://webhookurl..."
-    }
+    default_conf["discord"] = {"enabled": True, "webhook_url": "https://webhookurl..."}
     msg_mock = MagicMock()
     mocker.patch("freqtrade.rpc.webhook.Webhook._send_msg", msg_mock)
     discord = Discord(RPC(get_patched_freqtradebot(mocker, default_conf)), default_conf)
 
     msg = {
-        'type': RPCMessageType.EXIT_FILL,
-        'trade_id': 1,
-        'exchange': 'Binance',
-        'pair': 'ETH/BTC',
-        'direction': 'Long',
-        'gain': "profit",
-        'close_rate': 0.005,
-        'amount': 0.8,
-        'order_type': 'limit',
-        'open_date': datetime.now() - timedelta(days=1),
-        'close_date': datetime.now(),
-        'open_rate': 0.004,
-        'current_rate': 0.005,
-        'profit_amount': 0.001,
-        'profit_ratio': 0.20,
-        'stake_currency': 'BTC',
-        'enter_tag': 'enter_tagggg',
-        'exit_reason': ExitType.STOP_LOSS.value,
+        "type": RPCMessageType.EXIT_FILL,
+        "trade_id": 1,
+        "exchange": "Binance",
+        "pair": "ETH/BTC",
+        "direction": "Long",
+        "gain": "profit",
+        "close_rate": 0.005,
+        "amount": 0.8,
+        "order_type": "limit",
+        "open_date": datetime.now() - timedelta(days=1),
+        "close_date": datetime.now(),
+        "open_rate": 0.004,
+        "current_rate": 0.005,
+        "profit_amount": 0.001,
+        "profit_ratio": 0.20,
+        "stake_currency": "BTC",
+        "enter_tag": "enter_tagggg",
+        "exit_reason": ExitType.STOP_LOSS.value,
     }
     discord.send_msg(msg=msg)
 
     assert msg_mock.call_count == 1
-    assert 'embeds' in msg_mock.call_args_list[0][0][0]
-    assert 'title' in msg_mock.call_args_list[0][0][0]['embeds'][0]
-    assert 'color' in msg_mock.call_args_list[0][0][0]['embeds'][0]
-    assert 'fields' in msg_mock.call_args_list[0][0][0]['embeds'][0]
+    assert "embeds" in msg_mock.call_args_list[0][0][0]
+    assert "title" in msg_mock.call_args_list[0][0][0]["embeds"][0]
+    assert "color" in msg_mock.call_args_list[0][0][0]["embeds"][0]
+    assert "fields" in msg_mock.call_args_list[0][0][0]["embeds"][0]
diff --git a/tests/strategy/strats/broken_strats/broken_futures_strategies.py b/tests/strategy/strats/broken_strats/broken_futures_strategies.py
index bb7ce2b32..b2131e63e 100644
--- a/tests/strategy/strats/broken_strats/broken_futures_strategies.py
+++ b/tests/strategy/strats/broken_strats/broken_futures_strategies.py
@@ -12,7 +12,6 @@ from freqtrade.strategy.interface import IStrategy
 
 
 class TestStrategyNoImplements(IStrategy):
-
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return super().populate_indicators(dataframe, metadata)
 
@@ -26,9 +25,15 @@ class TestStrategyImplementCustomSell(TestStrategyNoImplementSell):
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return super().populate_exit_trend(dataframe, metadata)
 
-    def custom_sell(self, pair: str, trade, current_time: datetime,
-                    current_rate: float, current_profit: float,
-                    **kwargs):
+    def custom_sell(
+        self,
+        pair: str,
+        trade,
+        current_time: datetime,
+        current_rate: float,
+        current_profit: float,
+        **kwargs,
+    ):
         return False
 
 
@@ -36,8 +41,9 @@ class TestStrategyImplementBuyTimeout(TestStrategyNoImplementSell):
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return super().populate_exit_trend(dataframe, metadata)
 
-    def check_buy_timeout(self, pair: str, trade, order: Order,
-                          current_time: datetime, **kwargs) -> bool:
+    def check_buy_timeout(
+        self, pair: str, trade, order: Order, current_time: datetime, **kwargs
+    ) -> bool:
         return False
 
 
@@ -45,6 +51,7 @@ class TestStrategyImplementSellTimeout(TestStrategyNoImplementSell):
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return super().populate_exit_trend(dataframe, metadata)
 
-    def check_sell_timeout(self, pair: str, trade, order: Order,
-                           current_time: datetime, **kwargs) -> bool:
+    def check_sell_timeout(
+        self, pair: str, trade, order: Order, current_time: datetime, **kwargs
+    ) -> bool:
         return False
diff --git a/tests/strategy/strats/broken_strats/legacy_strategy_v1.py b/tests/strategy/strats/broken_strats/legacy_strategy_v1.py
index f3b8c2696..b0d7a2ae6 100644
--- a/tests/strategy/strats/broken_strats/legacy_strategy_v1.py
+++ b/tests/strategy/strats/broken_strats/legacy_strategy_v1.py
@@ -6,25 +6,16 @@ from freqtrade.strategy import IStrategy
 
 # Dummy strategy - no longer loads but raises an exception.
 class TestStrategyLegacyV1(IStrategy):
-
-    minimal_roi = {
-        "40": 0.0,
-        "30": 0.01,
-        "20": 0.02,
-        "0": 0.04
-    }
+    minimal_roi = {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04}
     stoploss = -0.10
 
-    timeframe = '5m'
+    timeframe = "5m"
 
     def populate_indicators(self, dataframe: DataFrame) -> DataFrame:
-
         return dataframe
 
     def populate_buy_trend(self, dataframe: DataFrame) -> DataFrame:
-
         return dataframe
 
     def populate_sell_trend(self, dataframe: DataFrame) -> DataFrame:
-
         return dataframe
diff --git a/tests/strategy/strats/freqai_rl_test_strat.py b/tests/strategy/strats/freqai_rl_test_strat.py
index 2bf4aaa30..359ac764d 100644
--- a/tests/strategy/strats/freqai_rl_test_strat.py
+++ b/tests/strategy/strats/freqai_rl_test_strat.py
@@ -25,22 +25,20 @@ class freqai_rl_test_strat(IStrategy):
     startup_candle_count: int = 300
     can_short = False
 
-    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
-                                       metadata: Dict, **kwargs):
-
+    def feature_engineering_expand_all(
+        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
+    ):
         dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
 
         return dataframe
 
     def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-pct-change"] = dataframe["close"].pct_change()
         dataframe["%-raw_volume"] = dataframe["volume"]
 
         return dataframe
 
     def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
         dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
 
@@ -52,19 +50,16 @@ class freqai_rl_test_strat(IStrategy):
         return dataframe
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["&-action"] = 0
 
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         dataframe = self.freqai.start(dataframe, metadata, self)
 
         return dataframe
 
     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
         enter_long_conditions = [df["do_predict"] == 1, df["&-action"] == 1]
 
         if enter_long_conditions:
diff --git a/tests/strategy/strats/freqai_test_classifier.py b/tests/strategy/strats/freqai_test_classifier.py
index a68a87b2a..ab8ab87cb 100644
--- a/tests/strategy/strats/freqai_test_classifier.py
+++ b/tests/strategy/strats/freqai_test_classifier.py
@@ -57,9 +57,9 @@ class freqai_test_classifier(IStrategy):
                 informative_pairs.append((pair, tf))
         return informative_pairs
 
-    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
-                                       metadata: Dict, **kwargs):
-
+    def feature_engineering_expand_all(
+        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
+    ):
         dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
         dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
         dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
@@ -67,7 +67,6 @@ class freqai_test_classifier(IStrategy):
         return dataframe
 
     def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-pct-change"] = dataframe["close"].pct_change()
         dataframe["%-raw_volume"] = dataframe["volume"]
         dataframe["%-raw_price"] = dataframe["close"]
@@ -75,7 +74,6 @@ class freqai_test_classifier(IStrategy):
         return dataframe
 
     def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
         dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
 
@@ -83,13 +81,13 @@ class freqai_test_classifier(IStrategy):
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
         self.freqai.class_names = ["down", "up"]
-        dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-100) >
-                                              dataframe["close"], 'up', 'down')
+        dataframe["&s-up_or_down"] = np.where(
+            dataframe["close"].shift(-100) > dataframe["close"], "up", "down"
+        )
 
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         self.freqai_info = self.config["freqai"]
 
         dataframe = self.freqai.start(dataframe, metadata, self)
@@ -97,15 +95,14 @@ class freqai_test_classifier(IStrategy):
         return dataframe
 
     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
-        enter_long_conditions = [df['&s-up_or_down'] == 'up']
+        enter_long_conditions = [df["&s-up_or_down"] == "up"]
 
         if enter_long_conditions:
             df.loc[
                 reduce(lambda x, y: x & y, enter_long_conditions), ["enter_long", "enter_tag"]
             ] = (1, "long")
 
-        enter_short_conditions = [df['&s-up_or_down'] == 'down']
+        enter_short_conditions = [df["&s-up_or_down"] == "down"]
 
         if enter_short_conditions:
             df.loc[
@@ -115,5 +112,4 @@ class freqai_test_classifier(IStrategy):
         return df
 
     def populate_exit_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
         return df
diff --git a/tests/strategy/strats/freqai_test_multimodel_classifier_strat.py b/tests/strategy/strats/freqai_test_multimodel_classifier_strat.py
index b2ddc21e3..ef32edf2a 100644
--- a/tests/strategy/strats/freqai_test_multimodel_classifier_strat.py
+++ b/tests/strategy/strats/freqai_test_multimodel_classifier_strat.py
@@ -44,9 +44,9 @@ class freqai_test_multimodel_classifier_strat(IStrategy):
     )
     max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
 
-    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
-                                       metadata: Dict, **kwargs):
-
+    def feature_engineering_expand_all(
+        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
+    ):
         dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
         dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
         dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
@@ -54,7 +54,6 @@ class freqai_test_multimodel_classifier_strat(IStrategy):
         return dataframe
 
     def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-pct-change"] = dataframe["close"].pct_change()
         dataframe["%-raw_volume"] = dataframe["volume"]
         dataframe["%-raw_price"] = dataframe["close"]
@@ -62,24 +61,23 @@ class freqai_test_multimodel_classifier_strat(IStrategy):
         return dataframe
 
     def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
         dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
 
         return dataframe
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
+        dataframe["&s-up_or_down"] = np.where(
+            dataframe["close"].shift(-50) > dataframe["close"], "up", "down"
+        )
 
-        dataframe['&s-up_or_down'] = np.where(dataframe["close"].shift(-50) >
-                                              dataframe["close"], 'up', 'down')
-
-        dataframe['&s-up_or_down2'] = np.where(dataframe["close"].shift(-50) >
-                                               dataframe["close"], 'up2', 'down2')
+        dataframe["&s-up_or_down2"] = np.where(
+            dataframe["close"].shift(-50) > dataframe["close"], "up2", "down2"
+        )
 
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         self.freqai_info = self.config["freqai"]
 
         dataframe = self.freqai.start(dataframe, metadata, self)
@@ -89,7 +87,6 @@ class freqai_test_multimodel_classifier_strat(IStrategy):
         return dataframe
 
     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
         enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
 
         if enter_long_conditions:
diff --git a/tests/strategy/strats/freqai_test_multimodel_strat.py b/tests/strategy/strats/freqai_test_multimodel_strat.py
index 5b09598a5..46df7e275 100644
--- a/tests/strategy/strats/freqai_test_multimodel_strat.py
+++ b/tests/strategy/strats/freqai_test_multimodel_strat.py
@@ -43,9 +43,9 @@ class freqai_test_multimodel_strat(IStrategy):
     )
     max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
 
-    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
-                                       metadata: Dict, **kwargs):
-
+    def feature_engineering_expand_all(
+        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
+    ):
         dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
         dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
         dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
@@ -53,7 +53,6 @@ class freqai_test_multimodel_strat(IStrategy):
         return dataframe
 
     def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-pct-change"] = dataframe["close"].pct_change()
         dataframe["%-raw_volume"] = dataframe["volume"]
         dataframe["%-raw_price"] = dataframe["close"]
@@ -61,14 +60,12 @@ class freqai_test_multimodel_strat(IStrategy):
         return dataframe
 
     def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
         dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
 
         return dataframe
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["&-s_close"] = (
             dataframe["close"]
             .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
@@ -76,15 +73,14 @@ class freqai_test_multimodel_strat(IStrategy):
             .mean()
             / dataframe["close"]
             - 1
-            )
+        )
 
         dataframe["&-s_range"] = (
             dataframe["close"]
             .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
             .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
             .max()
-            -
-            dataframe["close"]
+            - dataframe["close"]
             .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
             .rolling(self.freqai_info["feature_parameters"]["label_period_candles"])
             .min()
@@ -93,7 +89,6 @@ class freqai_test_multimodel_strat(IStrategy):
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         self.freqai_info = self.config["freqai"]
 
         dataframe = self.freqai.start(dataframe, metadata, self)
@@ -103,7 +98,6 @@ class freqai_test_multimodel_strat(IStrategy):
         return dataframe
 
     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
         enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
 
         if enter_long_conditions:
diff --git a/tests/strategy/strats/freqai_test_strat.py b/tests/strategy/strats/freqai_test_strat.py
index 6db308406..90c4642ba 100644
--- a/tests/strategy/strats/freqai_test_strat.py
+++ b/tests/strategy/strats/freqai_test_strat.py
@@ -43,9 +43,9 @@ class freqai_test_strat(IStrategy):
     )
     max_roi_time_long = IntParameter(0, 800, default=400, space="sell", optimize=False, load=True)
 
-    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
-                                       metadata: Dict, **kwargs):
-
+    def feature_engineering_expand_all(
+        self, dataframe: DataFrame, period: int, metadata: Dict, **kwargs
+    ):
         dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
         dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
         dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
@@ -53,7 +53,6 @@ class freqai_test_strat(IStrategy):
         return dataframe
 
     def feature_engineering_expand_basic(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-pct-change"] = dataframe["close"].pct_change()
         dataframe["%-raw_volume"] = dataframe["volume"]
         dataframe["%-raw_price"] = dataframe["close"]
@@ -61,14 +60,12 @@ class freqai_test_strat(IStrategy):
         return dataframe
 
     def feature_engineering_standard(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["%-day_of_week"] = dataframe["date"].dt.dayofweek
         dataframe["%-hour_of_day"] = dataframe["date"].dt.hour
 
         return dataframe
 
     def set_freqai_targets(self, dataframe: DataFrame, metadata: Dict, **kwargs):
-
         dataframe["&-s_close"] = (
             dataframe["close"]
             .shift(-self.freqai_info["feature_parameters"]["label_period_candles"])
@@ -76,12 +73,11 @@ class freqai_test_strat(IStrategy):
             .mean()
             / dataframe["close"]
             - 1
-            )
+        )
 
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         self.freqai_info = self.config["freqai"]
 
         dataframe = self.freqai.start(dataframe, metadata, self)
@@ -91,7 +87,6 @@ class freqai_test_strat(IStrategy):
         return dataframe
 
     def populate_entry_trend(self, df: DataFrame, metadata: dict) -> DataFrame:
-
         enter_long_conditions = [df["do_predict"] == 1, df["&-s_close"] > df["target_roi"]]
 
         if enter_long_conditions:
diff --git a/tests/strategy/strats/hyperoptable_strategy.py b/tests/strategy/strats/hyperoptable_strategy.py
index d05e8ead2..3a2b210be 100644
--- a/tests/strategy/strats/hyperoptable_strategy.py
+++ b/tests/strategy/strats/hyperoptable_strategy.py
@@ -17,20 +17,18 @@ class HyperoptableStrategy(StrategyTestV3):
     """
 
     buy_params = {
-        'buy_rsi': 35,
+        "buy_rsi": 35,
         # Intentionally not specified, so "default" is tested
         # 'buy_plusdi': 0.4
     }
 
-    sell_params = {
-        'sell_rsi': 74,
-        'sell_minusdi': 0.4
-    }
+    sell_params = {"sell_rsi": 74, "sell_minusdi": 0.4}
 
-    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space='buy')
-    sell_rsi = IntParameter(low=50, high=100, default=70, space='sell')
-    sell_minusdi = DecimalParameter(low=0, high=1, default=0.5001, decimals=3, space='sell',
-                                    load=False)
+    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space="buy")
+    sell_rsi = IntParameter(low=50, high=100, default=70, space="sell")
+    sell_minusdi = DecimalParameter(
+        low=0, high=1, default=0.5001, decimals=3, space="sell", load=False
+    )
     protection_enabled = BooleanParameter(default=True)
     protection_cooldown_lookback = IntParameter([0, 50], default=30)
 
@@ -43,16 +41,18 @@ class HyperoptableStrategy(StrategyTestV3):
     def protections(self):
         prot = []
         if self.protection_enabled.value:
-            prot.append({
-                "method": "CooldownPeriod",
-                "stop_duration_candles": self.protection_cooldown_lookback.value
-            })
+            prot.append(
+                {
+                    "method": "CooldownPeriod",
+                    "stop_duration_candles": self.protection_cooldown_lookback.value,
+                }
+            )
         return prot
 
     bot_loop_started = False
     bot_started = False
 
-    def bot_loop_start(self):
+    def bot_loop_start(self, **kwargs):
         self.bot_loop_started = True
 
     def bot_start(self, **kwargs) -> None:
@@ -60,7 +60,7 @@ class HyperoptableStrategy(StrategyTestV3):
         Parameters can also be defined here ...
         """
         self.bot_started = True
-        self.buy_rsi = IntParameter([0, 50], default=30, space='buy')
+        self.buy_rsi = IntParameter([0, 50], default=30, space="buy")
 
     def informative_pairs(self):
         """
@@ -84,16 +84,14 @@ class HyperoptableStrategy(StrategyTestV3):
         """
         dataframe.loc[
             (
-                (dataframe['rsi'] < self.buy_rsi.value) &
-                (dataframe['fastd'] < 35) &
-                (dataframe['adx'] > 30) &
-                (dataframe['plus_di'] > self.buy_plusdi.value)
-            ) |
-            (
-                (dataframe['adx'] > 65) &
-                (dataframe['plus_di'] > self.buy_plusdi.value)
-            ),
-            'buy'] = 1
+                (dataframe["rsi"] < self.buy_rsi.value)
+                & (dataframe["fastd"] < 35)
+                & (dataframe["adx"] > 30)
+                & (dataframe["plus_di"] > self.buy_plusdi.value)
+            )
+            | ((dataframe["adx"] > 65) & (dataframe["plus_di"] > self.buy_plusdi.value)),
+            "buy",
+        ] = 1
 
         return dataframe
 
@@ -107,15 +105,13 @@ class HyperoptableStrategy(StrategyTestV3):
         dataframe.loc[
             (
                 (
-                    (qtpylib.crossed_above(dataframe['rsi'], self.sell_rsi.value)) |
-                    (qtpylib.crossed_above(dataframe['fastd'], 70))
-                ) &
-                (dataframe['adx'] > 10) &
-                (dataframe['minus_di'] > 0)
-            ) |
-            (
-                (dataframe['adx'] > 70) &
-                (dataframe['minus_di'] > self.sell_minusdi.value)
-            ),
-            'sell'] = 1
+                    (qtpylib.crossed_above(dataframe["rsi"], self.sell_rsi.value))
+                    | (qtpylib.crossed_above(dataframe["fastd"], 70))
+                )
+                & (dataframe["adx"] > 10)
+                & (dataframe["minus_di"] > 0)
+            )
+            | ((dataframe["adx"] > 70) & (dataframe["minus_di"] > self.sell_minusdi.value)),
+            "sell",
+        ] = 1
         return dataframe
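The reformatted parameter block above preserves the behaviour under test: `buy_rsi` is supplied through `buy_params`, `buy_plusdi` is intentionally omitted so its declared default is used, and `sell_minusdi` is declared with `load=False`. A rough standalone sketch of that resolution logic (illustrative only, not freqtrade's actual implementation) could look like:

```python
from dataclasses import dataclass
from typing import Any, Dict


@dataclass
class DemoParameter:
    """Hypothetical stand-in for freqtrade's hyperopt parameter classes."""

    default: Any
    load: bool = True

    def resolve(self, params: Dict[str, Any], name: str) -> Any:
        # Values from the params dict win, but only if the parameter is loadable.
        if self.load and name in params:
            return params[name]
        return self.default


buy_params = {"buy_rsi": 35}
buy_rsi = DemoParameter(default=30)
buy_plusdi = DemoParameter(default=0.5)
sell_minusdi = DemoParameter(default=0.5001, load=False)

print(buy_rsi.resolve(buy_params, "buy_rsi"))        # 35 - taken from buy_params
print(buy_plusdi.resolve(buy_params, "buy_plusdi"))  # 0.5 - falls back to the default
print(sell_minusdi.resolve({"sell_minusdi": 0.4}, "sell_minusdi"))  # 0.5001 - load=False
```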
diff --git a/tests/strategy/strats/hyperoptable_strategy_v2.py b/tests/strategy/strats/hyperoptable_strategy_v2.py
index 94a15b456..40e139e1f 100644
--- a/tests/strategy/strats/hyperoptable_strategy_v2.py
+++ b/tests/strategy/strats/hyperoptable_strategy_v2.py
@@ -15,20 +15,22 @@ class HyperoptableStrategyV2(StrategyTestV2):
     """
 
     buy_params = {
-        'buy_rsi': 35,
+        "buy_rsi": 35,
         # Intentionally not specified, so "default" is tested
         # 'buy_plusdi': 0.4
     }
 
     sell_params = {
-        'sell_rsi': 74,
-        'sell_minusdi': 0.4
+        # Sell parameters
+        "sell_rsi": 74,
+        "sell_minusdi": 0.4,
     }
 
-    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space='buy')
-    sell_rsi = IntParameter(low=50, high=100, default=70, space='sell')
-    sell_minusdi = DecimalParameter(low=0, high=1, default=0.5001, decimals=3, space='sell',
-                                    load=False)
+    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space="buy")
+    sell_rsi = IntParameter(low=50, high=100, default=70, space="sell")
+    sell_minusdi = DecimalParameter(
+        low=0, high=1, default=0.5001, decimals=3, space="sell", load=False
+    )
     protection_enabled = BooleanParameter(default=True)
     protection_cooldown_lookback = IntParameter([0, 50], default=30)
 
@@ -36,19 +38,21 @@ class HyperoptableStrategyV2(StrategyTestV2):
     def protections(self):
         prot = []
         if self.protection_enabled.value:
-            prot.append({
-                "method": "CooldownPeriod",
-                "stop_duration_candles": self.protection_cooldown_lookback.value
-            })
+            prot.append(
+                {
+                    "method": "CooldownPeriod",
+                    "stop_duration_candles": self.protection_cooldown_lookback.value,
+                }
+            )
         return prot
 
     bot_loop_started = False
 
-    def bot_loop_start(self):
+    def bot_loop_start(self, **kwargs):
         self.bot_loop_started = True
 
     def bot_start(self, **kwargs) -> None:
         """
         Parameters can also be defined here ...
         """
-        self.buy_rsi = IntParameter([0, 50], default=30, space='buy')
+        self.buy_rsi = IntParameter([0, 50], default=30, space="buy")
diff --git a/tests/strategy/strats/informative_decorator_strategy.py b/tests/strategy/strats/informative_decorator_strategy.py
index f34eddc69..7414981e3 100644
--- a/tests/strategy/strats/informative_decorator_strategy.py
+++ b/tests/strategy/strats/informative_decorator_strategy.py
@@ -13,72 +13,73 @@ class InformativeDecoratorTest(IStrategy):
     or strategy repository https://github.com/freqtrade/freqtrade-strategies
     for samples and inspiration.
     """
+
     INTERFACE_VERSION = 2
     stoploss = -0.10
-    timeframe = '5m'
+    timeframe = "5m"
     startup_candle_count: int = 20
 
     def informative_pairs(self):
         # Intentionally return 2 tuples, must be converted to 3 in compatibility code
         return [
-            ('NEO/USDT', '5m'),
-            ('NEO/USDT', '15m', ''),
-            ('NEO/USDT', '2h', 'futures'),
-            ]
+            ("NEO/USDT", "5m"),
+            ("NEO/USDT", "15m", ""),
+            ("NEO/USDT", "2h", "futures"),
+        ]
 
     def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['buy'] = 0
+        dataframe["buy"] = 0
         return dataframe
 
     def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['sell'] = 0
+        dataframe["sell"] = 0
         return dataframe
 
     # Decorator stacking test.
-    @informative('30m')
-    @informative('1h')
+    @informative("30m")
+    @informative("1h")
     def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
     # Simple informative test.
-    @informative('1h', 'NEO/{stake}')
+    @informative("1h", "NEO/{stake}")
     def populate_indicators_neo_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
-    @informative('1h', '{base}/BTC')
+    @informative("1h", "{base}/BTC")
     def populate_indicators_base_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
     # Quote currency different from stake currency test.
-    @informative('1h', 'ETH/BTC', candle_type='spot')
+    @informative("1h", "ETH/BTC", candle_type="spot")
     def populate_indicators_eth_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
     # Formatting test.
-    @informative('30m', 'NEO/{stake}', '{column}_{BASE}_{QUOTE}_{base}_{quote}_{asset}_{timeframe}')
+    @informative("30m", "NEO/{stake}", "{column}_{BASE}_{QUOTE}_{base}_{quote}_{asset}_{timeframe}")
     def populate_indicators_btc_1h_2(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
     # Custom formatter test
-    @informative('30m', 'ETH/{stake}', fmt=lambda column, **kwargs: column + '_from_callable')
+    @informative("30m", "ETH/{stake}", fmt=lambda column, **kwargs: column + "_from_callable")
     def populate_indicators_eth_30m(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         return dataframe
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         # Strategy timeframe indicators for current pair.
-        dataframe['rsi'] = 14
+        dataframe["rsi"] = 14
         # Informative pairs are available in this method.
-        dataframe['rsi_less'] = dataframe['rsi'] < dataframe['rsi_1h']
+        dataframe["rsi_less"] = dataframe["rsi"] < dataframe["rsi_1h"]
 
         # Mixing manual informative pairs with decorators.
-        informative = self.dp.get_pair_dataframe('NEO/USDT', '5m', '')
-        informative['rsi'] = 14
-        dataframe = merge_informative_pair(dataframe, informative, self.timeframe, '5m', ffill=True)
+        informative = self.dp.get_pair_dataframe("NEO/USDT", "5m", "")
+        informative["rsi"] = 14
+        dataframe = merge_informative_pair(dataframe, informative, self.timeframe, "5m", ffill=True)
 
         return dataframe
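The decorators above exercise the informative column-naming scheme, including a custom format string and an `fmt` callable. The following is only a sketch of how such a format string could be applied to rename columns, not freqtrade's implementation of `@informative`:

```python
import pandas as pd


def rename_informative(df: pd.DataFrame, fmt: str, asset: str, timeframe: str) -> pd.DataFrame:
    # Split "BASE/QUOTE" and expose the placeholders used by the test's format string.
    base, quote = asset.split("/")
    return df.rename(
        columns=lambda column: fmt.format(
            column=column,
            base=base.lower(),
            BASE=base,
            quote=quote.lower(),
            QUOTE=quote,
            asset=asset,
            timeframe=timeframe,
        )
    )


informative = pd.DataFrame({"rsi": [14, 14]})
fmt = "{column}_{BASE}_{QUOTE}_{base}_{quote}_{asset}_{timeframe}"
print(rename_informative(informative, fmt, "NEO/USDT", "30m").columns.tolist())
# ['rsi_NEO_USDT_neo_usdt_NEO/USDT_30m']
```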
diff --git a/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py b/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py
index e50d5d17b..618b401bb 100644
--- a/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py
+++ b/tests/strategy/strats/lookahead_bias/strategy_test_v3_with_lookahead_bias.py
@@ -10,49 +10,44 @@ class strategy_test_v3_with_lookahead_bias(IStrategy):
     INTERFACE_VERSION = 3
 
     # Minimal ROI designed for the strategy
-    minimal_roi = {
-        "40": 0.0,
-        "30": 0.01,
-        "20": 0.02,
-        "0": 0.04
-    }
+    minimal_roi = {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04}
 
     # Optimal stoploss designed for the strategy
     stoploss = -0.10
 
     # Optimal timeframe for the strategy
-    timeframe = '5m'
-    scenario = CategoricalParameter(['no_bias', 'bias1'], default='bias1', space="buy")
+    timeframe = "5m"
+    scenario = CategoricalParameter(["no_bias", "bias1"], default="bias1", space="buy")
 
     # Number of candles the strategy requires before producing valid signals
     startup_candle_count: int = 20
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         # bias is introduced here
-        if self.scenario.value != 'no_bias':
-            ichi = ichimoku(dataframe,
-                            conversion_line_period=20,
-                            base_line_periods=60,
-                            laggin_span=120,
-                            displacement=30)
-            dataframe['chikou_span'] = ichi['chikou_span']
+        if self.scenario.value != "no_bias":
+            ichi = ichimoku(
+                dataframe,
+                conversion_line_period=20,
+                base_line_periods=60,
+                laggin_span=120,
+                displacement=30,
+            )
+            dataframe["chikou_span"] = ichi["chikou_span"]
 
         return dataframe
 
     def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        if self.scenario.value == 'no_bias':
-            dataframe.loc[dataframe['close'].shift(10) < dataframe['close'], 'enter_long'] = 1
+        if self.scenario.value == "no_bias":
+            dataframe.loc[dataframe["close"].shift(10) < dataframe["close"], "enter_long"] = 1
         else:
-            dataframe.loc[dataframe['close'].shift(-10) > dataframe['close'], 'enter_long'] = 1
+            dataframe.loc[dataframe["close"].shift(-10) > dataframe["close"], "enter_long"] = 1
 
         return dataframe
 
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-        if self.scenario.value == 'no_bias':
-            dataframe.loc[
-                dataframe['close'].shift(10) < dataframe['close'], 'exit'] = 1
+        if self.scenario.value == "no_bias":
+            dataframe.loc[dataframe["close"].shift(10) < dataframe["close"], "exit"] = 1
         else:
-            dataframe.loc[
-                dataframe['close'].shift(-10) > dataframe['close'], 'exit'] = 1
+            dataframe.loc[dataframe["close"].shift(-10) > dataframe["close"], "exit"] = 1
 
         return dataframe
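The "bias1" scenario above relies on `shift(-10)`, which reads future candles. A tiny pandas example (using a shift of 1 instead of 10 to keep the series short) makes the difference visible:

```python
import pandas as pd

close = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], name="close")

# A positive shift looks backwards: row i sees an *earlier* candle, which is safe
# to use when generating signals.
print(close.shift(1).tolist())   # [nan, 1.0, 2.0, 3.0, 4.0]

# A negative shift looks forwards: row i sees a *future* candle, which is the
# lookahead bias the "bias1" scenario deliberately introduces.
print(close.shift(-1).tolist())  # [2.0, 3.0, 4.0, 5.0, nan]
```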
diff --git a/tests/strategy/strats/strategy_test_v2.py b/tests/strategy/strats/strategy_test_v2.py
index 9e1c47575..58473e9c0 100644
--- a/tests/strategy/strats/strategy_test_v2.py
+++ b/tests/strategy/strats/strategy_test_v2.py
@@ -15,28 +15,24 @@ class StrategyTestV2(IStrategy):
     or strategy repository https://github.com/freqtrade/freqtrade-strategies
     for samples and inspiration.
     """
+
     INTERFACE_VERSION = 2
 
     # Minimal ROI designed for the strategy
-    minimal_roi = {
-        "40": 0.0,
-        "30": 0.01,
-        "20": 0.02,
-        "0": 0.04
-    }
+    minimal_roi = {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04}
 
     # Optimal stoploss designed for the strategy
     stoploss = -0.10
 
     # Optimal timeframe for the strategy
-    timeframe = '5m'
+    timeframe = "5m"
 
     # Optional order type mapping
     order_types = {
-        'entry': 'limit',
-        'exit': 'limit',
-        'stoploss': 'limit',
-        'stoploss_on_exchange': False
+        "entry": "limit",
+        "exit": "limit",
+        "stoploss": "limit",
+        "stoploss_on_exchange": False,
     }
 
     # Number of candles the strategy requires before producing valid signals
@@ -44,8 +40,8 @@ class StrategyTestV2(IStrategy):
 
     # Optional time in force for orders
     order_time_in_force = {
-        'entry': 'gtc',
-        'exit': 'gtc',
+        "entry": "gtc",
+        "exit": "gtc",
     }
     # Test legacy use_sell_signal definition
     use_sell_signal = False
@@ -69,36 +65,36 @@ class StrategyTestV2(IStrategy):
         # ------------------------------------
 
         # ADX
-        dataframe['adx'] = ta.ADX(dataframe)
+        dataframe["adx"] = ta.ADX(dataframe)
 
         # MACD
         macd = ta.MACD(dataframe)
-        dataframe['macd'] = macd['macd']
-        dataframe['macdsignal'] = macd['macdsignal']
-        dataframe['macdhist'] = macd['macdhist']
+        dataframe["macd"] = macd["macd"]
+        dataframe["macdsignal"] = macd["macdsignal"]
+        dataframe["macdhist"] = macd["macdhist"]
 
         # Minus Directional Indicator / Movement
-        dataframe['minus_di'] = ta.MINUS_DI(dataframe)
+        dataframe["minus_di"] = ta.MINUS_DI(dataframe)
 
         # Plus Directional Indicator / Movement
-        dataframe['plus_di'] = ta.PLUS_DI(dataframe)
+        dataframe["plus_di"] = ta.PLUS_DI(dataframe)
 
         # RSI
-        dataframe['rsi'] = ta.RSI(dataframe)
+        dataframe["rsi"] = ta.RSI(dataframe)
 
         # Stoch fast
         stoch_fast = ta.STOCHF(dataframe)
-        dataframe['fastd'] = stoch_fast['fastd']
-        dataframe['fastk'] = stoch_fast['fastk']
+        dataframe["fastd"] = stoch_fast["fastd"]
+        dataframe["fastk"] = stoch_fast["fastk"]
 
         # Bollinger bands
         bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
-        dataframe['bb_lowerband'] = bollinger['lower']
-        dataframe['bb_middleband'] = bollinger['mid']
-        dataframe['bb_upperband'] = bollinger['upper']
+        dataframe["bb_lowerband"] = bollinger["lower"]
+        dataframe["bb_middleband"] = bollinger["mid"]
+        dataframe["bb_upperband"] = bollinger["upper"]
 
         # EMA - Exponential Moving Average
-        dataframe['ema10'] = ta.EMA(dataframe, timeperiod=10)
+        dataframe["ema10"] = ta.EMA(dataframe, timeperiod=10)
 
         return dataframe
 
@@ -111,16 +107,14 @@ class StrategyTestV2(IStrategy):
         """
         dataframe.loc[
             (
-                (dataframe['rsi'] < 35) &
-                (dataframe['fastd'] < 35) &
-                (dataframe['adx'] > 30) &
-                (dataframe['plus_di'] > 0.5)
-            ) |
-            (
-                (dataframe['adx'] > 65) &
-                (dataframe['plus_di'] > 0.5)
-            ),
-            'buy'] = 1
+                (dataframe["rsi"] < 35)
+                & (dataframe["fastd"] < 35)
+                & (dataframe["adx"] > 30)
+                & (dataframe["plus_di"] > 0.5)
+            )
+            | ((dataframe["adx"] > 65) & (dataframe["plus_di"] > 0.5)),
+            "buy",
+        ] = 1
 
         return dataframe
 
@@ -134,15 +128,13 @@ class StrategyTestV2(IStrategy):
         dataframe.loc[
             (
                 (
-                    (qtpylib.crossed_above(dataframe['rsi'], 70)) |
-                    (qtpylib.crossed_above(dataframe['fastd'], 70))
-                ) &
-                (dataframe['adx'] > 10) &
-                (dataframe['minus_di'] > 0)
-            ) |
-            (
-                (dataframe['adx'] > 70) &
-                (dataframe['minus_di'] > 0.5)
-            ),
-            'sell'] = 1
+                    (qtpylib.crossed_above(dataframe["rsi"], 70))
+                    | (qtpylib.crossed_above(dataframe["fastd"], 70))
+                )
+                & (dataframe["adx"] > 10)
+                & (dataframe["minus_di"] > 0)
+            )
+            | ((dataframe["adx"] > 70) & (dataframe["minus_di"] > 0.5)),
+            "sell",
+        ] = 1
         return dataframe
diff --git a/tests/strategy/strats/strategy_test_v3.py b/tests/strategy/strats/strategy_test_v3.py
index 83c7353ce..71404242a 100644
--- a/tests/strategy/strats/strategy_test_v3.py
+++ b/tests/strategy/strats/strategy_test_v3.py
@@ -8,8 +8,13 @@ from pandas import DataFrame
 
 import freqtrade.vendor.qtpylib.indicators as qtpylib
 from freqtrade.persistence import Trade
-from freqtrade.strategy import (BooleanParameter, DecimalParameter, IntParameter, IStrategy,
-                                RealParameter)
+from freqtrade.strategy import (
+    BooleanParameter,
+    DecimalParameter,
+    IntParameter,
+    IStrategy,
+    RealParameter,
+)
 
 
 class StrategyTestV3(IStrategy):
@@ -20,15 +25,11 @@ class StrategyTestV3(IStrategy):
     or strategy repository https://github.com/freqtrade/freqtrade-strategies
     for samples and inspiration.
     """
+
     INTERFACE_VERSION = 3
 
     # Minimal ROI designed for the strategy
-    minimal_roi = {
-        "40": 0.0,
-        "30": 0.01,
-        "20": 0.02,
-        "0": 0.04
-    }
+    minimal_roi = {"40": 0.0, "30": 0.01, "20": 0.02, "0": 0.04}
 
     # Optimal max_open_trades for the strategy
     max_open_trades = -1
@@ -37,14 +38,14 @@ class StrategyTestV3(IStrategy):
     stoploss = -0.10
 
     # Optimal timeframe for the strategy
-    timeframe = '5m'
+    timeframe = "5m"
 
     # Optional order type mapping
     order_types = {
-        'entry': 'limit',
-        'exit': 'limit',
-        'stoploss': 'limit',
-        'stoploss_on_exchange': False
+        "entry": "limit",
+        "exit": "limit",
+        "stoploss": "limit",
+        "stoploss_on_exchange": False,
     }
 
     # Number of candles the strategy requires before producing valid signals
@@ -52,26 +53,24 @@ class StrategyTestV3(IStrategy):
 
     # Optional time in force for orders
     order_time_in_force = {
-        'entry': 'gtc',
-        'exit': 'gtc',
+        "entry": "gtc",
+        "exit": "gtc",
     }
 
     buy_params = {
-        'buy_rsi': 35,
+        "buy_rsi": 35,
         # Intentionally not specified, so "default" is tested
         # 'buy_plusdi': 0.4
     }
 
-    sell_params = {
-        'sell_rsi': 74,
-        'sell_minusdi': 0.4
-    }
+    sell_params = {"sell_rsi": 74, "sell_minusdi": 0.4}
 
-    buy_rsi = IntParameter([0, 50], default=30, space='buy')
-    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space='buy')
-    sell_rsi = IntParameter(low=50, high=100, default=70, space='sell')
-    sell_minusdi = DecimalParameter(low=0, high=1, default=0.5001, decimals=3, space='sell',
-                                    load=False)
+    buy_rsi = IntParameter([0, 50], default=30, space="buy")
+    buy_plusdi = RealParameter(low=0, high=1, default=0.5, space="buy")
+    sell_rsi = IntParameter(low=50, high=100, default=70, space="sell")
+    sell_minusdi = DecimalParameter(
+        low=0, high=1, default=0.5001, decimals=3, space="sell", load=False
+    )
     protection_enabled = BooleanParameter(default=True)
     protection_cooldown_lookback = IntParameter([0, 50], default=30)
 
@@ -92,67 +91,61 @@ class StrategyTestV3(IStrategy):
         self.bot_started = True
 
     def informative_pairs(self):
-
         return []
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         # Momentum Indicator
         # ------------------------------------
 
         # ADX
-        dataframe['adx'] = ta.ADX(dataframe)
+        dataframe["adx"] = ta.ADX(dataframe)
 
         # MACD
         macd = ta.MACD(dataframe)
-        dataframe['macd'] = macd['macd']
-        dataframe['macdsignal'] = macd['macdsignal']
-        dataframe['macdhist'] = macd['macdhist']
+        dataframe["macd"] = macd["macd"]
+        dataframe["macdsignal"] = macd["macdsignal"]
+        dataframe["macdhist"] = macd["macdhist"]
 
         # Minus Directional Indicator / Movement
-        dataframe['minus_di'] = ta.MINUS_DI(dataframe)
+        dataframe["minus_di"] = ta.MINUS_DI(dataframe)
 
         # Plus Directional Indicator / Movement
-        dataframe['plus_di'] = ta.PLUS_DI(dataframe)
+        dataframe["plus_di"] = ta.PLUS_DI(dataframe)
 
         # RSI
-        dataframe['rsi'] = ta.RSI(dataframe)
+        dataframe["rsi"] = ta.RSI(dataframe)
 
         # Stoch fast
         stoch_fast = ta.STOCHF(dataframe)
-        dataframe['fastd'] = stoch_fast['fastd']
-        dataframe['fastk'] = stoch_fast['fastk']
+        dataframe["fastd"] = stoch_fast["fastd"]
+        dataframe["fastk"] = stoch_fast["fastk"]
 
         # Bollinger bands
         bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(dataframe), window=20, stds=2)
-        dataframe['bb_lowerband'] = bollinger['lower']
-        dataframe['bb_middleband'] = bollinger['mid']
-        dataframe['bb_upperband'] = bollinger['upper']
+        dataframe["bb_lowerband"] = bollinger["lower"]
+        dataframe["bb_middleband"] = bollinger["mid"]
+        dataframe["bb_upperband"] = bollinger["upper"]
 
         # EMA - Exponential Moving Average
-        dataframe['ema10'] = ta.EMA(dataframe, timeperiod=10)
+        dataframe["ema10"] = ta.EMA(dataframe, timeperiod=10)
 
         return dataframe
 
     def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         dataframe.loc[
             (
-                (dataframe['rsi'] < self.buy_rsi.value) &
-                (dataframe['fastd'] < 35) &
-                (dataframe['adx'] > 30) &
-                (dataframe['plus_di'] > self.buy_plusdi.value)
-            ) |
-            (
-                (dataframe['adx'] > 65) &
-                (dataframe['plus_di'] > self.buy_plusdi.value)
-            ),
-            'enter_long'] = 1
+                (dataframe["rsi"] < self.buy_rsi.value)
+                & (dataframe["fastd"] < 35)
+                & (dataframe["adx"] > 30)
+                & (dataframe["plus_di"] > self.buy_plusdi.value)
+            )
+            | ((dataframe["adx"] > 65) & (dataframe["plus_di"] > self.buy_plusdi.value)),
+            "enter_long",
+        ] = 1
         dataframe.loc[
-            (
-                qtpylib.crossed_below(dataframe['rsi'], self.sell_rsi.value)
-            ),
-            ('enter_short', 'enter_tag')] = (1, 'short_Tag')
+            (qtpylib.crossed_below(dataframe["rsi"], self.sell_rsi.value)),
+            ("enter_short", "enter_tag"),
+        ] = (1, "short_Tag")
 
         return dataframe
 
@@ -160,41 +153,53 @@ class StrategyTestV3(IStrategy):
         dataframe.loc[
             (
                 (
-                    (qtpylib.crossed_above(dataframe['rsi'], self.sell_rsi.value)) |
-                    (qtpylib.crossed_above(dataframe['fastd'], 70))
-                ) &
-                (dataframe['adx'] > 10) &
-                (dataframe['minus_di'] > 0)
-            ) |
-            (
-                (dataframe['adx'] > 70) &
-                (dataframe['minus_di'] > self.sell_minusdi.value)
-            ),
-            'exit_long'] = 1
+                    (qtpylib.crossed_above(dataframe["rsi"], self.sell_rsi.value))
+                    | (qtpylib.crossed_above(dataframe["fastd"], 70))
+                )
+                & (dataframe["adx"] > 10)
+                & (dataframe["minus_di"] > 0)
+            )
+            | ((dataframe["adx"] > 70) & (dataframe["minus_di"] > self.sell_minusdi.value)),
+            "exit_long",
+        ] = 1
 
         dataframe.loc[
-            (
-                qtpylib.crossed_above(dataframe['rsi'], self.buy_rsi.value)
-            ),
-            ('exit_short', 'exit_tag')] = (1, 'short_Tag')
+            (qtpylib.crossed_above(dataframe["rsi"], self.buy_rsi.value)),
+            ("exit_short", "exit_tag"),
+        ] = (1, "short_Tag")
 
         return dataframe
 
-    def leverage(self, pair: str, current_time: datetime, current_rate: float,
-                 proposed_leverage: float, max_leverage: float, entry_tag: Optional[str],
-                 side: str, **kwargs) -> float:
+    def leverage(
+        self,
+        pair: str,
+        current_time: datetime,
+        current_rate: float,
+        proposed_leverage: float,
+        max_leverage: float,
+        entry_tag: Optional[str],
+        side: str,
+        **kwargs,
+    ) -> float:
         # Return 3.0 in all cases.
         # Bot-logic must make sure it's an allowed leverage and eventually adjust accordingly.
 
         return 3.0
 
-    def adjust_trade_position(self, trade: Trade, current_time: datetime,
-                              current_rate: float, current_profit: float,
-                              min_stake: Optional[float], max_stake: float,
-                              current_entry_rate: float, current_exit_rate: float,
-                              current_entry_profit: float, current_exit_profit: float,
-                              **kwargs) -> Optional[float]:
-
+    def adjust_trade_position(
+        self,
+        trade: Trade,
+        current_time: datetime,
+        current_rate: float,
+        current_profit: float,
+        min_stake: Optional[float],
+        max_stake: float,
+        current_entry_rate: float,
+        current_exit_rate: float,
+        current_entry_profit: float,
+        current_exit_profit: float,
+        **kwargs,
+    ) -> Optional[float]:
         if current_profit < -0.0075:
             orders = trade.select_filled_orders(trade.entry_side)
             return round(orders[0].stake_amount, 0)
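The reformatted `adjust_trade_position` above keeps the test strategy's behaviour: once the open trade is more than 0.75% in loss, it tops up with (roughly) the stake of the first filled entry order. A self-contained sketch of that decision, with a hypothetical `stake_amounts` list standing in for `trade.select_filled_orders(...)`, is:

```python
from typing import List, Optional


def additional_stake(current_profit: float, stake_amounts: List[float]) -> Optional[float]:
    """Return extra stake to add to the position, or None to do nothing.

    Mirrors the test strategy above: top up with the first entry's stake
    once the trade is more than 0.75% in loss.
    """
    if current_profit < -0.0075 and stake_amounts:
        return round(stake_amounts[0], 0)
    return None


print(additional_stake(-0.01, [123.4, 60.0]))  # 123.0
print(additional_stake(0.02, [123.4, 60.0]))   # None
```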
diff --git a/tests/strategy/strats/strategy_test_v3_custom_entry_price.py b/tests/strategy/strats/strategy_test_v3_custom_entry_price.py
index 607ff6e1e..956766d14 100644
--- a/tests/strategy/strats/strategy_test_v3_custom_entry_price.py
+++ b/tests/strategy/strats/strategy_test_v3_custom_entry_price.py
@@ -17,24 +17,28 @@ class StrategyTestV3CustomEntryPrice(StrategyTestV3):
     or strategy repository https://github.com/freqtrade/freqtrade-strategies
     for samples and inspiration.
     """
+
     new_entry_price: float = 0.001
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return dataframe
 
     def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
-        dataframe.loc[
-            dataframe['volume'] > 0,
-            'enter_long'] = 1
+        dataframe.loc[dataframe["volume"] > 0, "enter_long"] = 1
 
         return dataframe
 
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         return dataframe
 
-    def custom_entry_price(self, pair: str, trade: Optional[Trade], current_time: datetime,
-                           proposed_rate: float,
-                           entry_tag: Optional[str], side: str, **kwargs) -> float:
-
+    def custom_entry_price(
+        self,
+        pair: str,
+        trade: Optional[Trade],
+        current_time: datetime,
+        proposed_rate: float,
+        entry_tag: Optional[str],
+        side: str,
+        **kwargs,
+    ) -> float:
         return self.new_entry_price
diff --git a/tests/strategy/strats/strategy_test_v3_recursive_issue.py b/tests/strategy/strats/strategy_test_v3_recursive_issue.py
index b3074113d..d03486886 100644
--- a/tests/strategy/strats/strategy_test_v3_recursive_issue.py
+++ b/tests/strategy/strats/strategy_test_v3_recursive_issue.py
@@ -10,37 +10,33 @@ class strategy_test_v3_recursive_issue(IStrategy):
     INTERFACE_VERSION = 3
 
     # Minimal ROI designed for the strategy
-    minimal_roi = {
-        "0": 0.04
-    }
+    minimal_roi = {"0": 0.04}
 
     # Optimal stoploss designed for the strategy
     stoploss = -0.10
 
     # Optimal timeframe for the strategy
-    timeframe = '5m'
-    scenario = CategoricalParameter(['no_bias', 'bias1', 'bias2'], default='bias1', space="buy")
+    timeframe = "5m"
+    scenario = CategoricalParameter(["no_bias", "bias1", "bias2"], default="bias1", space="buy")
 
     # Number of candles the strategy requires before producing valid signals
     startup_candle_count: int = 100
 
     def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
         # bias is introduced here
-        if self.scenario.value == 'no_bias':
-            dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
+        if self.scenario.value == "no_bias":
+            dataframe["rsi"] = ta.RSI(dataframe, timeperiod=14)
         else:
-            dataframe['rsi'] = ta.RSI(dataframe, timeperiod=50)
+            dataframe["rsi"] = ta.RSI(dataframe, timeperiod=50)
 
-        if self.scenario.value == 'bias2':
+        if self.scenario.value == "bias2":
             # Has both bias1 and bias2
-            dataframe['rsi_lookahead'] = ta.RSI(dataframe, timeperiod=50).shift(-1)
+            dataframe["rsi_lookahead"] = ta.RSI(dataframe, timeperiod=50).shift(-1)
 
         return dataframe
 
     def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         return dataframe
 
     def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
-
         return dataframe
diff --git a/tests/strategy/test_default_strategy.py b/tests/strategy/test_default_strategy.py
index afe7fc97a..494e374c4 100644
--- a/tests/strategy/test_default_strategy.py
+++ b/tests/strategy/test_default_strategy.py
@@ -9,22 +9,25 @@ from .strats.strategy_test_v3 import StrategyTestV3
 
 
 def test_strategy_test_v3_structure():
-    assert hasattr(StrategyTestV3, 'minimal_roi')
-    assert hasattr(StrategyTestV3, 'stoploss')
-    assert hasattr(StrategyTestV3, 'timeframe')
-    assert hasattr(StrategyTestV3, 'populate_indicators')
-    assert hasattr(StrategyTestV3, 'populate_entry_trend')
-    assert hasattr(StrategyTestV3, 'populate_exit_trend')
+    assert hasattr(StrategyTestV3, "minimal_roi")
+    assert hasattr(StrategyTestV3, "stoploss")
+    assert hasattr(StrategyTestV3, "timeframe")
+    assert hasattr(StrategyTestV3, "populate_indicators")
+    assert hasattr(StrategyTestV3, "populate_entry_trend")
+    assert hasattr(StrategyTestV3, "populate_exit_trend")
 
 
-@pytest.mark.parametrize('is_short,side', [
-    (True, 'short'),
-    (False, 'long'),
-])
+@pytest.mark.parametrize(
+    "is_short,side",
+    [
+        (True, "short"),
+        (False, "long"),
+    ],
+)
 def test_strategy_test_v3(dataframe_1m, fee, is_short, side):
     strategy = StrategyTestV3({})
 
-    metadata = {'pair': 'ETH/BTC'}
+    metadata = {"pair": "ETH/BTC"}
     assert isinstance(strategy.minimal_roi, dict)
     assert isinstance(strategy.stoploss, float)
     assert isinstance(strategy.timeframe, str)
@@ -34,23 +37,46 @@ def test_strategy_test_v3(dataframe_1m, fee, is_short, side):
     assert isinstance(strategy.populate_sell_trend(indicators, metadata), DataFrame)
 
     trade = Trade(
-        open_rate=19_000,
-        amount=0.1,
-        pair='ETH/BTC',
-        fee_open=fee.return_value,
-        is_short=is_short
+        open_rate=19_000, amount=0.1, pair="ETH/BTC", fee_open=fee.return_value, is_short=is_short
     )
 
-    assert strategy.confirm_trade_entry(pair='ETH/BTC', order_type='limit', amount=0.1,
-                                        rate=20000, time_in_force='gtc',
-                                        current_time=datetime.now(timezone.utc),
-                                        side=side, entry_tag=None) is True
-    assert strategy.confirm_trade_exit(pair='ETH/BTC', trade=trade, order_type='limit', amount=0.1,
-                                       rate=20000, time_in_force='gtc', exit_reason='roi',
-                                       sell_reason='roi',
-                                       current_time=datetime.now(timezone.utc),
-                                       side=side) is True
+    assert (
+        strategy.confirm_trade_entry(
+            pair="ETH/BTC",
+            order_type="limit",
+            amount=0.1,
+            rate=20000,
+            time_in_force="gtc",
+            current_time=datetime.now(timezone.utc),
+            side=side,
+            entry_tag=None,
+        )
+        is True
+    )
+    assert (
+        strategy.confirm_trade_exit(
+            pair="ETH/BTC",
+            trade=trade,
+            order_type="limit",
+            amount=0.1,
+            rate=20000,
+            time_in_force="gtc",
+            exit_reason="roi",
+            sell_reason="roi",
+            current_time=datetime.now(timezone.utc),
+            side=side,
+        )
+        is True
+    )
 
-    assert strategy.custom_stoploss(pair='ETH/BTC', trade=trade, current_time=datetime.now(),
-                                    current_rate=20_000, current_profit=0.05, after_fill=False
-                                    ) == strategy.stoploss
+    assert (
+        strategy.custom_stoploss(
+            pair="ETH/BTC",
+            trade=trade,
+            current_time=datetime.now(),
+            current_rate=20_000,
+            current_profit=0.05,
+            after_fill=False,
+        )
+        == strategy.stoploss
+    )
diff --git a/tests/strategy/test_interface.py b/tests/strategy/test_interface.py
index a53eead95..531d05c0c 100644
--- a/tests/strategy/test_interface.py
+++ b/tests/strategy/test_interface.py
@@ -18,12 +18,23 @@ from freqtrade.optimize.space import SKDecimal
 from freqtrade.persistence import PairLocks, Trade
 from freqtrade.resolvers import StrategyResolver
 from freqtrade.strategy.hyper import detect_parameters
-from freqtrade.strategy.parameters import (BaseParameter, BooleanParameter, CategoricalParameter,
-                                           DecimalParameter, IntParameter, RealParameter)
+from freqtrade.strategy.parameters import (
+    BaseParameter,
+    BooleanParameter,
+    CategoricalParameter,
+    DecimalParameter,
+    IntParameter,
+    RealParameter,
+)
 from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
 from freqtrade.util import dt_now
-from tests.conftest import (CURRENT_TEST_STRATEGY, TRADE_SIDES, create_mock_trades, log_has,
-                            log_has_re)
+from tests.conftest import (
+    CURRENT_TEST_STRATEGY,
+    TRADE_SIDES,
+    create_mock_trades,
+    log_has,
+    log_has_re,
+)
 
 from .strats.strategy_test_v3 import StrategyTestV3
 
@@ -34,167 +45,165 @@ _STRATEGY.dp = DataProvider({}, None, None)
 
 
 def test_returns_latest_signal(ohlcv_history):
-    ohlcv_history.loc[1, 'date'] = dt_now()
+    ohlcv_history.loc[1, "date"] = dt_now()
     # Take a copy to correctly modify the call
     mocked_history = ohlcv_history.copy()
-    mocked_history['enter_long'] = 0
-    mocked_history['exit_long'] = 0
-    mocked_history['enter_short'] = 0
-    mocked_history['exit_short'] = 0
+    mocked_history["enter_long"] = 0
+    mocked_history["exit_long"] = 0
+    mocked_history["enter_short"] = 0
+    mocked_history["exit_short"] = 0
     # Set tags in lines that don't matter to test nan in the sell line
-    mocked_history.loc[0, 'enter_tag'] = 'wrong_line'
-    mocked_history.loc[0, 'exit_tag'] = 'wrong_line'
-    mocked_history.loc[1, 'exit_long'] = 1
+    mocked_history.loc[0, "enter_tag"] = "wrong_line"
+    mocked_history.loc[0, "exit_tag"] = "wrong_line"
+    mocked_history.loc[1, "exit_long"] = 1
 
-    assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, True, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
-    mocked_history.loc[1, 'exit_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 1
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (None, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (False, True, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (False, False, None)
+    mocked_history.loc[1, "exit_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 1
 
-    assert _STRATEGY.get_entry_signal(
-        'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
-    mocked_history.loc[1, 'exit_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 0
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (
+        SignalDirection.LONG,
+        None,
+    )
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (True, False, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (False, False, None)
+    mocked_history.loc[1, "exit_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 0
 
-    assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
-    mocked_history.loc[1, 'exit_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 1
-    mocked_history.loc[1, 'enter_tag'] = 'buy_signal_01'
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (None, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (False, False, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (False, False, None)
+    mocked_history.loc[1, "exit_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 1
+    mocked_history.loc[1, "enter_tag"] = "buy_signal_01"
 
-    assert _STRATEGY.get_entry_signal(
-        'ETH/BTC', '5m', mocked_history) == (SignalDirection.LONG, 'buy_signal_01')
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (True, False, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (False, False, None)
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (
+        SignalDirection.LONG,
+        "buy_signal_01",
+    )
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (True, False, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (False, False, None)
 
-    mocked_history.loc[1, 'exit_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 0
-    mocked_history.loc[1, 'enter_short'] = 1
-    mocked_history.loc[1, 'exit_short'] = 0
-    mocked_history.loc[1, 'enter_tag'] = 'sell_signal_01'
+    mocked_history.loc[1, "exit_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 0
+    mocked_history.loc[1, "enter_short"] = 1
+    mocked_history.loc[1, "exit_short"] = 0
+    mocked_history.loc[1, "enter_tag"] = "sell_signal_01"
 
     # Don't provide short signal while in spot mode
-    assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (None, None)
 
-    _STRATEGY.config['trading_mode'] = 'futures'
+    _STRATEGY.config["trading_mode"] = "futures"
     # Short signal gets ignored as can_short is not set.
-    assert _STRATEGY.get_entry_signal('ETH/BTC', '5m', mocked_history) == (None, None)
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (None, None)
 
     _STRATEGY.can_short = True
 
-    assert _STRATEGY.get_entry_signal(
-        'ETH/BTC', '5m', mocked_history) == (SignalDirection.SHORT, 'sell_signal_01')
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history) == (False, False, None)
-    assert _STRATEGY.get_exit_signal('ETH/BTC', '5m', mocked_history, True) == (True, False, None)
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (
+        SignalDirection.SHORT,
+        "sell_signal_01",
+    )
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (False, False, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (True, False, None)
 
-    mocked_history.loc[1, 'enter_short'] = 0
-    mocked_history.loc[1, 'exit_short'] = 1
-    mocked_history.loc[1, 'exit_tag'] = 'sell_signal_02'
-    assert _STRATEGY.get_entry_signal(
-        'ETH/BTC', '5m', mocked_history) == (None, None)
-    assert _STRATEGY.get_exit_signal(
-        'ETH/BTC', '5m', mocked_history) == (False, False, 'sell_signal_02')
-    assert _STRATEGY.get_exit_signal(
-        'ETH/BTC', '5m', mocked_history, True) == (False, True, 'sell_signal_02')
+    mocked_history.loc[1, "enter_short"] = 0
+    mocked_history.loc[1, "exit_short"] = 1
+    mocked_history.loc[1, "exit_tag"] = "sell_signal_02"
+    assert _STRATEGY.get_entry_signal("ETH/BTC", "5m", mocked_history) == (None, None)
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history) == (
+        False,
+        False,
+        "sell_signal_02",
+    )
+    assert _STRATEGY.get_exit_signal("ETH/BTC", "5m", mocked_history, True) == (
+        False,
+        True,
+        "sell_signal_02",
+    )
 
     _STRATEGY.can_short = False
-    _STRATEGY.config['trading_mode'] = 'spot'
+    _STRATEGY.config["trading_mode"] = "spot"
 
 
 def test_analyze_pair_empty(mocker, caplog, ohlcv_history):
-    mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
-    mocker.patch.object(
-        _STRATEGY, '_analyze_ticker_internal',
-        return_value=DataFrame([])
-    )
-    mocker.patch.object(_STRATEGY, 'assert_df')
+    mocker.patch.object(_STRATEGY.dp, "ohlcv", return_value=ohlcv_history)
+    mocker.patch.object(_STRATEGY, "_analyze_ticker_internal", return_value=DataFrame([]))
+    mocker.patch.object(_STRATEGY, "assert_df")
 
-    _STRATEGY.analyze_pair('ETH/BTC')
+    _STRATEGY.analyze_pair("ETH/BTC")
 
-    assert log_has('Empty dataframe for pair ETH/BTC', caplog)
+    assert log_has("Empty dataframe for pair ETH/BTC", caplog)
 
 
 def test_get_signal_empty(default_conf, caplog):
     assert (None, None) == _STRATEGY.get_latest_candle(
-        'foo', default_conf['timeframe'], DataFrame()
+        "foo", default_conf["timeframe"], DataFrame()
     )
-    assert log_has('Empty candle (OHLCV) data for pair foo', caplog)
+    assert log_has("Empty candle (OHLCV) data for pair foo", caplog)
     caplog.clear()
 
-    assert (None, None) == _STRATEGY.get_latest_candle('bar', default_conf['timeframe'], None)
-    assert log_has('Empty candle (OHLCV) data for pair bar', caplog)
+    assert (None, None) == _STRATEGY.get_latest_candle("bar", default_conf["timeframe"], None)
+    assert log_has("Empty candle (OHLCV) data for pair bar", caplog)
     caplog.clear()
 
     assert (None, None) == _STRATEGY.get_latest_candle(
-        'baz',
-        default_conf['timeframe'],
-        DataFrame([])
+        "baz", default_conf["timeframe"], DataFrame([])
     )
-    assert log_has('Empty candle (OHLCV) data for pair baz', caplog)
+    assert log_has("Empty candle (OHLCV) data for pair baz", caplog)
 
 
 def test_get_signal_exception_valueerror(mocker, caplog, ohlcv_history):
     caplog.set_level(logging.INFO)
-    mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
-    mocker.patch.object(
-        _STRATEGY, '_analyze_ticker_internal',
-        side_effect=ValueError('xyz')
-    )
-    _STRATEGY.analyze_pair('foo')
-    assert log_has_re(r'Strategy caused the following exception: xyz.*', caplog)
+    mocker.patch.object(_STRATEGY.dp, "ohlcv", return_value=ohlcv_history)
+    mocker.patch.object(_STRATEGY, "_analyze_ticker_internal", side_effect=ValueError("xyz"))
+    _STRATEGY.analyze_pair("foo")
+    assert log_has_re(r"Strategy caused the following exception: xyz.*", caplog)
     caplog.clear()
 
     mocker.patch.object(
-        _STRATEGY, 'analyze_ticker',
-        side_effect=Exception('invalid ticker history ')
+        _STRATEGY, "analyze_ticker", side_effect=Exception("invalid ticker history ")
     )
-    _STRATEGY.analyze_pair('foo')
-    assert log_has_re(r'Strategy caused the following exception: xyz.*', caplog)
+    _STRATEGY.analyze_pair("foo")
+    assert log_has_re(r"Strategy caused the following exception: xyz.*", caplog)
 
 
 def test_get_signal_old_dataframe(default_conf, mocker, caplog, ohlcv_history):
     # default_conf defines a 5m interval. we check interval * 2 + 5m
     # this is necessary as the last candle is removed (partial candles) by default
-    ohlcv_history.loc[1, 'date'] = dt_now() - timedelta(minutes=16)
+    ohlcv_history.loc[1, "date"] = dt_now() - timedelta(minutes=16)
     # Take a copy to correctly modify the call
     mocked_history = ohlcv_history.copy()
-    mocked_history['exit_long'] = 0
-    mocked_history['enter_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 1
+    mocked_history["exit_long"] = 0
+    mocked_history["enter_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 1
 
     caplog.set_level(logging.INFO)
-    mocker.patch.object(_STRATEGY, 'assert_df')
+    mocker.patch.object(_STRATEGY, "assert_df")
 
     assert (None, None) == _STRATEGY.get_latest_candle(
-        'xyz',
-        default_conf['timeframe'],
-        mocked_history
+        "xyz", default_conf["timeframe"], mocked_history
     )
-    assert log_has('Outdated history for pair xyz. Last tick is 16 minutes old', caplog)
+    assert log_has("Outdated history for pair xyz. Last tick is 16 minutes old", caplog)
 
 
 def test_get_signal_no_sell_column(default_conf, mocker, caplog, ohlcv_history):
     # default_conf defines a 5m interval. we check interval * 2 + 5m
     # this is necessary as the last candle is removed (partial candles) by default
-    ohlcv_history.loc[1, 'date'] = dt_now()
+    ohlcv_history.loc[1, "date"] = dt_now()
     # Take a copy to correctly modify the call
     mocked_history = ohlcv_history.copy()
     # Intentionally don't set sell column
     # mocked_history['sell'] = 0
-    mocked_history['enter_long'] = 0
-    mocked_history.loc[1, 'enter_long'] = 1
+    mocked_history["enter_long"] = 0
+    mocked_history.loc[1, "enter_long"] = 1
 
     caplog.set_level(logging.INFO)
-    mocker.patch.object(_STRATEGY, 'assert_df')
+    mocker.patch.object(_STRATEGY, "assert_df")
 
     assert (SignalDirection.LONG, None) == _STRATEGY.get_entry_signal(
-        'xyz',
-        default_conf['timeframe'],
-        mocked_history
+        "xyz", default_conf["timeframe"], mocked_history
     )
 
 
@@ -206,77 +215,102 @@ def test_ignore_expired_candle(default_conf):
     # Add 1 candle length as the "latest date" defines candle open.
     current_time = latest_date + timedelta(seconds=80 + 300)
 
-    assert strategy.ignore_expired_candle(
-        latest_date=latest_date,
-        current_time=current_time,
-        timeframe_seconds=300,
-        enter=True
-    ) is True
+    assert (
+        strategy.ignore_expired_candle(
+            latest_date=latest_date, current_time=current_time, timeframe_seconds=300, enter=True
+        )
+        is True
+    )
 
     current_time = latest_date + timedelta(seconds=30 + 300)
 
-    assert strategy.ignore_expired_candle(
-        latest_date=latest_date,
-        current_time=current_time,
-        timeframe_seconds=300,
-        enter=True
-    ) is not True
+    assert (
+        strategy.ignore_expired_candle(
+            latest_date=latest_date, current_time=current_time, timeframe_seconds=300, enter=True
+        )
+        is not True
+    )
 
 
 def test_assert_df_raise(mocker, caplog, ohlcv_history):
-    ohlcv_history.loc[1, 'date'] = dt_now() - timedelta(minutes=16)
+    ohlcv_history.loc[1, "date"] = dt_now() - timedelta(minutes=16)
     # Take a copy to correctly modify the call
     mocked_history = ohlcv_history.copy()
-    mocked_history['sell'] = 0
-    mocked_history['buy'] = 0
-    mocked_history.loc[1, 'buy'] = 1
+    mocked_history["sell"] = 0
+    mocked_history["buy"] = 0
+    mocked_history.loc[1, "buy"] = 1
 
     caplog.set_level(logging.INFO)
-    mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
-    mocker.patch.object(_STRATEGY.dp, 'get_analyzed_dataframe', return_value=(mocked_history, 0))
-    mocker.patch.object(
-        _STRATEGY, 'assert_df',
-        side_effect=StrategyError('Dataframe returned...')
+    mocker.patch.object(_STRATEGY.dp, "ohlcv", return_value=ohlcv_history)
+    mocker.patch.object(_STRATEGY.dp, "get_analyzed_dataframe", return_value=(mocked_history, 0))
+    mocker.patch.object(_STRATEGY, "assert_df", side_effect=StrategyError("Dataframe returned..."))
+    _STRATEGY.analyze_pair("xyz")
+    assert log_has(
+        "Unable to analyze candle (OHLCV) data for pair xyz: Dataframe returned...", caplog
     )
-    _STRATEGY.analyze_pair('xyz')
-    assert log_has('Unable to analyze candle (OHLCV) data for pair xyz: Dataframe returned...',
-                   caplog)
 
 
 def test_assert_df(ohlcv_history, caplog):
     df_len = len(ohlcv_history) - 1
-    ohlcv_history.loc[:, 'enter_long'] = 0
-    ohlcv_history.loc[:, 'exit_long'] = 0
+    ohlcv_history.loc[:, "enter_long"] = 0
+    ohlcv_history.loc[:, "exit_long"] = 0
     # Ensure it's running when passed correctly
-    _STRATEGY.assert_df(ohlcv_history, len(ohlcv_history),
-                        ohlcv_history.loc[df_len, 'close'], ohlcv_history.loc[df_len, 'date'])
+    _STRATEGY.assert_df(
+        ohlcv_history,
+        len(ohlcv_history),
+        ohlcv_history.loc[df_len, "close"],
+        ohlcv_history.loc[df_len, "date"],
+    )
 
     with pytest.raises(StrategyError, match=r"Dataframe returned from strategy.*length\."):
-        _STRATEGY.assert_df(ohlcv_history, len(ohlcv_history) + 1,
-                            ohlcv_history.loc[df_len, 'close'], ohlcv_history.loc[df_len, 'date'])
+        _STRATEGY.assert_df(
+            ohlcv_history,
+            len(ohlcv_history) + 1,
+            ohlcv_history.loc[df_len, "close"],
+            ohlcv_history.loc[df_len, "date"],
+        )
 
-    with pytest.raises(StrategyError,
-                       match=r"Dataframe returned from strategy.*last close price\."):
-        _STRATEGY.assert_df(ohlcv_history, len(ohlcv_history),
-                            ohlcv_history.loc[df_len, 'close'] + 0.01,
-                            ohlcv_history.loc[df_len, 'date'])
-    with pytest.raises(StrategyError,
-                       match=r"Dataframe returned from strategy.*last date\."):
-        _STRATEGY.assert_df(ohlcv_history, len(ohlcv_history),
-                            ohlcv_history.loc[df_len, 'close'], ohlcv_history.loc[0, 'date'])
-    with pytest.raises(StrategyError,
-                       match=r"No dataframe returned \(return statement missing\?\)."):
-        _STRATEGY.assert_df(None, len(ohlcv_history),
-                            ohlcv_history.loc[df_len, 'close'], ohlcv_history.loc[0, 'date'])
-    with pytest.raises(StrategyError,
-                       match="enter_long/buy column not set."):
-        _STRATEGY.assert_df(ohlcv_history.drop('enter_long', axis=1), len(ohlcv_history),
-                            ohlcv_history.loc[df_len, 'close'], ohlcv_history.loc[0, 'date'])
+    with pytest.raises(
+        StrategyError, match=r"Dataframe returned from strategy.*last close price\."
+    ):
+        _STRATEGY.assert_df(
+            ohlcv_history,
+            len(ohlcv_history),
+            ohlcv_history.loc[df_len, "close"] + 0.01,
+            ohlcv_history.loc[df_len, "date"],
+        )
+    with pytest.raises(StrategyError, match=r"Dataframe returned from strategy.*last date\."):
+        _STRATEGY.assert_df(
+            ohlcv_history,
+            len(ohlcv_history),
+            ohlcv_history.loc[df_len, "close"],
+            ohlcv_history.loc[0, "date"],
+        )
+    with pytest.raises(
+        StrategyError, match=r"No dataframe returned \(return statement missing\?\)."
+    ):
+        _STRATEGY.assert_df(
+            None,
+            len(ohlcv_history),
+            ohlcv_history.loc[df_len, "close"],
+            ohlcv_history.loc[0, "date"],
+        )
+    with pytest.raises(StrategyError, match="enter_long/buy column not set."):
+        _STRATEGY.assert_df(
+            ohlcv_history.drop("enter_long", axis=1),
+            len(ohlcv_history),
+            ohlcv_history.loc[df_len, "close"],
+            ohlcv_history.loc[0, "date"],
+        )
 
     _STRATEGY.disable_dataframe_checks = True
     caplog.clear()
-    _STRATEGY.assert_df(ohlcv_history, len(ohlcv_history),
-                        ohlcv_history.loc[2, 'close'], ohlcv_history.loc[0, 'date'])
+    _STRATEGY.assert_df(
+        ohlcv_history,
+        len(ohlcv_history),
+        ohlcv_history.loc[2, "close"],
+        ohlcv_history.loc[0, "date"],
+    )
     assert log_has_re(r"Dataframe returned from strategy.*last date\.", caplog)
     # reset to avoid problems in other tests due to test leakage
     _STRATEGY.disable_dataframe_checks = False
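
The error messages matched above correspond to a short list of consistency checks on the dataframe a strategy returns: same length, same last close, same last date, and the entry column present; disable_dataframe_checks downgrades the failure to a log message. A rough standalone sketch of those checks, with hypothetical names standing in for assert_df and StrategyError:

import pandas as pd

class DataFrameError(Exception):
    """Stand-in for StrategyError in this sketch."""

def check_strategy_dataframe(df, expected_len, expected_close, expected_date):
    if df is None:
        raise DataFrameError("No dataframe returned (return statement missing?)")
    if len(df) != expected_len:
        raise DataFrameError("Dataframe returned from strategy has a different length")
    if df["close"].iloc[-1] != expected_close:
        raise DataFrameError("Dataframe returned from strategy has a different last close price")
    if df["date"].iloc[-1] != expected_date:
        raise DataFrameError("Dataframe returned from strategy has a different last date")
    if "enter_long" not in df.columns and "buy" not in df.columns:
        raise DataFrameError("enter_long/buy column not set")

df = pd.DataFrame({"date": pd.date_range("2024-01-01", periods=3, freq="5min"),
                   "close": [1.0, 1.1, 1.2], "enter_long": [0, 0, 1]})
check_strategy_dataframe(df, 3, 1.2, df["date"].iloc[-1])  # passes without raising
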
@@ -285,26 +319,24 @@ def test_assert_df(ohlcv_history, caplog):
 def test_advise_all_indicators(default_conf, testdatadir) -> None:
     strategy = StrategyResolver.load_strategy(default_conf)
 
-    timerange = TimeRange.parse_timerange('1510694220-1510700340')
-    data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
-                     fill_up_missing=True)
+    timerange = TimeRange.parse_timerange("1510694220-1510700340")
+    data = load_data(testdatadir, "1m", ["UNITTEST/BTC"], timerange=timerange, fill_up_missing=True)
     processed = strategy.advise_all_indicators(data)
-    assert len(processed['UNITTEST/BTC']) == 103
+    assert len(processed["UNITTEST/BTC"]) == 103
 
 
 def test_freqai_not_initialized(default_conf) -> None:
     strategy = StrategyResolver.load_strategy(default_conf)
     strategy.ft_bot_start()
-    with pytest.raises(OperationalException, match=r'freqAI is not enabled\.'):
+    with pytest.raises(OperationalException, match=r"freqAI is not enabled\."):
         strategy.freqai.start()
 
 
 def test_advise_all_indicators_copy(mocker, default_conf, testdatadir) -> None:
     strategy = StrategyResolver.load_strategy(default_conf)
-    aimock = mocker.patch('freqtrade.strategy.interface.IStrategy.advise_indicators')
-    timerange = TimeRange.parse_timerange('1510694220-1510700340')
-    data = load_data(testdatadir, '1m', ['UNITTEST/BTC'], timerange=timerange,
-                     fill_up_missing=True)
+    aimock = mocker.patch("freqtrade.strategy.interface.IStrategy.advise_indicators")
+    timerange = TimeRange.parse_timerange("1510694220-1510700340")
+    data = load_data(testdatadir, "1m", ["UNITTEST/BTC"], timerange=timerange, fill_up_missing=True)
     strategy.advise_all_indicators(data)
     assert aimock.call_count == 1
     # Ensure that a copy of the dataframe is passed to advise_indicators
@@ -312,21 +344,19 @@ def test_advise_all_indicators_copy(mocker, default_conf, testdatadir) -> None:
 
 
 def test_min_roi_reached(default_conf, fee) -> None:
-
     # Use list to confirm sequence does not matter
-    min_roi_list = [{20: 0.05, 55: 0.01, 0: 0.1},
-                    {0: 0.1, 20: 0.05, 55: 0.01}]
+    min_roi_list = [{20: 0.05, 55: 0.01, 0: 0.1}, {0: 0.1, 20: 0.05, 55: 0.01}]
     for roi in min_roi_list:
         strategy = StrategyResolver.load_strategy(default_conf)
         strategy.minimal_roi = roi
         trade = Trade(
-            pair='ETH/BTC',
+            pair="ETH/BTC",
             stake_amount=0.001,
             amount=5,
             open_date=dt_now() - timedelta(hours=1),
             fee_open=fee.return_value,
             fee_close=fee.return_value,
-            exchange='binance',
+            exchange="binance",
             open_rate=1,
         )
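
Both orderings of the dict above describe the same ROI table: the key is the trade age in minutes from which the corresponding profit target applies, so the entry with the largest key the trade has already passed is the one that counts. An illustrative lookup, not the library's implementation:

def required_roi(minimal_roi, trade_duration_minutes):
    # Use the entry with the largest threshold the trade has already reached.
    passed = [k for k in minimal_roi if k <= trade_duration_minutes]
    return minimal_roi[max(passed)] if passed else None

roi = {20: 0.05, 55: 0.01, 0: 0.1}
assert required_roi(roi, 10) == 0.1    # first 20 minutes: 10% required
assert required_roi(roi, 30) == 0.05   # between 20 and 55 minutes: 5%
assert required_roi(roi, 60) == 0.01   # after 55 minutes: 1%
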
 
@@ -341,30 +371,22 @@ def test_min_roi_reached(default_conf, fee) -> None:
 
 
 def test_min_roi_reached2(default_conf, fee) -> None:
-
     # test with ROI raising after last interval
-    min_roi_list = [{20: 0.07,
-                     30: 0.05,
-                     55: 0.30,
-                     0: 0.1
-                     },
-                    {0: 0.1,
-                     20: 0.07,
-                     30: 0.05,
-                     55: 0.30
-                     },
-                    ]
+    min_roi_list = [
+        {20: 0.07, 30: 0.05, 55: 0.30, 0: 0.1},
+        {0: 0.1, 20: 0.07, 30: 0.05, 55: 0.30},
+    ]
     for roi in min_roi_list:
         strategy = StrategyResolver.load_strategy(default_conf)
         strategy.minimal_roi = roi
         trade = Trade(
-            pair='ETH/BTC',
+            pair="ETH/BTC",
             stake_amount=0.001,
             amount=5,
             open_date=dt_now() - timedelta(hours=1),
             fee_open=fee.return_value,
             fee_close=fee.return_value,
-            exchange='binance',
+            exchange="binance",
             open_rate=1,
         )
 
@@ -383,22 +405,22 @@ def test_min_roi_reached2(default_conf, fee) -> None:
 
 
 def test_min_roi_reached3(default_conf, fee) -> None:
-
     # test for issue #1948
-    min_roi = {20: 0.07,
-               30: 0.05,
-               55: 0.30,
-               }
+    min_roi = {
+        20: 0.07,
+        30: 0.05,
+        55: 0.30,
+    }
     strategy = StrategyResolver.load_strategy(default_conf)
     strategy.minimal_roi = min_roi
     trade = Trade(
-        pair='ETH/BTC',
+        pair="ETH/BTC",
         stake_amount=0.001,
         amount=5,
         open_date=dt_now() - timedelta(hours=1),
         fee_open=fee.return_value,
         fee_close=fee.return_value,
-        exchange='binance',
+        exchange="binance",
         open_rate=1,
     )
 
@@ -417,42 +439,117 @@ def test_min_roi_reached3(default_conf, fee) -> None:
 
 
 @pytest.mark.parametrize(
-    'profit,adjusted,expected,liq,trailing,custom,profit2,adjusted2,expected2,custom_stop', [
+    "profit,adjusted,expected,liq,trailing,custom,profit2,adjusted2,expected2,custom_stop",
+    [
         # profit, adjusted stoploss (absolute), expected exit type, liquidation price,
         #   enable trailing, enable custom stoploss,
         #   then profit / adjusted stoploss / expected exit type for the 2nd call, custom stoploss callable
         (0.2, 0.9, ExitType.NONE, None, False, False, 0.3, 0.9, ExitType.NONE, None),
         (0.2, 0.9, ExitType.NONE, None, False, False, -0.2, 0.9, ExitType.STOP_LOSS, None),
         (0.2, 0.9, ExitType.NONE, 0.92, False, False, -0.09, 0.9, ExitType.LIQUIDATION, None),
-        (0.2, 1.14, ExitType.NONE, None, True, False, 0.05, 1.14, ExitType.TRAILING_STOP_LOSS,
-         None),
+        (
+            0.2,
+            1.14,
+            ExitType.NONE,
+            None,
+            True,
+            False,
+            0.05,
+            1.14,
+            ExitType.TRAILING_STOP_LOSS,
+            None,
+        ),
         (0.01, 0.96, ExitType.NONE, None, True, False, 0.05, 1, ExitType.NONE, None),
         (0.05, 1, ExitType.NONE, None, True, False, -0.01, 1, ExitType.TRAILING_STOP_LOSS, None),
         # Default custom case - trails with 10%
         (0.05, 0.95, ExitType.NONE, None, False, True, -0.02, 0.95, ExitType.NONE, None),
-        (0.05, 0.95, ExitType.NONE, None, False, True, -0.06, 0.95, ExitType.TRAILING_STOP_LOSS,
-         None),
-        (0.05, 1, ExitType.NONE, None, False, True, -0.06, 1, ExitType.TRAILING_STOP_LOSS,
-         lambda **kwargs: -0.05),
-        (0.05, 1, ExitType.NONE, None, False, True, 0.09, 1.04, ExitType.NONE,
-         lambda **kwargs: -0.05),
-        (0.05, 0.95, ExitType.NONE, None, False, True, 0.09, 0.98, ExitType.NONE,
-         lambda current_profit, **kwargs: -0.1 if current_profit < 0.6 else -(current_profit * 2)),
+        (
+            0.05,
+            0.95,
+            ExitType.NONE,
+            None,
+            False,
+            True,
+            -0.06,
+            0.95,
+            ExitType.TRAILING_STOP_LOSS,
+            None,
+        ),
+        (
+            0.05,
+            1,
+            ExitType.NONE,
+            None,
+            False,
+            True,
+            -0.06,
+            1,
+            ExitType.TRAILING_STOP_LOSS,
+            lambda **kwargs: -0.05,
+        ),
+        (
+            0.05,
+            1,
+            ExitType.NONE,
+            None,
+            False,
+            True,
+            0.09,
+            1.04,
+            ExitType.NONE,
+            lambda **kwargs: -0.05,
+        ),
+        (
+            0.05,
+            0.95,
+            ExitType.NONE,
+            None,
+            False,
+            True,
+            0.09,
+            0.98,
+            ExitType.NONE,
+            lambda current_profit, **kwargs: (
+                -0.1 if current_profit < 0.6 else -(current_profit * 2)
+            ),
+        ),
         # Error case - static stoploss in place
-        (0.05, 0.9, ExitType.NONE, None, False, True, 0.09, 0.9, ExitType.NONE,
-         lambda **kwargs: None),
-    ])
-def test_ft_stoploss_reached(default_conf, fee, profit, adjusted, expected, liq, trailing, custom,
-                             profit2, adjusted2, expected2, custom_stop) -> None:
-
+        (
+            0.05,
+            0.9,
+            ExitType.NONE,
+            None,
+            False,
+            True,
+            0.09,
+            0.9,
+            ExitType.NONE,
+            lambda **kwargs: None,
+        ),
+    ],
+)
+def test_ft_stoploss_reached(
+    default_conf,
+    fee,
+    profit,
+    adjusted,
+    expected,
+    liq,
+    trailing,
+    custom,
+    profit2,
+    adjusted2,
+    expected2,
+    custom_stop,
+) -> None:
     strategy = StrategyResolver.load_strategy(default_conf)
     trade = Trade(
-        pair='ETH/BTC',
+        pair="ETH/BTC",
         stake_amount=0.01,
         amount=1,
         open_date=dt_now() - timedelta(hours=1),
         fee_open=fee.return_value,
         fee_close=fee.return_value,
-        exchange='binance',
+        exchange="binance",
         open_rate=1,
         liquidation_price=liq,
     )
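
The custom_stop callables in the table above all follow one contract: return a new stop distance relative to the current rate (for example -0.05 keeps the stop 5% below the current price), or return None to leave the previously set stoploss untouched - the "Error case - static stoploss in place" row. A standalone sketch combining those behaviours (illustrative only, not the strategy callback API itself):

from typing import Optional

def example_custom_stop(current_profit: float, **kwargs) -> Optional[float]:
    if current_profit < 0.6:
        return -0.10   # trail the stop 10% below the current rate
    return None        # keep whatever stoploss is already in place

With a second-call profit of 0.09 the current rate is 1.09, and a 10% distance puts the stop at roughly 0.98, consistent with the adjusted stoploss expected in the corresponding row above.
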
@@ -466,9 +563,14 @@ def test_ft_stoploss_reached(default_conf, fee, profit, adjusted, expected, liq,
 
     now = dt_now()
     current_rate = trade.open_rate * (1 + profit)
-    sl_flag = strategy.ft_stoploss_reached(current_rate=current_rate, trade=trade,
-                                           current_time=now, current_profit=profit,
-                                           force_stoploss=0, high=None)
+    sl_flag = strategy.ft_stoploss_reached(
+        current_rate=current_rate,
+        trade=trade,
+        current_time=now,
+        current_profit=profit,
+        force_stoploss=0,
+        high=None,
+    )
     assert isinstance(sl_flag, ExitCheckTuple)
     assert sl_flag.exit_type == expected
     if expected == ExitType.NONE:
@@ -478,9 +580,14 @@ def test_ft_stoploss_reached(default_conf, fee, profit, adjusted, expected, liq,
     assert round(trade.stop_loss, 2) == adjusted
     current_rate2 = trade.open_rate * (1 + profit2)
 
-    sl_flag = strategy.ft_stoploss_reached(current_rate=current_rate2, trade=trade,
-                                           current_time=now, current_profit=profit2,
-                                           force_stoploss=0, high=None)
+    sl_flag = strategy.ft_stoploss_reached(
+        current_rate=current_rate2,
+        trade=trade,
+        current_time=now,
+        current_profit=profit2,
+        force_stoploss=0,
+        high=None,
+    )
     assert sl_flag.exit_type == expected2
     if expected2 == ExitType.NONE:
         assert sl_flag.exit_flag is False
@@ -492,159 +599,145 @@ def test_ft_stoploss_reached(default_conf, fee, profit, adjusted, expected, liq,
 
 
 def test_custom_exit(default_conf, fee, caplog) -> None:
-
     strategy = StrategyResolver.load_strategy(default_conf)
     trade = Trade(
-        pair='ETH/BTC',
+        pair="ETH/BTC",
         stake_amount=0.01,
         amount=1,
         open_date=dt_now() - timedelta(hours=1),
         fee_open=fee.return_value,
         fee_close=fee.return_value,
-        exchange='binance',
+        exchange="binance",
         open_rate=1,
         leverage=1.0,
     )
 
     now = dt_now()
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
 
     assert res == []
 
     strategy.custom_exit = MagicMock(return_value=True)
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert res[0].exit_flag is True
     assert res[0].exit_type == ExitType.CUSTOM_EXIT
-    assert res[0].exit_reason == 'custom_exit'
+    assert res[0].exit_reason == "custom_exit"
 
-    strategy.custom_exit = MagicMock(return_value='hello world')
+    strategy.custom_exit = MagicMock(return_value="hello world")
 
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert res[0].exit_type == ExitType.CUSTOM_EXIT
     assert res[0].exit_flag is True
-    assert res[0].exit_reason == 'hello world'
+    assert res[0].exit_reason == "hello world"
 
     caplog.clear()
-    strategy.custom_exit = MagicMock(return_value='h' * CUSTOM_TAG_MAX_LENGTH * 2)
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    strategy.custom_exit = MagicMock(return_value="h" * CUSTOM_TAG_MAX_LENGTH * 2)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert res[0].exit_type == ExitType.CUSTOM_EXIT
     assert res[0].exit_flag is True
-    assert res[0].exit_reason == 'h' * (CUSTOM_TAG_MAX_LENGTH)
-    assert log_has_re('Custom exit reason returned from custom_exit is too long.*', caplog)
+    assert res[0].exit_reason == "h" * (CUSTOM_TAG_MAX_LENGTH)
+    assert log_has_re("Custom exit reason returned from custom_exit is too long.*", caplog)
 
 
 def test_should_sell(default_conf, fee) -> None:
-
     strategy = StrategyResolver.load_strategy(default_conf)
     trade = Trade(
-        pair='ETH/BTC',
+        pair="ETH/BTC",
         stake_amount=0.01,
         amount=1,
         open_date=dt_now() - timedelta(hours=1),
         fee_open=fee.return_value,
         fee_close=fee.return_value,
-        exchange='binance',
+        exchange="binance",
         open_rate=1,
         leverage=1.0,
     )
     now = dt_now()
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
 
     assert res == []
     strategy.min_roi_reached = MagicMock(return_value=True)
 
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert len(res) == 1
     assert res == [ExitCheckTuple(exit_type=ExitType.ROI)]
 
     strategy.min_roi_reached = MagicMock(return_value=True)
     strategy.ft_stoploss_reached = MagicMock(
-        return_value=ExitCheckTuple(exit_type=ExitType.STOP_LOSS))
+        return_value=ExitCheckTuple(exit_type=ExitType.STOP_LOSS)
+    )
 
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert len(res) == 2
     assert res == [
         ExitCheckTuple(exit_type=ExitType.STOP_LOSS),
         ExitCheckTuple(exit_type=ExitType.ROI),
-        ]
+    ]
 
-    strategy.custom_exit = MagicMock(return_value='hello world')
+    strategy.custom_exit = MagicMock(return_value="hello world")
     # custom-exit and exit-signal come first
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=False,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=False, low=None, high=None)
     assert len(res) == 3
     assert res == [
-        ExitCheckTuple(exit_type=ExitType.CUSTOM_EXIT, exit_reason='hello world'),
+        ExitCheckTuple(exit_type=ExitType.CUSTOM_EXIT, exit_reason="hello world"),
         ExitCheckTuple(exit_type=ExitType.STOP_LOSS),
         ExitCheckTuple(exit_type=ExitType.ROI),
-        ]
+    ]
 
     strategy.ft_stoploss_reached = MagicMock(
-            return_value=ExitCheckTuple(exit_type=ExitType.TRAILING_STOP_LOSS))
+        return_value=ExitCheckTuple(exit_type=ExitType.TRAILING_STOP_LOSS)
+    )
     # Regular exit signal
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=True,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=True, low=None, high=None)
     assert len(res) == 3
     assert res == [
         ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL),
         ExitCheckTuple(exit_type=ExitType.ROI),
         ExitCheckTuple(exit_type=ExitType.TRAILING_STOP_LOSS),
-        ]
+    ]
 
     # Regular exit signal, no ROI
     strategy.min_roi_reached = MagicMock(return_value=False)
-    res = strategy.should_exit(trade, 1, now,
-                               enter=False, exit_=True,
-                               low=None, high=None)
+    res = strategy.should_exit(trade, 1, now, enter=False, exit_=True, low=None, high=None)
     assert len(res) == 2
     assert res == [
         ExitCheckTuple(exit_type=ExitType.EXIT_SIGNAL),
         ExitCheckTuple(exit_type=ExitType.TRAILING_STOP_LOSS),
-        ]
+    ]
 
 
-@pytest.mark.parametrize('side', TRADE_SIDES)
+@pytest.mark.parametrize("side", TRADE_SIDES)
 def test_leverage_callback(default_conf, side) -> None:
-    default_conf['strategy'] = 'StrategyTestV2'
+    default_conf["strategy"] = "StrategyTestV2"
     strategy = StrategyResolver.load_strategy(default_conf)
 
-    assert strategy.leverage(
-        pair='XRP/USDT',
-        current_time=datetime.now(timezone.utc),
-        current_rate=2.2,
-        proposed_leverage=1.0,
-        max_leverage=5.0,
-        side=side,
-        entry_tag=None,
-        ) == 1
+    assert (
+        strategy.leverage(
+            pair="XRP/USDT",
+            current_time=datetime.now(timezone.utc),
+            current_rate=2.2,
+            proposed_leverage=1.0,
+            max_leverage=5.0,
+            side=side,
+            entry_tag=None,
+        )
+        == 1
+    )
 
-    default_conf['strategy'] = CURRENT_TEST_STRATEGY
+    default_conf["strategy"] = CURRENT_TEST_STRATEGY
     strategy = StrategyResolver.load_strategy(default_conf)
-    assert strategy.leverage(
-        pair='XRP/USDT',
-        current_time=datetime.now(timezone.utc),
-        current_rate=2.2,
-        proposed_leverage=1.0,
-        max_leverage=5.0,
-        side=side,
-        entry_tag='entry_tag_test',
-        ) == 3
+    assert (
+        strategy.leverage(
+            pair="XRP/USDT",
+            current_time=datetime.now(timezone.utc),
+            current_rate=2.2,
+            proposed_leverage=1.0,
+            max_leverage=5.0,
+            side=side,
+            entry_tag="entry_tag_test",
+        )
+        == 3
+    )
 
 
 def test_analyze_ticker_default(ohlcv_history, mocker, caplog) -> None:
@@ -653,29 +746,28 @@ def test_analyze_ticker_default(ohlcv_history, mocker, caplog) -> None:
     entry_mock = MagicMock(side_effect=lambda x, meta: x)
     exit_mock = MagicMock(side_effect=lambda x, meta: x)
     mocker.patch.multiple(
-        'freqtrade.strategy.interface.IStrategy',
+        "freqtrade.strategy.interface.IStrategy",
         advise_indicators=ind_mock,
         advise_entry=entry_mock,
         advise_exit=exit_mock,
-
     )
     strategy = StrategyTestV3({})
-    strategy.analyze_ticker(ohlcv_history, {'pair': 'ETH/BTC'})
+    strategy.analyze_ticker(ohlcv_history, {"pair": "ETH/BTC"})
     assert ind_mock.call_count == 1
     assert entry_mock.call_count == 1
     assert exit_mock.call_count == 1
 
-    assert log_has('TA Analysis Launched', caplog)
-    assert not log_has('Skipping TA Analysis for already analyzed candle', caplog)
+    assert log_has("TA Analysis Launched", caplog)
+    assert not log_has("Skipping TA Analysis for already analyzed candle", caplog)
     caplog.clear()
 
-    strategy.analyze_ticker(ohlcv_history, {'pair': 'ETH/BTC'})
+    strategy.analyze_ticker(ohlcv_history, {"pair": "ETH/BTC"})
     # Analysis happens again - analyze_ticker does not skip already analyzed candles
     assert ind_mock.call_count == 2
     assert entry_mock.call_count == 2
     assert exit_mock.call_count == 2
-    assert log_has('TA Analysis Launched', caplog)
-    assert not log_has('Skipping TA Analysis for already analyzed candle', caplog)
+    assert log_has("TA Analysis Launched", caplog)
+    assert not log_has("Skipping TA Analysis for already analyzed candle", caplog)
 
 
 def test__analyze_ticker_internal_skip_analyze(ohlcv_history, mocker, caplog) -> None:
@@ -684,65 +776,64 @@ def test__analyze_ticker_internal_skip_analyze(ohlcv_history, mocker, caplog) ->
     entry_mock = MagicMock(side_effect=lambda x, meta: x)
     exit_mock = MagicMock(side_effect=lambda x, meta: x)
     mocker.patch.multiple(
-        'freqtrade.strategy.interface.IStrategy',
+        "freqtrade.strategy.interface.IStrategy",
         advise_indicators=ind_mock,
         advise_entry=entry_mock,
         advise_exit=exit_mock,
-
     )
     strategy = StrategyTestV3({})
     strategy.dp = DataProvider({}, None, None)
     strategy.process_only_new_candles = True
 
-    ret = strategy._analyze_ticker_internal(ohlcv_history, {'pair': 'ETH/BTC'})
-    assert 'high' in ret.columns
-    assert 'low' in ret.columns
-    assert 'close' in ret.columns
+    ret = strategy._analyze_ticker_internal(ohlcv_history, {"pair": "ETH/BTC"})
+    assert "high" in ret.columns
+    assert "low" in ret.columns
+    assert "close" in ret.columns
     assert isinstance(ret, DataFrame)
     assert ind_mock.call_count == 1
     assert entry_mock.call_count == 1
     assert exit_mock.call_count == 1
-    assert log_has('TA Analysis Launched', caplog)
-    assert not log_has('Skipping TA Analysis for already analyzed candle', caplog)
+    assert log_has("TA Analysis Launched", caplog)
+    assert not log_has("Skipping TA Analysis for already analyzed candle", caplog)
     caplog.clear()
 
-    ret = strategy._analyze_ticker_internal(ohlcv_history, {'pair': 'ETH/BTC'})
+    ret = strategy._analyze_ticker_internal(ohlcv_history, {"pair": "ETH/BTC"})
     # No analysis happens as process_only_new_candles is true
     assert ind_mock.call_count == 1
     assert entry_mock.call_count == 1
     assert exit_mock.call_count == 1
     # only the skipped analysis adds the enter/exit columns, otherwise everything is mocked
-    assert 'enter_long' in ret.columns
-    assert 'exit_long' in ret.columns
-    assert ret['enter_long'].sum() == 0
-    assert ret['exit_long'].sum() == 0
-    assert not log_has('TA Analysis Launched', caplog)
-    assert log_has('Skipping TA Analysis for already analyzed candle', caplog)
+    assert "enter_long" in ret.columns
+    assert "exit_long" in ret.columns
+    assert ret["enter_long"].sum() == 0
+    assert ret["exit_long"].sum() == 0
+    assert not log_has("TA Analysis Launched", caplog)
+    assert log_has("Skipping TA Analysis for already analyzed candle", caplog)
 
 
 @pytest.mark.usefixtures("init_persistence")
 def test_is_pair_locked(default_conf):
-    PairLocks.timeframe = default_conf['timeframe']
+    PairLocks.timeframe = default_conf["timeframe"]
     PairLocks.use_db = True
     strategy = StrategyResolver.load_strategy(default_conf)
     # No lock should be present
     assert len(PairLocks.get_pair_locks(None)) == 0
 
-    pair = 'ETH/BTC'
+    pair = "ETH/BTC"
     assert not strategy.is_pair_locked(pair)
     strategy.lock_pair(pair, dt_now() + timedelta(minutes=4))
     # ETH/BTC locked for 4 minutes
     assert strategy.is_pair_locked(pair)
 
     # XRP/BTC should not be locked now
-    pair = 'XRP/BTC'
+    pair = "XRP/BTC"
     assert not strategy.is_pair_locked(pair)
 
     # Unlocking a pair that's not locked should not raise an error
     strategy.unlock_pair(pair)
 
     # Unlock original pair
-    pair = 'ETH/BTC'
+    pair = "ETH/BTC"
     strategy.unlock_pair(pair)
     assert not strategy.is_pair_locked(pair)
 
@@ -753,7 +844,7 @@ def test_is_pair_locked(default_conf):
     strategy.unlock_reason(reason)
     assert not strategy.is_pair_locked(pair)
 
-    pair = 'BTC/USDT'
+    pair = "BTC/USDT"
     # Lock until 14:30
     lock_time = datetime(2020, 5, 1, 14, 30, 0, tzinfo=timezone.utc)
     # Subtract 2 seconds, as locking rounds up to the next candle.
@@ -772,7 +863,7 @@ def test_is_pair_locked(default_conf):
     assert not strategy.is_pair_locked(pair, candle_date=lock_time + timedelta(minutes=10))
 
     # Change timeframe to 15m
-    strategy.timeframe = '15m'
+    strategy.timeframe = "15m"
     # Candle from 14:14 - lock goes until 14:30
     assert strategy.is_pair_locked(pair, candle_date=lock_time + timedelta(minutes=-16))
     assert strategy.is_pair_locked(pair, candle_date=lock_time + timedelta(minutes=-15, seconds=-2))
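
The "Subtract 2 seconds" comment above works because lock end times are effectively rounded up to the next candle boundary. A small sketch of that rounding for a 5m timeframe, using plain timestamp arithmetic rather than the library's own helper:

from datetime import datetime, timedelta, timezone

def round_up_to_candle(when, timeframe_minutes):
    # Round a timestamp up to the next candle open (a no-op on an exact boundary).
    tf_s = timeframe_minutes * 60
    ts = int(when.timestamp())
    return datetime.fromtimestamp(((ts + tf_s - 1) // tf_s) * tf_s, tz=timezone.utc)

lock_until = datetime(2020, 5, 1, 14, 30, 0, tzinfo=timezone.utc)
assert round_up_to_candle(lock_until - timedelta(seconds=2), 5) == lock_until
assert round_up_to_candle(lock_until, 5) == lock_until
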
@@ -781,45 +872,48 @@ def test_is_pair_locked(default_conf):
 
 
 def test_is_informative_pairs_callback(default_conf):
-    default_conf.update({'strategy': 'StrategyTestV2'})
+    default_conf.update({"strategy": "StrategyTestV2"})
     strategy = StrategyResolver.load_strategy(default_conf)
     # Should return empty
     # Uses fallback to base implementation
     assert [] == strategy.gather_informative_pairs()
 
 
-@pytest.mark.parametrize('error', [
-    ValueError, KeyError, Exception,
-])
+@pytest.mark.parametrize(
+    "error",
+    [
+        ValueError,
+        KeyError,
+        Exception,
+    ],
+)
 def test_strategy_safe_wrapper_error(caplog, error):
     def failing_method():
-        raise error('This is an error.')
+        raise error("This is an error.")
 
-    with pytest.raises(StrategyError, match=r'This is an error.'):
-        strategy_safe_wrapper(failing_method, message='DeadBeef')()
+    with pytest.raises(StrategyError, match=r"This is an error."):
+        strategy_safe_wrapper(failing_method, message="DeadBeef")()
 
-    assert log_has_re(r'DeadBeef.*', caplog)
-    ret = strategy_safe_wrapper(failing_method, message='DeadBeef', default_retval=True)()
+    assert log_has_re(r"DeadBeef.*", caplog)
+    ret = strategy_safe_wrapper(failing_method, message="DeadBeef", default_retval=True)()
 
     assert isinstance(ret, bool)
     assert ret
 
     caplog.clear()
     # Test suppressing error
-    ret = strategy_safe_wrapper(failing_method, message='DeadBeef', supress_error=True)()
-    assert log_has_re(r'DeadBeef.*', caplog)
+    ret = strategy_safe_wrapper(failing_method, message="DeadBeef", supress_error=True)()
+    assert log_has_re(r"DeadBeef.*", caplog)
 
 
-@pytest.mark.parametrize('value', [
-    1, 22, 55, True, False, {'a': 1, 'b': '112'},
-    [1, 2, 3, 4], (4, 2, 3, 6)
-])
+@pytest.mark.parametrize(
+    "value", [1, 22, 55, True, False, {"a": 1, "b": "112"}, [1, 2, 3, 4], (4, 2, 3, 6)]
+)
 def test_strategy_safe_wrapper(value):
-
     def working_method(argumentpassedin):
         return argumentpassedin
 
-    ret = strategy_safe_wrapper(working_method, message='DeadBeef')(value)
+    ret = strategy_safe_wrapper(working_method, message="DeadBeef")(value)
 
     assert isinstance(ret, type(value))
     assert ret == value
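
strategy_safe_wrapper is exercised above on both the failing and the passing path: user callbacks are wrapped so that arbitrary exceptions surface as StrategyError, unless a default_retval is given (returned instead) or supress_error is set (the source's spelling), in which case the error is only logged. A simplified sketch of that wrapping with hypothetical names:

import logging

logger = logging.getLogger(__name__)

class WrappedStrategyError(Exception):
    """Stand-in for StrategyError in this sketch."""

def safe_wrapper(fn, message="", default_retval=None, supress_error=False):
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as err:
            logger.warning("%s Error: %s", message, err)
            if default_retval is None and not supress_error:
                raise WrappedStrategyError(str(err)) from err
            return default_retval
    return wrapper

def failing():
    raise ValueError("This is an error.")

assert safe_wrapper(failing, message="DeadBeef", default_retval=True)() is True
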
@@ -839,7 +933,7 @@ def test_strategy_safe_wrapper_trade_copy(fee):
     trade = Trade.get_open_trades()[0]
     # Don't assert anything before strategy_wrapper.
     # This ensures that relationship loading works correctly.
-    ret = strategy_safe_wrapper(working_method, message='DeadBeef')(trade=trade)
+    ret = strategy_safe_wrapper(working_method, message="DeadBeef")(trade=trade)
     assert isinstance(ret, Trade)
     assert id(trade) != id(ret)
     # Did not modify the original order
@@ -852,35 +946,35 @@ def test_hyperopt_parameters():
     from skopt.space import Categorical, Integer, Real
 
     with pytest.raises(OperationalException, match=r"Name is determined.*"):
-        IntParameter(low=0, high=5, default=1, name='hello')
+        IntParameter(low=0, high=5, default=1, name="hello")
 
     with pytest.raises(OperationalException, match=r"IntParameter space must be.*"):
-        IntParameter(low=0, default=5, space='buy')
+        IntParameter(low=0, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"RealParameter space must be.*"):
-        RealParameter(low=0, default=5, space='buy')
+        RealParameter(low=0, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"DecimalParameter space must be.*"):
-        DecimalParameter(low=0, default=5, space='buy')
+        DecimalParameter(low=0, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"IntParameter space invalid\."):
-        IntParameter([0, 10], high=7, default=5, space='buy')
+        IntParameter([0, 10], high=7, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"RealParameter space invalid\."):
-        RealParameter([0, 10], high=7, default=5, space='buy')
+        RealParameter([0, 10], high=7, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"DecimalParameter space invalid\."):
-        DecimalParameter([0, 10], high=7, default=5, space='buy')
+        DecimalParameter([0, 10], high=7, default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"CategoricalParameter space must.*"):
-        CategoricalParameter(['aa'], default='aa', space='buy')
+        CategoricalParameter(["aa"], default="aa", space="buy")
 
     with pytest.raises(TypeError):
-        BaseParameter(opt_range=[0, 1], default=1, space='buy')
+        BaseParameter(opt_range=[0, 1], default=1, space="buy")
 
-    intpar = IntParameter(low=0, high=5, default=1, space='buy')
+    intpar = IntParameter(low=0, high=5, default=1, space="buy")
     assert intpar.value == 1
-    assert isinstance(intpar.get_space(''), Integer)
+    assert isinstance(intpar.get_space(""), Integer)
     assert isinstance(intpar.range, range)
     assert len(list(intpar.range)) == 1
     # Range contains ONLY the default / value.
@@ -890,13 +984,13 @@ def test_hyperopt_parameters():
     assert len(list(intpar.range)) == 6
     assert list(intpar.range) == [0, 1, 2, 3, 4, 5]
 
-    fltpar = RealParameter(low=0.0, high=5.5, default=1.0, space='buy')
+    fltpar = RealParameter(low=0.0, high=5.5, default=1.0, space="buy")
     assert fltpar.value == 1
-    assert isinstance(fltpar.get_space(''), Real)
+    assert isinstance(fltpar.get_space(""), Real)
 
-    fltpar = DecimalParameter(low=0.0, high=0.5, default=0.14, decimals=1, space='buy')
+    fltpar = DecimalParameter(low=0.0, high=0.5, default=0.14, decimals=1, space="buy")
     assert fltpar.value == 0.1
-    assert isinstance(fltpar.get_space(''), SKDecimal)
+    assert isinstance(fltpar.get_space(""), SKDecimal)
     assert isinstance(fltpar.range, list)
     assert len(list(fltpar.range)) == 1
     # Range contains ONLY the default / value.
@@ -905,21 +999,22 @@ def test_hyperopt_parameters():
     assert len(list(fltpar.range)) == 6
     assert list(fltpar.range) == [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
 
-    catpar = CategoricalParameter(['buy_rsi', 'buy_macd', 'buy_none'],
-                                  default='buy_macd', space='buy')
-    assert catpar.value == 'buy_macd'
-    assert isinstance(catpar.get_space(''), Categorical)
+    catpar = CategoricalParameter(
+        ["buy_rsi", "buy_macd", "buy_none"], default="buy_macd", space="buy"
+    )
+    assert catpar.value == "buy_macd"
+    assert isinstance(catpar.get_space(""), Categorical)
     assert isinstance(catpar.range, list)
     assert len(list(catpar.range)) == 1
     # Range contains ONLY the default / value.
     assert list(catpar.range) == [catpar.value]
     catpar.in_space = True
     assert len(list(catpar.range)) == 3
-    assert list(catpar.range) == ['buy_rsi', 'buy_macd', 'buy_none']
+    assert list(catpar.range) == ["buy_rsi", "buy_macd", "buy_none"]
 
-    boolpar = BooleanParameter(default=True, space='buy')
+    boolpar = BooleanParameter(default=True, space="buy")
     assert boolpar.value is True
-    assert isinstance(boolpar.get_space(''), Categorical)
+    assert isinstance(boolpar.get_space(""), Categorical)
     assert isinstance(boolpar.range, list)
     assert len(list(boolpar.range)) == 1
 
@@ -936,59 +1031,55 @@ def test_hyperopt_parameters():
 
 
 def test_auto_hyperopt_interface(default_conf):
-    default_conf.update({'strategy': 'HyperoptableStrategyV2'})
-    PairLocks.timeframe = default_conf['timeframe']
+    default_conf.update({"strategy": "HyperoptableStrategyV2"})
+    PairLocks.timeframe = default_conf["timeframe"]
     strategy = StrategyResolver.load_strategy(default_conf)
     strategy.ft_bot_start()
     with pytest.raises(OperationalException):
-        next(strategy.enumerate_parameters('deadBeef'))
+        next(strategy.enumerate_parameters("deadBeef"))
 
-    assert strategy.buy_rsi.value == strategy.buy_params['buy_rsi']
+    assert strategy.buy_rsi.value == strategy.buy_params["buy_rsi"]
     # PlusDI is NOT in the buy-params, so default should be used
     assert strategy.buy_plusdi.value == 0.5
-    assert strategy.sell_rsi.value == strategy.sell_params['sell_rsi']
+    assert strategy.sell_rsi.value == strategy.sell_params["sell_rsi"]
 
-    assert repr(strategy.sell_rsi) == 'IntParameter(74)'
+    assert repr(strategy.sell_rsi) == "IntParameter(74)"
 
     # Parameter is disabled - so value from sell_param dict will NOT be used.
     assert strategy.sell_minusdi.value == 0.5
     all_params = strategy.detect_all_parameters()
     assert isinstance(all_params, dict)
     # Only one buy param at class level
-    assert len(all_params['buy']) == 1
+    assert len(all_params["buy"]) == 1
     # Running detect params at instance level reveals both parameters.
-    assert len(list(detect_parameters(strategy, 'buy'))) == 2
-    assert len(all_params['sell']) == 2
+    assert len(list(detect_parameters(strategy, "buy"))) == 2
+    assert len(all_params["sell"]) == 2
     # Number of Hyperoptable parameters
-    assert all_params['count'] == 5
+    assert all_params["count"] == 5
 
-    strategy.__class__.sell_rsi = IntParameter([0, 10], default=5, space='buy')
+    strategy.__class__.sell_rsi = IntParameter([0, 10], default=5, space="buy")
 
     with pytest.raises(OperationalException, match=r"Inconclusive parameter.*"):
-        [x for x in detect_parameters(strategy, 'sell')]
+        [x for x in detect_parameters(strategy, "sell")]
 
 
 def test_auto_hyperopt_interface_loadparams(default_conf, mocker, caplog):
-    default_conf.update({'strategy': 'HyperoptableStrategy'})
-    del default_conf['stoploss']
-    del default_conf['minimal_roi']
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=True))
-    mocker.patch.object(Path, 'open')
+    default_conf.update({"strategy": "HyperoptableStrategy"})
+    del default_conf["stoploss"]
+    del default_conf["minimal_roi"]
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=True))
+    mocker.patch.object(Path, "open")
     expected_result = {
         "strategy_name": "HyperoptableStrategy",
         "params": {
             "stoploss": {
                 "stoploss": -0.05,
             },
-            "roi": {
-                "0": 0.2,
-                "1200": 0.01
-            }
-        }
+            "roi": {"0": 0.2, "1200": 0.01},
+        },
     }
-    mocker.patch('freqtrade.strategy.hyper.HyperoptTools.load_params',
-                 return_value=expected_result)
-    PairLocks.timeframe = default_conf['timeframe']
+    mocker.patch("freqtrade.strategy.hyper.HyperoptTools.load_params", return_value=expected_result)
+    PairLocks.timeframe = default_conf["timeframe"]
     strategy = StrategyResolver.load_strategy(default_conf)
     assert strategy.stoploss == -0.05
     assert strategy.minimal_roi == {0: 0.2, 1200: 0.01}
@@ -999,47 +1090,45 @@ def test_auto_hyperopt_interface_loadparams(default_conf, mocker, caplog):
             "stoploss": {
                 "stoploss": -0.05,
             },
-            "roi": {
-                "0": 0.2,
-                "1200": 0.01
-            }
-        }
+            "roi": {"0": 0.2, "1200": 0.01},
+        },
     }
 
-    mocker.patch('freqtrade.strategy.hyper.HyperoptTools.load_params',
-                 return_value=expected_result)
+    mocker.patch("freqtrade.strategy.hyper.HyperoptTools.load_params", return_value=expected_result)
     with pytest.raises(OperationalException, match="Invalid parameter file provided."):
         StrategyResolver.load_strategy(default_conf)
 
-    mocker.patch('freqtrade.strategy.hyper.HyperoptTools.load_params',
-                 MagicMock(side_effect=ValueError()))
+    mocker.patch(
+        "freqtrade.strategy.hyper.HyperoptTools.load_params", MagicMock(side_effect=ValueError())
+    )
 
     StrategyResolver.load_strategy(default_conf)
     assert log_has("Invalid parameter file format.", caplog)
 
 
-@pytest.mark.parametrize('function,raises', [
-    ('populate_entry_trend', False),
-    ('advise_entry', False),
-    ('populate_exit_trend', False),
-    ('advise_exit', False),
-])
+@pytest.mark.parametrize(
+    "function,raises",
+    [
+        ("populate_entry_trend", False),
+        ("advise_entry", False),
+        ("populate_exit_trend", False),
+        ("advise_exit", False),
+    ],
+)
 def test_pandas_warning_direct(ohlcv_history, function, raises, recwarn):
-
-    df = _STRATEGY.populate_indicators(ohlcv_history, {'pair': 'ETH/BTC'})
+    df = _STRATEGY.populate_indicators(ohlcv_history, {"pair": "ETH/BTC"})
     if raises:
         assert len(recwarn) == 1
         # https://github.com/pandas-dev/pandas/issues/56503
         # Fixed in 2.2.x
-        getattr(_STRATEGY, function)(df, {'pair': 'ETH/BTC'})
+        getattr(_STRATEGY, function)(df, {"pair": "ETH/BTC"})
     else:
         assert len(recwarn) == 0
 
-        getattr(_STRATEGY, function)(df, {'pair': 'ETH/BTC'})
+        getattr(_STRATEGY, function)(df, {"pair": "ETH/BTC"})
 
 
 def test_pandas_warning_through_analyze_pair(ohlcv_history, mocker, recwarn):
-
-    mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
-    _STRATEGY.analyze_pair('ETH/BTC')
+    mocker.patch.object(_STRATEGY.dp, "ohlcv", return_value=ohlcv_history)
+    _STRATEGY.analyze_pair("ETH/BTC")
     assert len(recwarn) == 0
diff --git a/tests/strategy/test_strategy_helpers.py b/tests/strategy/test_strategy_helpers.py
index b7fb7dea1..aeb46a4e4 100644
--- a/tests/strategy/test_strategy_helpers.py
+++ b/tests/strategy/test_strategy_helpers.py
@@ -10,217 +10,221 @@ from tests.conftest import generate_test_data, get_patched_exchange
 
 
 def test_merge_informative_pair():
-    data = generate_test_data('15m', 40)
-    informative = generate_test_data('1h', 40)
+    data = generate_test_data("15m", 40)
+    informative = generate_test_data("1h", 40)
     cols_inf = list(informative.columns)
 
-    result = merge_informative_pair(data, informative, '15m', '1h', ffill=True)
+    result = merge_informative_pair(data, informative, "15m", "1h", ffill=True)
     assert isinstance(result, pd.DataFrame)
     assert list(informative.columns) == cols_inf
     assert len(result) == len(data)
-    assert 'date' in result.columns
-    assert result['date'].equals(data['date'])
-    assert 'date_1h' in result.columns
+    assert "date" in result.columns
+    assert result["date"].equals(data["date"])
+    assert "date_1h" in result.columns
 
-    assert 'open' in result.columns
-    assert 'open_1h' in result.columns
-    assert result['open'].equals(data['open'])
+    assert "open" in result.columns
+    assert "open_1h" in result.columns
+    assert result["open"].equals(data["open"])
 
-    assert 'close' in result.columns
-    assert 'close_1h' in result.columns
-    assert result['close'].equals(data['close'])
+    assert "close" in result.columns
+    assert "close_1h" in result.columns
+    assert result["close"].equals(data["close"])
 
-    assert 'volume' in result.columns
-    assert 'volume_1h' in result.columns
-    assert result['volume'].equals(data['volume'])
+    assert "volume" in result.columns
+    assert "volume_1h" in result.columns
+    assert result["volume"].equals(data["volume"])
 
     # First 3 rows are empty
-    assert result.iloc[0]['date_1h'] is pd.NaT
-    assert result.iloc[1]['date_1h'] is pd.NaT
-    assert result.iloc[2]['date_1h'] is pd.NaT
+    assert result.iloc[0]["date_1h"] is pd.NaT
+    assert result.iloc[1]["date_1h"] is pd.NaT
+    assert result.iloc[2]["date_1h"] is pd.NaT
     # Next 4 rows contain the starting date (0:00)
-    assert result.iloc[3]['date_1h'] == result.iloc[0]['date']
-    assert result.iloc[4]['date_1h'] == result.iloc[0]['date']
-    assert result.iloc[5]['date_1h'] == result.iloc[0]['date']
-    assert result.iloc[6]['date_1h'] == result.iloc[0]['date']
+    assert result.iloc[3]["date_1h"] == result.iloc[0]["date"]
+    assert result.iloc[4]["date_1h"] == result.iloc[0]["date"]
+    assert result.iloc[5]["date_1h"] == result.iloc[0]["date"]
+    assert result.iloc[6]["date_1h"] == result.iloc[0]["date"]
     # The following rows contain the next hourly date (the original date of row 4)
-    assert result.iloc[7]['date_1h'] == result.iloc[4]['date']
-    assert result.iloc[8]['date_1h'] == result.iloc[4]['date']
+    assert result.iloc[7]["date_1h"] == result.iloc[4]["date"]
+    assert result.iloc[8]["date_1h"] == result.iloc[4]["date"]
 
-    informative = generate_test_data('1h', 40)
-    result = merge_informative_pair(data, informative, '15m', '1h', ffill=False)
+    informative = generate_test_data("1h", 40)
+    result = merge_informative_pair(data, informative, "15m", "1h", ffill=False)
     # First 3 rows are empty
-    assert result.iloc[0]['date_1h'] is pd.NaT
-    assert result.iloc[1]['date_1h'] is pd.NaT
-    assert result.iloc[2]['date_1h'] is pd.NaT
+    assert result.iloc[0]["date_1h"] is pd.NaT
+    assert result.iloc[1]["date_1h"] is pd.NaT
+    assert result.iloc[2]["date_1h"] is pd.NaT
     # Without ffill, only the first matching row gets the starting date (0:00)
-    assert result.iloc[3]['date_1h'] == result.iloc[0]['date']
-    assert result.iloc[4]['date_1h'] is pd.NaT
-    assert result.iloc[5]['date_1h'] is pd.NaT
-    assert result.iloc[6]['date_1h'] is pd.NaT
+    assert result.iloc[3]["date_1h"] == result.iloc[0]["date"]
+    assert result.iloc[4]["date_1h"] is pd.NaT
+    assert result.iloc[5]["date_1h"] is pd.NaT
+    assert result.iloc[6]["date_1h"] is pd.NaT
     # Without ffill, only row 7 gets the next hourly date (the original date of row 4)
-    assert result.iloc[7]['date_1h'] == result.iloc[4]['date']
-    assert result.iloc[8]['date_1h'] is pd.NaT
+    assert result.iloc[7]["date_1h"] == result.iloc[4]["date"]
+    assert result.iloc[8]["date_1h"] is pd.NaT
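
The NaT/offset pattern asserted above is merge_informative_pair's lookahead protection: a 1h candle is only merged onto the 15m rows once it has closed, which amounts to shifting its date forward by the timeframe difference (45 minutes here) before merging; ffill then decides whether the value is carried across the following 15m rows. A worked sketch of that shift in plain pandas, not the helper itself:

import pandas as pd

base = pd.date_range("2024-01-01 00:00", periods=9, freq="15min")    # 15m candle opens
inf_open = pd.Timestamp("2024-01-01 00:00")                          # 1h candle open

# Shift the informative date by (1h - 15m) so it only lines up once the 1h candle has closed.
merge_key = inf_open + pd.Timedelta("1h") - pd.Timedelta("15min")    # -> 00:45

first_row_with_data = next(i for i, d in enumerate(base) if d >= merge_key)
assert first_row_with_data == 3   # rows 0-2 stay NaT, row 3 picks up the 00:00 candle
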
 
 
 def test_merge_informative_pair_weekly():
     # Covers roughly 2 months - until 2023-01-10
-    data = generate_test_data('1h', 1040, '2022-11-28')
-    informative = generate_test_data('1w', 40, '2022-11-01')
-    informative['day'] = informative['date'].dt.day_name()
+    data = generate_test_data("1h", 1040, "2022-11-28")
+    informative = generate_test_data("1w", 40, "2022-11-01")
+    informative["day"] = informative["date"].dt.day_name()
 
-    result = merge_informative_pair(data, informative, '1h', '1w', ffill=True)
+    result = merge_informative_pair(data, informative, "1h", "1w", ffill=True)
     assert isinstance(result, pd.DataFrame)
     # 2022-12-24 is a Saturday
-    candle1 = result.loc[(result['date'] == '2022-12-24T22:00:00.000Z')]
-    assert candle1.iloc[0]['date'] == pd.Timestamp('2022-12-24T22:00:00.000Z')
-    assert candle1.iloc[0]['date_1w'] == pd.Timestamp('2022-12-12T00:00:00.000Z')
+    candle1 = result.loc[(result["date"] == "2022-12-24T22:00:00.000Z")]
+    assert candle1.iloc[0]["date"] == pd.Timestamp("2022-12-24T22:00:00.000Z")
+    assert candle1.iloc[0]["date_1w"] == pd.Timestamp("2022-12-12T00:00:00.000Z")
 
-    candle2 = result.loc[(result['date'] == '2022-12-24T23:00:00.000Z')]
-    assert candle2.iloc[0]['date'] == pd.Timestamp('2022-12-24T23:00:00.000Z')
-    assert candle2.iloc[0]['date_1w'] == pd.Timestamp('2022-12-12T00:00:00.000Z')
+    candle2 = result.loc[(result["date"] == "2022-12-24T23:00:00.000Z")]
+    assert candle2.iloc[0]["date"] == pd.Timestamp("2022-12-24T23:00:00.000Z")
+    assert candle2.iloc[0]["date_1w"] == pd.Timestamp("2022-12-12T00:00:00.000Z")
 
     # 2022-12-25 is a Sunday
-    candle3 = result.loc[(result['date'] == '2022-12-25T22:00:00.000Z')]
-    assert candle3.iloc[0]['date'] == pd.Timestamp('2022-12-25T22:00:00.000Z')
+    candle3 = result.loc[(result["date"] == "2022-12-25T22:00:00.000Z")]
+    assert candle3.iloc[0]["date"] == pd.Timestamp("2022-12-25T22:00:00.000Z")
     # Still old candle
-    assert candle3.iloc[0]['date_1w'] == pd.Timestamp('2022-12-12T00:00:00.000Z')
+    assert candle3.iloc[0]["date_1w"] == pd.Timestamp("2022-12-12T00:00:00.000Z")
 
-    candle4 = result.loc[(result['date'] == '2022-12-25T23:00:00.000Z')]
-    assert candle4.iloc[0]['date'] == pd.Timestamp('2022-12-25T23:00:00.000Z')
-    assert candle4.iloc[0]['date_1w'] == pd.Timestamp('2022-12-19T00:00:00.000Z')
+    candle4 = result.loc[(result["date"] == "2022-12-25T23:00:00.000Z")]
+    assert candle4.iloc[0]["date"] == pd.Timestamp("2022-12-25T23:00:00.000Z")
+    assert candle4.iloc[0]["date_1w"] == pd.Timestamp("2022-12-19T00:00:00.000Z")
 
 
 def test_merge_informative_pair_monthly():
     # Covers roughly 2 months - until 2023-01-10
-    data = generate_test_data('1h', 1040, '2022-11-28')
-    informative = generate_test_data('1M', 40, '2022-01-01')
+    data = generate_test_data("1h", 1040, "2022-11-28")
+    informative = generate_test_data("1M", 40, "2022-01-01")
 
-    result = merge_informative_pair(data, informative, '1h', '1M', ffill=True)
+    result = merge_informative_pair(data, informative, "1h", "1M", ffill=True)
     assert isinstance(result, pd.DataFrame)
-    candle1 = result.loc[(result['date'] == '2022-12-31T22:00:00.000Z')]
-    assert candle1.iloc[0]['date'] == pd.Timestamp('2022-12-31T22:00:00.000Z')
-    assert candle1.iloc[0]['date_1M'] == pd.Timestamp('2022-11-01T00:00:00.000Z')
+    candle1 = result.loc[(result["date"] == "2022-12-31T22:00:00.000Z")]
+    assert candle1.iloc[0]["date"] == pd.Timestamp("2022-12-31T22:00:00.000Z")
+    assert candle1.iloc[0]["date_1M"] == pd.Timestamp("2022-11-01T00:00:00.000Z")
 
-    candle2 = result.loc[(result['date'] == '2022-12-31T23:00:00.000Z')]
-    assert candle2.iloc[0]['date'] == pd.Timestamp('2022-12-31T23:00:00.000Z')
-    assert candle2.iloc[0]['date_1M'] == pd.Timestamp('2022-12-01T00:00:00.000Z')
+    candle2 = result.loc[(result["date"] == "2022-12-31T23:00:00.000Z")]
+    assert candle2.iloc[0]["date"] == pd.Timestamp("2022-12-31T23:00:00.000Z")
+    assert candle2.iloc[0]["date_1M"] == pd.Timestamp("2022-12-01T00:00:00.000Z")
 
     # Candle is empty, as the monthly candle is not yet available for merging at this point.
-    candle3 = result.loc[(result['date'] == '2022-11-30T22:00:00.000Z')]
-    assert candle3.iloc[0]['date'] == pd.Timestamp('2022-11-30T22:00:00.000Z')
-    assert candle3.iloc[0]['date_1M'] is pd.NaT
+    candle3 = result.loc[(result["date"] == "2022-11-30T22:00:00.000Z")]
+    assert candle3.iloc[0]["date"] == pd.Timestamp("2022-11-30T22:00:00.000Z")
+    assert candle3.iloc[0]["date_1M"] is pd.NaT
 
     # First candle with 1M data merged.
-    candle4 = result.loc[(result['date'] == '2022-11-30T23:00:00.000Z')]
-    assert candle4.iloc[0]['date'] == pd.Timestamp('2022-11-30T23:00:00.000Z')
-    assert candle4.iloc[0]['date_1M'] == pd.Timestamp('2022-11-01T00:00:00.000Z')
+    candle4 = result.loc[(result["date"] == "2022-11-30T23:00:00.000Z")]
+    assert candle4.iloc[0]["date"] == pd.Timestamp("2022-11-30T23:00:00.000Z")
+    assert candle4.iloc[0]["date_1M"] == pd.Timestamp("2022-11-01T00:00:00.000Z")
 
 
 def test_merge_informative_pair_same():
-    data = generate_test_data('15m', 40)
-    informative = generate_test_data('15m', 40)
+    data = generate_test_data("15m", 40)
+    informative = generate_test_data("15m", 40)
 
-    result = merge_informative_pair(data, informative, '15m', '15m', ffill=True)
+    result = merge_informative_pair(data, informative, "15m", "15m", ffill=True)
     assert isinstance(result, pd.DataFrame)
     assert len(result) == len(data)
-    assert 'date' in result.columns
-    assert result['date'].equals(data['date'])
-    assert 'date_15m' in result.columns
+    assert "date" in result.columns
+    assert result["date"].equals(data["date"])
+    assert "date_15m" in result.columns
 
-    assert 'open' in result.columns
-    assert 'open_15m' in result.columns
-    assert result['open'].equals(data['open'])
+    assert "open" in result.columns
+    assert "open_15m" in result.columns
+    assert result["open"].equals(data["open"])
 
-    assert 'close' in result.columns
-    assert 'close_15m' in result.columns
-    assert result['close'].equals(data['close'])
+    assert "close" in result.columns
+    assert "close_15m" in result.columns
+    assert result["close"].equals(data["close"])
 
-    assert 'volume' in result.columns
-    assert 'volume_15m' in result.columns
-    assert result['volume'].equals(data['volume'])
+    assert "volume" in result.columns
+    assert "volume_15m" in result.columns
+    assert result["volume"].equals(data["volume"])
 
     # Dates match 1:1
-    assert result['date_15m'].equals(result['date'])
+    assert result["date_15m"].equals(result["date"])
 
 
 def test_merge_informative_pair_lower():
-    data = generate_test_data('1h', 40)
-    informative = generate_test_data('15m', 40)
+    data = generate_test_data("1h", 40)
+    informative = generate_test_data("15m", 40)
 
     with pytest.raises(ValueError, match=r"Tried to merge a faster timeframe .*"):
-        merge_informative_pair(data, informative, '1h', '15m', ffill=True)
+        merge_informative_pair(data, informative, "1h", "15m", ffill=True)
 
 
 def test_merge_informative_pair_empty():
-    data = generate_test_data('1h', 40)
+    data = generate_test_data("1h", 40)
     informative = pd.DataFrame(columns=data.columns)
 
-    result = merge_informative_pair(data, informative, '1h', '2h', ffill=True)
-    assert result['date'].equals(data['date'])
+    result = merge_informative_pair(data, informative, "1h", "2h", ffill=True)
+    assert result["date"].equals(data["date"])
 
     assert list(result.columns) == [
-        'date',
-        'open',
-        'high',
-        'low',
-        'close',
-        'volume',
-        'date_2h',
-        'open_2h',
-        'high_2h',
-        'low_2h',
-        'close_2h',
-        'volume_2h'
+        "date",
+        "open",
+        "high",
+        "low",
+        "close",
+        "volume",
+        "date_2h",
+        "open_2h",
+        "high_2h",
+        "low_2h",
+        "close_2h",
+        "volume_2h",
     ]
     # We merge an empty dataframe, so all values should be NaN
-    for col in ['date_2h', 'open_2h', 'high_2h', 'low_2h', 'close_2h', 'volume_2h']:
+    for col in ["date_2h", "open_2h", "high_2h", "low_2h", "close_2h", "volume_2h"]:
         assert result[col].isnull().all()
 
 
 def test_merge_informative_pair_suffix():
-    data = generate_test_data('15m', 20)
-    informative = generate_test_data('1h', 20)
+    data = generate_test_data("15m", 20)
+    informative = generate_test_data("1h", 20)
 
-    result = merge_informative_pair(data, informative, '15m', '1h',
-                                    append_timeframe=False, suffix="suf")
+    result = merge_informative_pair(
+        data, informative, "15m", "1h", append_timeframe=False, suffix="suf"
+    )
 
-    assert 'date' in result.columns
-    assert result['date'].equals(data['date'])
-    assert 'date_suf' in result.columns
+    assert "date" in result.columns
+    assert result["date"].equals(data["date"])
+    assert "date_suf" in result.columns
 
-    assert 'open_suf' in result.columns
-    assert 'open_1h' not in result.columns
+    assert "open_suf" in result.columns
+    assert "open_1h" not in result.columns
 
     assert list(result.columns) == [
-        'date',
-        'open',
-        'high',
-        'low',
-        'close',
-        'volume',
-        'date_suf',
-        'open_suf',
-        'high_suf',
-        'low_suf',
-        'close_suf',
-        'volume_suf'
+        "date",
+        "open",
+        "high",
+        "low",
+        "close",
+        "volume",
+        "date_suf",
+        "open_suf",
+        "high_suf",
+        "low_suf",
+        "close_suf",
+        "volume_suf",
     ]
 
 
 def test_merge_informative_pair_suffix_append_timeframe():
-    data = generate_test_data('15m', 20)
-    informative = generate_test_data('1h', 20)
+    data = generate_test_data("15m", 20)
+    informative = generate_test_data("1h", 20)
 
     with pytest.raises(ValueError, match=r"You can not specify `append_timeframe` .*"):
-        merge_informative_pair(data, informative, '15m', '1h', suffix="suf")
+        merge_informative_pair(data, informative, "15m", "1h", suffix="suf")
 
 
-@pytest.mark.parametrize("side,profitrange", [
-    # profit range for long is [-1, inf] while for shorts is [-inf, 1]
-    ("long", [-0.99, 2, 30]),
-    ("short", [-2.0, 0.99, 30]),
-])
+@pytest.mark.parametrize(
+    "side,profitrange",
+    [
+        # profit range for long is [-1, inf] while for shorts is [-inf, 1]
+        ("long", [-0.99, 2, 30]),
+        ("short", [-2.0, 0.99, 30]),
+    ],
+)
 def test_stoploss_from_open(side, profitrange):
     open_price_ranges = [
         [0.01, 1.00, 30],
@@ -231,8 +235,7 @@ def test_stoploss_from_open(side, profitrange):
     for open_range in open_price_ranges:
         for open_price in np.linspace(*open_range):
             for desired_stop in np.linspace(-0.50, 0.50, 30):
-
-                if side == 'long':
+                if side == "long":
                     # -1 is not a valid current_profit, should return 1
                     assert stoploss_from_open(desired_stop, -1) == 1
                 else:
@@ -240,7 +243,7 @@ def test_stoploss_from_open(side, profitrange):
                     assert stoploss_from_open(desired_stop, 1, True) == 1
 
                 for current_profit in np.linspace(*profitrange):
-                    if side == 'long':
+                    if side == "long":
                         current_price = open_price * (1 + current_profit)
                         expected_stop_price = open_price * (1 + desired_stop)
                         stoploss = stoploss_from_open(desired_stop, current_profit)
@@ -254,43 +257,45 @@ def test_stoploss_from_open(side, profitrange):
                     assert stoploss >= 0
                     # Technically the formula can yield values greater than 1 for shorts
                     # even though it doesn't make sense because the position would be liquidated
-                    if side == 'long':
+                    if side == "long":
                         assert stoploss <= 1
 
                     # there is no correct answer if the expected stop price is above
                     # the current price
-                    if ((side == 'long' and expected_stop_price > current_price)
-                            or (side == 'short' and expected_stop_price < current_price)):
+                    if (side == "long" and expected_stop_price > current_price) or (
+                        side == "short" and expected_stop_price < current_price
+                    ):
                         assert stoploss == 0
                     else:
                         assert pytest.approx(stop_price) == expected_stop_price
 
 
-@pytest.mark.parametrize("side,rel_stop,curr_profit,leverage,expected", [
-    # profit range for long is [-1, inf] while for shorts is [-inf, 1]
-    ("long", 0, -1, 1, 1),
-    ("long", 0, 0.1, 1, 0.09090909),
-    ("long", -0.1, 0.1, 1, 0.18181818),
-    ("long", 0.1, 0.2, 1, 0.08333333),
-    ("long", 0.1, 0.5, 1, 0.266666666),
-    ("long", 0.1, 5, 1, 0.816666666),  # 500% profit, set stoploss to 10% above open price
-    ("long", 0, 5, 10,  3.3333333),  # 500% profit, set stoploss break even
-    ("long", 0.1, 5, 10,  3.26666666),  # 500% profit, set stoploss to 10% above open price
-    ("long", -0.1, 5, 10,  3.3999999),  # 500% profit, set stoploss to 10% belowopen price
-
-    ("short", 0, 0.1, 1, 0.1111111),
-    ("short", -0.1, 0.1, 1, 0.2222222),
-    ("short", 0.1, 0.2, 1, 0.125),
-    ("short", 0.1, 1, 1, 1),
-    ("short", -0.01, 5, 10, 10.01999999),  # 500% profit at 10x
-])
+@pytest.mark.parametrize(
+    "side,rel_stop,curr_profit,leverage,expected",
+    [
+        # profit range for long is [-1, inf] while for shorts is [-inf, 1]
+        ("long", 0, -1, 1, 1),
+        ("long", 0, 0.1, 1, 0.09090909),
+        ("long", -0.1, 0.1, 1, 0.18181818),
+        ("long", 0.1, 0.2, 1, 0.08333333),
+        ("long", 0.1, 0.5, 1, 0.266666666),
+        ("long", 0.1, 5, 1, 0.816666666),  # 500% profit, set stoploss to 10% above open price
+        ("long", 0, 5, 10, 3.3333333),  # 500% profit, set stoploss break even
+        ("long", 0.1, 5, 10, 3.26666666),  # 500% profit, set stoploss to 10% above open price
+        ("long", -0.1, 5, 10, 3.3999999),  # 500% profit, set stoploss to 10% belowopen price
+        ("short", 0, 0.1, 1, 0.1111111),
+        ("short", -0.1, 0.1, 1, 0.2222222),
+        ("short", 0.1, 0.2, 1, 0.125),
+        ("short", 0.1, 1, 1, 1),
+        ("short", -0.01, 5, 10, 10.01999999),  # 500% profit at 10x
+    ],
+)
 def test_stoploss_from_open_leverage(side, rel_stop, curr_profit, leverage, expected):
-
-    stoploss = stoploss_from_open(rel_stop, curr_profit, side == 'short', leverage)
+    stoploss = stoploss_from_open(rel_stop, curr_profit, side == "short", leverage)
     assert pytest.approx(stoploss) == expected
     open_rate = 100
     if stoploss != 1:
-        if side == 'long':
+        if side == "long":
             current_rate = open_rate * (1 + curr_profit / leverage)
             stop = current_rate * (1 - stoploss / leverage)
             assert pytest.approx(stop) == open_rate * (1 + rel_stop / leverage)
@@ -322,73 +327,79 @@ def test_stoploss_from_absolute():
     assert pytest.approx(stoploss_from_absolute(100, 1, is_short=True, leverage=5)) == 5
 
 
-@pytest.mark.parametrize('trading_mode', ['futures', 'spot'])
+@pytest.mark.parametrize("trading_mode", ["futures", "spot"])
 def test_informative_decorator(mocker, default_conf_usdt, trading_mode):
     candle_def = CandleType.get_default(trading_mode)
-    default_conf_usdt['candle_type_def'] = candle_def
-    test_data_5m = generate_test_data('5m', 40)
-    test_data_30m = generate_test_data('30m', 40)
-    test_data_1h = generate_test_data('1h', 40)
+    default_conf_usdt["candle_type_def"] = candle_def
+    test_data_5m = generate_test_data("5m", 40)
+    test_data_30m = generate_test_data("30m", 40)
+    test_data_1h = generate_test_data("1h", 40)
     data = {
-        ('XRP/USDT', '5m', candle_def): test_data_5m,
-        ('XRP/USDT', '30m', candle_def): test_data_30m,
-        ('XRP/USDT', '1h', candle_def): test_data_1h,
-        ('XRP/BTC', '1h', candle_def): test_data_1h,  # from {base}/BTC
-        ('LTC/USDT', '5m', candle_def): test_data_5m,
-        ('LTC/USDT', '30m', candle_def): test_data_30m,
-        ('LTC/USDT', '1h', candle_def): test_data_1h,
-        ('LTC/BTC', '1h', candle_def): test_data_1h,  # from {base}/BTC
-        ('NEO/USDT', '30m', candle_def): test_data_30m,
-        ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m,  # Explicit request with '' as candletype
-        ('NEO/USDT', '15m', candle_def): test_data_5m,  # Explicit request with '' as candletype
-        ('NEO/USDT', '1h', candle_def): test_data_1h,
-        ('ETH/USDT', '1h', candle_def): test_data_1h,
-        ('ETH/USDT', '30m', candle_def): test_data_30m,
-        ('ETH/BTC', '1h', CandleType.SPOT): test_data_1h,  # Explicitly selected as spot
+        ("XRP/USDT", "5m", candle_def): test_data_5m,
+        ("XRP/USDT", "30m", candle_def): test_data_30m,
+        ("XRP/USDT", "1h", candle_def): test_data_1h,
+        ("XRP/BTC", "1h", candle_def): test_data_1h,  # from {base}/BTC
+        ("LTC/USDT", "5m", candle_def): test_data_5m,
+        ("LTC/USDT", "30m", candle_def): test_data_30m,
+        ("LTC/USDT", "1h", candle_def): test_data_1h,
+        ("LTC/BTC", "1h", candle_def): test_data_1h,  # from {base}/BTC
+        ("NEO/USDT", "30m", candle_def): test_data_30m,
+        ("NEO/USDT", "5m", CandleType.SPOT): test_data_5m,  # Explicit request with '' as candletype
+        ("NEO/USDT", "15m", candle_def): test_data_5m,  # Explicit request with '' as candletype
+        ("NEO/USDT", "1h", candle_def): test_data_1h,
+        ("ETH/USDT", "1h", candle_def): test_data_1h,
+        ("ETH/USDT", "30m", candle_def): test_data_30m,
+        ("ETH/BTC", "1h", CandleType.SPOT): test_data_1h,  # Explicitly selected as spot
     }
-    default_conf_usdt['strategy'] = 'InformativeDecoratorTest'
+    default_conf_usdt["strategy"] = "InformativeDecoratorTest"
     strategy = StrategyResolver.load_strategy(default_conf_usdt)
     exchange = get_patched_exchange(mocker, default_conf_usdt)
     strategy.dp = DataProvider({}, exchange, None)
-    mocker.patch.object(strategy.dp, 'current_whitelist', return_value=[
-        'XRP/USDT', 'LTC/USDT', 'NEO/USDT'
-    ])
+    mocker.patch.object(
+        strategy.dp, "current_whitelist", return_value=["XRP/USDT", "LTC/USDT", "NEO/USDT"]
+    )
 
-    assert len(strategy._ft_informative) == 7   # Equal to number of decorators used
+    assert len(strategy._ft_informative) == 7  # Equal to number of decorators used
     informative_pairs = [
-        ('XRP/USDT', '1h', candle_def),
-        ('XRP/BTC', '1h', candle_def),
-        ('LTC/USDT', '1h', candle_def),
-        ('LTC/BTC', '1h', candle_def),
-        ('XRP/USDT', '30m', candle_def),
-        ('LTC/USDT', '30m', candle_def),
-        ('NEO/USDT', '1h', candle_def),
-        ('NEO/USDT', '30m', candle_def),
-        ('NEO/USDT', '5m', candle_def),
-        ('NEO/USDT', '15m', candle_def),
-        ('NEO/USDT', '2h', CandleType.FUTURES),
-        ('ETH/BTC', '1h', CandleType.SPOT),  # One candle remains as spot
-        ('ETH/USDT', '30m', candle_def)]
+        ("XRP/USDT", "1h", candle_def),
+        ("XRP/BTC", "1h", candle_def),
+        ("LTC/USDT", "1h", candle_def),
+        ("LTC/BTC", "1h", candle_def),
+        ("XRP/USDT", "30m", candle_def),
+        ("LTC/USDT", "30m", candle_def),
+        ("NEO/USDT", "1h", candle_def),
+        ("NEO/USDT", "30m", candle_def),
+        ("NEO/USDT", "5m", candle_def),
+        ("NEO/USDT", "15m", candle_def),
+        ("NEO/USDT", "2h", CandleType.FUTURES),
+        ("ETH/BTC", "1h", CandleType.SPOT),  # One candle remains as spot
+        ("ETH/USDT", "30m", candle_def),
+    ]
     for inf_pair in informative_pairs:
         assert inf_pair in strategy.gather_informative_pairs()
 
     def test_historic_ohlcv(pair, timeframe, candle_type):
         return data[
-            (pair, timeframe or strategy.timeframe, CandleType.from_string(candle_type))].copy()
+            (pair, timeframe or strategy.timeframe, CandleType.from_string(candle_type))
+        ].copy()
 
-    mocker.patch('freqtrade.data.dataprovider.DataProvider.historic_ohlcv',
-                 side_effect=test_historic_ohlcv)
+    mocker.patch(
+        "freqtrade.data.dataprovider.DataProvider.historic_ohlcv", side_effect=test_historic_ohlcv
+    )
 
     analyzed = strategy.advise_all_indicators(
-        {p: data[(p, strategy.timeframe, candle_def)] for p in ('XRP/USDT', 'LTC/USDT')})
+        {p: data[(p, strategy.timeframe, candle_def)] for p in ("XRP/USDT", "LTC/USDT")}
+    )
     expected_columns = [
-        'rsi_1h', 'rsi_30m',                    # Stacked informative decorators
-        'neo_usdt_rsi_1h',                      # NEO 1h informative
-        'rsi_NEO_USDT_neo_usdt_NEO/USDT_30m',   # Column formatting
-        'rsi_from_callable',                    # Custom column formatter
-        'eth_btc_rsi_1h',                       # Quote currency not matching stake currency
-        'rsi', 'rsi_less',                      # Non-informative columns
-        'rsi_5m',                               # Manual informative dataframe
+        "rsi_1h",
+        "rsi_30m",  # Stacked informative decorators
+        "neo_usdt_rsi_1h",  # NEO 1h informative
+        "rsi_NEO_USDT_neo_usdt_NEO/USDT_30m",  # Column formatting
+        "rsi_from_callable",  # Custom column formatter
+        "eth_btc_rsi_1h",  # Quote currency not matching stake currency
+        "rsi",
+        "rsi_less",  # Non-informative columns
+        "rsi_5m",  # Manual informative dataframe
     ]
     for _, dataframe in analyzed.items():
         for col in expected_columns:
diff --git a/tests/strategy/test_strategy_loading.py b/tests/strategy/test_strategy_loading.py
index 33245cc5f..9b143ace6 100644
--- a/tests/strategy/test_strategy_loading.py
+++ b/tests/strategy/test_strategy_loading.py
@@ -14,7 +14,7 @@ from tests.conftest import CURRENT_TEST_STRATEGY, log_has, log_has_re
 
 
 def test_search_strategy():
-    default_location = Path(__file__).parent / 'strats'
+    default_location = Path(__file__).parent / "strats"
 
     s, _ = StrategyResolver._search_object(
         directory=default_location,
@@ -25,7 +25,7 @@ def test_search_strategy():
 
     s, _ = StrategyResolver._search_object(
         directory=default_location,
-        object_name='NotFoundStrategy',
+        object_name="NotFoundStrategy",
         add_source=True,
     )
     assert s is None
@@ -46,9 +46,9 @@ def test_search_all_strategies_with_failed():
     assert len(strategies) == 14
     # with enum_failed=True search_all_objects() shall find 13 good strategies
     # and 1 which fails to load
-    assert len([x for x in strategies if x['class'] is not None]) == 13
+    assert len([x for x in strategies if x["class"] is not None]) == 13
 
-    assert len([x for x in strategies if x['class'] is None]) == 1
+    assert len([x for x in strategies if x["class"] is None]) == 1
 
     directory = Path(__file__).parent / "strats_nonexistingdir"
     strategies = StrategyResolver._search_all_objects(directory, enum_failed=True)
@@ -56,123 +56,126 @@ def test_search_all_strategies_with_failed():
 
 
 def test_load_strategy(default_conf, dataframe_1m):
-    default_conf.update({'strategy': 'SampleStrategy',
-                         'strategy_path': str(Path(__file__).parents[2] / 'freqtrade/templates')
-                         })
+    default_conf.update(
+        {
+            "strategy": "SampleStrategy",
+            "strategy_path": str(Path(__file__).parents[2] / "freqtrade/templates"),
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
     assert isinstance(strategy.__source__, str)
-    assert 'class SampleStrategy' in strategy.__source__
+    assert "class SampleStrategy" in strategy.__source__
     assert isinstance(strategy.__file__, str)
-    assert 'rsi' in strategy.advise_indicators(dataframe_1m, {'pair': 'ETH/BTC'})
+    assert "rsi" in strategy.advise_indicators(dataframe_1m, {"pair": "ETH/BTC"})
 
 
 def test_load_strategy_base64(dataframe_1m, caplog, default_conf):
-    filepath = Path(__file__).parents[2] / 'freqtrade/templates/sample_strategy.py'
+    filepath = Path(__file__).parents[2] / "freqtrade/templates/sample_strategy.py"
     encoded_string = urlsafe_b64encode(filepath.read_bytes()).decode("utf-8")
-    default_conf.update({'strategy': f'SampleStrategy:{encoded_string}'})
+    default_conf.update({"strategy": f"SampleStrategy:{encoded_string}"})
 
     strategy = StrategyResolver.load_strategy(default_conf)
-    assert 'rsi' in strategy.advise_indicators(dataframe_1m, {'pair': 'ETH/BTC'})
+    assert "rsi" in strategy.advise_indicators(dataframe_1m, {"pair": "ETH/BTC"})
     # Make sure strategy was loaded from base64 (using temp directory)!!
-    assert log_has_re(r"Using resolved strategy SampleStrategy from '"
-                      r".*(/|\\).*(/|\\)SampleStrategy\.py'\.\.\.", caplog)
+    assert log_has_re(
+        r"Using resolved strategy SampleStrategy from '"
+        r".*(/|\\).*(/|\\)SampleStrategy\.py'\.\.\.",
+        caplog,
+    )
 
 
 def test_load_strategy_invalid_directory(caplog, default_conf, tmp_path):
-    default_conf['user_data_dir'] = tmp_path
+    default_conf["user_data_dir"] = tmp_path
 
-    extra_dir = Path.cwd() / 'some/path'
+    extra_dir = Path.cwd() / "some/path"
     with pytest.raises(OperationalException, match=r"Impossible to load Strategy.*"):
-        StrategyResolver._load_strategy('StrategyTestV333', config=default_conf,
-                                        extra_dir=extra_dir)
+        StrategyResolver._load_strategy(
+            "StrategyTestV333", config=default_conf, extra_dir=extra_dir
+        )
 
-    assert log_has_re(r'Path .*' + r'some.*path.*' + r'.* does not exist', caplog)
+    assert log_has_re(r"Path .*" + r"some.*path.*" + r".* does not exist", caplog)
 
 
 def test_load_not_found_strategy(default_conf, tmp_path):
-    default_conf['user_data_dir'] = tmp_path
-    default_conf['strategy'] = 'NotFoundStrategy'
-    with pytest.raises(OperationalException,
-                       match=r"Impossible to load Strategy 'NotFoundStrategy'. "
-                             r"This class does not exist or contains Python code errors."):
+    default_conf["user_data_dir"] = tmp_path
+    default_conf["strategy"] = "NotFoundStrategy"
+    with pytest.raises(
+        OperationalException,
+        match=r"Impossible to load Strategy 'NotFoundStrategy'. "
+        r"This class does not exist or contains Python code errors.",
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
 def test_load_strategy_noname(default_conf):
-    default_conf['strategy'] = ''
-    with pytest.raises(OperationalException,
-                       match="No strategy set. Please use `--strategy` to specify "
-                             "the strategy class to use."):
+    default_conf["strategy"] = ""
+    with pytest.raises(
+        OperationalException,
+        match="No strategy set. Please use `--strategy` to specify the strategy class to use.",
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
-@ pytest.mark.filterwarnings("ignore:deprecated")
-@ pytest.mark.parametrize('strategy_name', ['StrategyTestV2'])
+@pytest.mark.filterwarnings("ignore:deprecated")
+@pytest.mark.parametrize("strategy_name", ["StrategyTestV2"])
 def test_strategy_pre_v3(dataframe_1m, default_conf, strategy_name):
-    default_conf.update({'strategy': strategy_name})
+    default_conf.update({"strategy": strategy_name})
 
     strategy = StrategyResolver.load_strategy(default_conf)
-    metadata = {'pair': 'ETH/BTC'}
+    metadata = {"pair": "ETH/BTC"}
     assert strategy.minimal_roi[0] == 0.04
-    assert default_conf["minimal_roi"]['0'] == 0.04
+    assert default_conf["minimal_roi"]["0"] == 0.04
 
     assert strategy.stoploss == -0.10
-    assert default_conf['stoploss'] == -0.10
+    assert default_conf["stoploss"] == -0.10
 
-    assert strategy.timeframe == '5m'
-    assert default_conf['timeframe'] == '5m'
+    assert strategy.timeframe == "5m"
+    assert default_conf["timeframe"] == "5m"
 
     df_indicators = strategy.advise_indicators(dataframe_1m, metadata=metadata)
-    assert 'adx' in df_indicators
+    assert "adx" in df_indicators
 
     dataframe = strategy.advise_entry(df_indicators, metadata=metadata)
-    assert 'buy' not in dataframe.columns
-    assert 'enter_long' in dataframe.columns
+    assert "buy" not in dataframe.columns
+    assert "enter_long" in dataframe.columns
 
     dataframe = strategy.advise_exit(df_indicators, metadata=metadata)
-    assert 'sell' not in dataframe.columns
-    assert 'exit_long' in dataframe.columns
+    assert "sell" not in dataframe.columns
+    assert "exit_long" in dataframe.columns
 
 
 def test_strategy_can_short(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+        }
+    )
     strat = StrategyResolver.load_strategy(default_conf)
     assert isinstance(strat, IStrategy)
-    default_conf['strategy'] = 'StrategyTestV3Futures'
+    default_conf["strategy"] = "StrategyTestV3Futures"
     with pytest.raises(ImportError, match=""):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['trading_mode'] = 'futures'
+    default_conf["trading_mode"] = "futures"
     strat = StrategyResolver.load_strategy(default_conf)
     assert isinstance(strat, IStrategy)
 
 
 def test_strategy_override_minimal_roi(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'minimal_roi': {
-            "20": 0.1,
-            "0": 0.5
-        }
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "minimal_roi": {"20": 0.1, "0": 0.5}})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.minimal_roi[0] == 0.5
     assert log_has(
-        "Override strategy 'minimal_roi' with value in config file: {'20': 0.1, '0': 0.5}.",
-        caplog)
+        "Override strategy 'minimal_roi' with value in config file: {'20': 0.1, '0': 0.5}.", caplog
+    )
 
 
 def test_strategy_override_stoploss(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'stoploss': -0.5
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "stoploss": -0.5})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.stoploss == -0.5
@@ -181,10 +184,7 @@ def test_strategy_override_stoploss(caplog, default_conf):
 
 def test_strategy_override_max_open_trades(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'max_open_trades': 7
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "max_open_trades": 7})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.max_open_trades == 7
@@ -193,10 +193,7 @@ def test_strategy_override_max_open_trades(caplog, default_conf):
 
 def test_strategy_override_trailing_stop(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'trailing_stop': True
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "trailing_stop": True})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.trailing_stop
@@ -206,84 +203,81 @@ def test_strategy_override_trailing_stop(caplog, default_conf):
 
 def test_strategy_override_trailing_stop_positive(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'trailing_stop_positive': -0.1,
-        'trailing_stop_positive_offset': -0.2
-
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+            "trailing_stop_positive": -0.1,
+            "trailing_stop_positive_offset": -0.2,
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.trailing_stop_positive == -0.1
-    assert log_has("Override strategy 'trailing_stop_positive' with value in config file: -0.1.",
-                   caplog)
+    assert log_has(
+        "Override strategy 'trailing_stop_positive' with value in config file: -0.1.", caplog
+    )
 
     assert strategy.trailing_stop_positive_offset == -0.2
-    assert log_has("Override strategy 'trailing_stop_positive' with value in config file: -0.1.",
-                   caplog)
+    assert log_has(
+        "Override strategy 'trailing_stop_positive' with value in config file: -0.1.", caplog
+    )
 
 
 def test_strategy_override_timeframe(caplog, default_conf):
     caplog.set_level(logging.INFO)
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'timeframe': 60,
-        'stake_currency': 'ETH'
-    })
+    default_conf.update(
+        {"strategy": CURRENT_TEST_STRATEGY, "timeframe": 60, "stake_currency": "ETH"}
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.timeframe == 60
-    assert strategy.stake_currency == 'ETH'
-    assert log_has("Override strategy 'timeframe' with value in config file: 60.",
-                   caplog)
+    assert strategy.stake_currency == "ETH"
+    assert log_has("Override strategy 'timeframe' with value in config file: 60.", caplog)
 
 
 def test_strategy_override_process_only_new_candles(caplog, default_conf):
     caplog.set_level(logging.INFO)
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'process_only_new_candles': False
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "process_only_new_candles": False})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert not strategy.process_only_new_candles
-    assert log_has("Override strategy 'process_only_new_candles' with value in config file: False.",
-                   caplog)
+    assert log_has(
+        "Override strategy 'process_only_new_candles' with value in config file: False.", caplog
+    )
 
 
 def test_strategy_override_order_types(caplog, default_conf):
     caplog.set_level(logging.INFO)
 
     order_types = {
-        'entry': 'market',
-        'exit': 'limit',
-        'stoploss': 'limit',
-        'stoploss_on_exchange': True,
+        "entry": "market",
+        "exit": "limit",
+        "stoploss": "limit",
+        "stoploss_on_exchange": True,
     }
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'order_types': order_types
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "order_types": order_types})
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.order_types
-    for method in ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']:
+    for method in ["entry", "exit", "stoploss", "stoploss_on_exchange"]:
         assert strategy.order_types[method] == order_types[method]
 
-    assert log_has("Override strategy 'order_types' with value in config file:"
-                   " {'entry': 'market', 'exit': 'limit', 'stoploss': 'limit',"
-                   " 'stoploss_on_exchange': True}.", caplog)
+    assert log_has(
+        "Override strategy 'order_types' with value in config file:"
+        " {'entry': 'market', 'exit': 'limit', 'stoploss': 'limit',"
+        " 'stoploss_on_exchange': True}.",
+        caplog,
+    )
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'order_types': {'exit': 'market'}
-    })
+    default_conf.update({"strategy": CURRENT_TEST_STRATEGY, "order_types": {"exit": "market"}})
     # Raise error for invalid configuration
-    with pytest.raises(ImportError,
-                       match=r"Impossible to load Strategy '" + CURRENT_TEST_STRATEGY + "'. "
-                             r"Order-types mapping is incomplete."):
+    with pytest.raises(
+        ImportError,
+        match=r"Impossible to load Strategy '" + CURRENT_TEST_STRATEGY + "'. "
+        r"Order-types mapping is incomplete.",
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
@@ -291,50 +285,57 @@ def test_strategy_override_order_tif(caplog, default_conf):
     caplog.set_level(logging.INFO)
 
     order_time_in_force = {
-        'entry': 'FOK',
-        'exit': 'GTC',
+        "entry": "FOK",
+        "exit": "GTC",
     }
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'order_time_in_force': order_time_in_force
-    })
+    default_conf.update(
+        {"strategy": CURRENT_TEST_STRATEGY, "order_time_in_force": order_time_in_force}
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.order_time_in_force
-    for method in ['entry', 'exit']:
+    for method in ["entry", "exit"]:
         assert strategy.order_time_in_force[method] == order_time_in_force[method]
 
-    assert log_has("Override strategy 'order_time_in_force' with value in config file:"
-                   " {'entry': 'FOK', 'exit': 'GTC'}.", caplog)
+    assert log_has(
+        "Override strategy 'order_time_in_force' with value in config file:"
+        " {'entry': 'FOK', 'exit': 'GTC'}.",
+        caplog,
+    )
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'order_time_in_force': {'entry': 'FOK'}
-    })
+    default_conf.update(
+        {"strategy": CURRENT_TEST_STRATEGY, "order_time_in_force": {"entry": "FOK"}}
+    )
     # Raise error for invalid configuration
-    with pytest.raises(ImportError,
-                       match=f"Impossible to load Strategy '{CURRENT_TEST_STRATEGY}'. "
-                             "Order-time-in-force mapping is incomplete."):
+    with pytest.raises(
+        ImportError,
+        match=f"Impossible to load Strategy '{CURRENT_TEST_STRATEGY}'. "
+        "Order-time-in-force mapping is incomplete.",
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
 def test_strategy_override_use_exit_signal(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
     assert strategy.use_exit_signal
     assert isinstance(strategy.use_exit_signal, bool)
     # must be inserted to configuration
-    assert 'use_exit_signal' in default_conf
-    assert default_conf['use_exit_signal']
+    assert "use_exit_signal" in default_conf
+    assert default_conf["use_exit_signal"]
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'use_exit_signal': False,
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+            "use_exit_signal": False,
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert not strategy.use_exit_signal
@@ -344,20 +345,24 @@ def test_strategy_override_use_exit_signal(caplog, default_conf):
 
 def test_strategy_override_use_exit_profit_only(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
     assert not strategy.exit_profit_only
     assert isinstance(strategy.exit_profit_only, bool)
     # must be inserted to configuration
-    assert 'exit_profit_only' in default_conf
-    assert not default_conf['exit_profit_only']
+    assert "exit_profit_only" in default_conf
+    assert not default_conf["exit_profit_only"]
 
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'exit_profit_only': True,
-    })
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+            "exit_profit_only": True,
+        }
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
 
     assert strategy.exit_profit_only
@@ -367,138 +372,135 @@ def test_strategy_override_use_exit_profit_only(caplog, default_conf):
 
 def test_strategy_max_open_trades_infinity_from_strategy(caplog, default_conf):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-    })
-    del default_conf['max_open_trades']
+    default_conf.update(
+        {
+            "strategy": CURRENT_TEST_STRATEGY,
+        }
+    )
+    del default_conf["max_open_trades"]
 
     strategy = StrategyResolver.load_strategy(default_conf)
 
     # this test assumes 'max_open_trades' is set to -1 in CURRENT_TEST_STRATEGY
-    assert strategy.max_open_trades == float('inf')
-    assert default_conf['max_open_trades'] == float('inf')
+    assert strategy.max_open_trades == float("inf")
+    assert default_conf["max_open_trades"] == float("inf")
 
 
 def test_strategy_max_open_trades_infinity_from_config(caplog, default_conf, mocker):
     caplog.set_level(logging.INFO)
-    default_conf.update({
-        'strategy': CURRENT_TEST_STRATEGY,
-        'max_open_trades': -1,
-        'exchange': 'binance'
-    })
+    default_conf.update(
+        {"strategy": CURRENT_TEST_STRATEGY, "max_open_trades": -1, "exchange": "binance"}
+    )
 
     configuration = Configuration(args=default_conf)
     parsed_config = configuration.get_config()
 
-    assert parsed_config['max_open_trades'] == float('inf')
+    assert parsed_config["max_open_trades"] == float("inf")
 
     strategy = StrategyResolver.load_strategy(parsed_config)
 
-    assert strategy.max_open_trades == float('inf')
+    assert strategy.max_open_trades == float("inf")
 
 
-@ pytest.mark.filterwarnings("ignore:deprecated")
+@pytest.mark.filterwarnings("ignore:deprecated")
 def test_missing_implements(default_conf, caplog):
-
     default_location = Path(__file__).parent / "strats"
-    default_conf.update({'strategy': 'StrategyTestV2',
-                         'strategy_path': default_location})
+    default_conf.update({"strategy": "StrategyTestV2", "strategy_path": default_location})
     StrategyResolver.load_strategy(default_conf)
 
     log_has_re(r"DEPRECATED: .*use_sell_signal.*use_exit_signal.", caplog)
 
-    default_conf['trading_mode'] = 'futures'
-    with pytest.raises(OperationalException,
-                       match=r"DEPRECATED: .*use_sell_signal.*use_exit_signal."):
+    default_conf["trading_mode"] = "futures"
+    with pytest.raises(
+        OperationalException, match=r"DEPRECATED: .*use_sell_signal.*use_exit_signal."
+    ):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['trading_mode'] = 'spot'
+    default_conf["trading_mode"] = "spot"
 
     default_location = Path(__file__).parent / "strats/broken_strats"
-    default_conf.update({'strategy': 'TestStrategyNoImplements',
-                         'strategy_path': default_location})
-    with pytest.raises(OperationalException,
-                       match=r"`populate_entry_trend` or `populate_buy_trend`.*"):
+    default_conf.update({"strategy": "TestStrategyNoImplements", "strategy_path": default_location})
+    with pytest.raises(
+        OperationalException, match=r"`populate_entry_trend` or `populate_buy_trend`.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['strategy'] = 'TestStrategyNoImplementSell'
+    default_conf["strategy"] = "TestStrategyNoImplementSell"
 
-    with pytest.raises(OperationalException,
-                       match=r"`populate_exit_trend` or `populate_sell_trend`.*"):
+    with pytest.raises(
+        OperationalException, match=r"`populate_exit_trend` or `populate_sell_trend`.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
     # Futures mode is more strict ...
-    default_conf['trading_mode'] = 'futures'
+    default_conf["trading_mode"] = "futures"
 
-    with pytest.raises(OperationalException,
-                       match=r"`populate_exit_trend` must be implemented.*"):
+    with pytest.raises(OperationalException, match=r"`populate_exit_trend` must be implemented.*"):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['strategy'] = 'TestStrategyNoImplements'
-    with pytest.raises(OperationalException,
-                       match=r"`populate_entry_trend` must be implemented.*"):
+    default_conf["strategy"] = "TestStrategyNoImplements"
+    with pytest.raises(OperationalException, match=r"`populate_entry_trend` must be implemented.*"):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['strategy'] = 'TestStrategyImplementCustomSell'
-    with pytest.raises(OperationalException,
-                       match=r"Please migrate your implementation of `custom_sell`.*"):
+    default_conf["strategy"] = "TestStrategyImplementCustomSell"
+    with pytest.raises(
+        OperationalException, match=r"Please migrate your implementation of `custom_sell`.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['strategy'] = 'TestStrategyImplementBuyTimeout'
-    with pytest.raises(OperationalException,
-                       match=r"Please migrate your implementation of `check_buy_timeout`.*"):
+    default_conf["strategy"] = "TestStrategyImplementBuyTimeout"
+    with pytest.raises(
+        OperationalException, match=r"Please migrate your implementation of `check_buy_timeout`.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
-    default_conf['strategy'] = 'TestStrategyImplementSellTimeout'
-    with pytest.raises(OperationalException,
-                       match=r"Please migrate your implementation of `check_sell_timeout`.*"):
+    default_conf["strategy"] = "TestStrategyImplementSellTimeout"
+    with pytest.raises(
+        OperationalException, match=r"Please migrate your implementation of `check_sell_timeout`.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
 def test_call_deprecated_function(default_conf):
     default_location = Path(__file__).parent / "strats/broken_strats/"
-    del default_conf['timeframe']
-    default_conf.update({'strategy': 'TestStrategyLegacyV1',
-                         'strategy_path': default_location})
-    with pytest.raises(OperationalException,
-                       match=r"Strategy Interface v1 is no longer supported.*"):
+    del default_conf["timeframe"]
+    default_conf.update({"strategy": "TestStrategyLegacyV1", "strategy_path": default_location})
+    with pytest.raises(
+        OperationalException, match=r"Strategy Interface v1 is no longer supported.*"
+    ):
         StrategyResolver.load_strategy(default_conf)
 
 
 def test_strategy_interface_versioning(dataframe_1m, default_conf):
-    default_conf.update({'strategy': 'StrategyTestV2'})
+    default_conf.update({"strategy": "StrategyTestV2"})
     strategy = StrategyResolver.load_strategy(default_conf)
-    metadata = {'pair': 'ETH/BTC'}
+    metadata = {"pair": "ETH/BTC"}
 
     assert strategy.INTERFACE_VERSION == 2
 
     indicator_df = strategy.advise_indicators(dataframe_1m, metadata=metadata)
     assert isinstance(indicator_df, DataFrame)
-    assert 'adx' in indicator_df.columns
+    assert "adx" in indicator_df.columns
 
     enterdf = strategy.advise_entry(dataframe_1m, metadata=metadata)
     assert isinstance(enterdf, DataFrame)
 
-    assert 'buy' not in enterdf.columns
-    assert 'enter_long' in enterdf.columns
+    assert "buy" not in enterdf.columns
+    assert "enter_long" in enterdf.columns
 
     exitdf = strategy.advise_exit(dataframe_1m, metadata=metadata)
     assert isinstance(exitdf, DataFrame)
-    assert 'sell' not in exitdf
-    assert 'exit_long' in exitdf
+    assert "sell" not in exitdf
+    assert "exit_long" in exitdf
 
 
 def test_strategy_ft_load_params_from_file(mocker, default_conf):
-    default_conf.update({'strategy': 'StrategyTestV2'})
-    del default_conf['max_open_trades']
-    mocker.patch('freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file',
-                 return_value={
-                     'params': {
-                         'max_open_trades':  {
-                            'max_open_trades': -1
-                         }
-                         }
-                     })
+    default_conf.update({"strategy": "StrategyTestV2"})
+    del default_conf["max_open_trades"]
+    mocker.patch(
+        "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file",
+        return_value={"params": {"max_open_trades": {"max_open_trades": -1}}},
+    )
     strategy = StrategyResolver.load_strategy(default_conf)
-    assert strategy.max_open_trades == float('inf')
-    assert strategy.config['max_open_trades'] == float('inf')
+    assert strategy.max_open_trades == float("inf")
+    assert strategy.config["max_open_trades"] == float("inf")
diff --git a/tests/test_arguments.py b/tests/test_arguments.py
index e14b09719..488363685 100644
--- a/tests/test_arguments.py
+++ b/tests/test_arguments.py
@@ -12,7 +12,7 @@ from tests.conftest import CURRENT_TEST_STRATEGY
 
 # Parse common command-line-arguments. Used for all tools
 def test_parse_args_none() -> None:
-    arguments = Arguments(['trade'])
+    arguments = Arguments(["trade"])
     assert isinstance(arguments, Arguments)
     x = arguments.get_parsed_arg()
     assert isinstance(x, dict)
@@ -20,265 +20,278 @@ def test_parse_args_none() -> None:
 
 
 def test_parse_args_defaults(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(side_effect=[False, True]))
-    args = Arguments(['trade']).get_parsed_arg()
-    assert args['config'] == ['config.json']
-    assert args['strategy_path'] is None
-    assert args['datadir'] is None
-    assert args['verbosity'] == 0
+    mocker.patch.object(Path, "is_file", MagicMock(side_effect=[False, True]))
+    args = Arguments(["trade"]).get_parsed_arg()
+    assert args["config"] == ["config.json"]
+    assert args["strategy_path"] is None
+    assert args["datadir"] is None
+    assert args["verbosity"] == 0
 
 
 def test_parse_args_default_userdatadir(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=True))
-    args = Arguments(['trade']).get_parsed_arg()
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=True))
+    args = Arguments(["trade"]).get_parsed_arg()
     # configuration defaults to user_data if that is available.
-    assert args['config'] == [str(Path('user_data/config.json'))]
-    assert args['strategy_path'] is None
-    assert args['datadir'] is None
-    assert args['verbosity'] == 0
+    assert args["config"] == [str(Path("user_data/config.json"))]
+    assert args["strategy_path"] is None
+    assert args["datadir"] is None
+    assert args["verbosity"] == 0
 
 
 def test_parse_args_userdatadir(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=True))
-    args = Arguments(['trade', '--user-data-dir', 'user_data']).get_parsed_arg()
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=True))
+    args = Arguments(["trade", "--user-data-dir", "user_data"]).get_parsed_arg()
     # configuration defaults to user_data if that is available.
-    assert args['config'] == [str(Path('user_data/config.json'))]
-    assert args['strategy_path'] is None
-    assert args['datadir'] is None
-    assert args['verbosity'] == 0
+    assert args["config"] == [str(Path("user_data/config.json"))]
+    assert args["strategy_path"] is None
+    assert args["datadir"] is None
+    assert args["verbosity"] == 0
 
 
 def test_parse_args_config() -> None:
-    args = Arguments(['trade', '-c', '/dev/null']).get_parsed_arg()
-    assert args['config'] == ['/dev/null']
+    args = Arguments(["trade", "-c", "/dev/null"]).get_parsed_arg()
+    assert args["config"] == ["/dev/null"]
 
-    args = Arguments(['trade', '--config', '/dev/null']).get_parsed_arg()
-    assert args['config'] == ['/dev/null']
+    args = Arguments(["trade", "--config", "/dev/null"]).get_parsed_arg()
+    assert args["config"] == ["/dev/null"]
 
-    args = Arguments(['trade', '--config', '/dev/null',
-                      '--config', '/dev/zero'],).get_parsed_arg()
-    assert args['config'] == ['/dev/null', '/dev/zero']
+    args = Arguments(
+        ["trade", "--config", "/dev/null", "--config", "/dev/zero"],
+    ).get_parsed_arg()
+    assert args["config"] == ["/dev/null", "/dev/zero"]
 
 
 def test_parse_args_db_url() -> None:
-    args = Arguments(['trade', '--db-url', 'sqlite:///test.sqlite']).get_parsed_arg()
-    assert args['db_url'] == 'sqlite:///test.sqlite'
+    args = Arguments(["trade", "--db-url", "sqlite:///test.sqlite"]).get_parsed_arg()
+    assert args["db_url"] == "sqlite:///test.sqlite"
 
 
 def test_parse_args_verbose() -> None:
-    args = Arguments(['trade', '-v']).get_parsed_arg()
-    assert args['verbosity'] == 1
+    args = Arguments(["trade", "-v"]).get_parsed_arg()
+    assert args["verbosity"] == 1
 
-    args = Arguments(['trade', '--verbose']).get_parsed_arg()
-    assert args['verbosity'] == 1
+    args = Arguments(["trade", "--verbose"]).get_parsed_arg()
+    assert args["verbosity"] == 1
 
 
 def test_common_scripts_options() -> None:
-    args = Arguments(['download-data', '-p', 'ETH/BTC', 'XRP/BTC']).get_parsed_arg()
+    args = Arguments(["download-data", "-p", "ETH/BTC", "XRP/BTC"]).get_parsed_arg()
 
-    assert args['pairs'] == ['ETH/BTC', 'XRP/BTC']
-    assert 'func' in args
+    assert args["pairs"] == ["ETH/BTC", "XRP/BTC"]
+    assert "func" in args
 
 
 def test_parse_args_version() -> None:
-    with pytest.raises(SystemExit, match=r'0'):
-        Arguments(['--version']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"0"):
+        Arguments(["--version"]).get_parsed_arg()
 
 
 def test_parse_args_invalid() -> None:
-    with pytest.raises(SystemExit, match=r'2'):
-        Arguments(['-c']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"2"):
+        Arguments(["-c"]).get_parsed_arg()
 
 
 def test_parse_args_strategy() -> None:
-    args = Arguments(['trade', '--strategy', 'SomeStrategy']).get_parsed_arg()
-    assert args['strategy'] == 'SomeStrategy'
+    args = Arguments(["trade", "--strategy", "SomeStrategy"]).get_parsed_arg()
+    assert args["strategy"] == "SomeStrategy"
 
 
 def test_parse_args_strategy_invalid() -> None:
-    with pytest.raises(SystemExit, match=r'2'):
-        Arguments(['--strategy']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"2"):
+        Arguments(["--strategy"]).get_parsed_arg()
 
 
 def test_parse_args_strategy_path() -> None:
-    args = Arguments(['trade', '--strategy-path', '/some/path']).get_parsed_arg()
-    assert args['strategy_path'] == '/some/path'
+    args = Arguments(["trade", "--strategy-path", "/some/path"]).get_parsed_arg()
+    assert args["strategy_path"] == "/some/path"
 
 
 def test_parse_args_strategy_path_invalid() -> None:
-    with pytest.raises(SystemExit, match=r'2'):
-        Arguments(['--strategy-path']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"2"):
+        Arguments(["--strategy-path"]).get_parsed_arg()
 
 
 def test_parse_args_backtesting_invalid() -> None:
-    with pytest.raises(SystemExit, match=r'2'):
-        Arguments(['backtesting --timeframe']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"2"):
+        Arguments(["backtesting --timeframe"]).get_parsed_arg()
 
-    with pytest.raises(SystemExit, match=r'2'):
-        Arguments(['backtesting --timeframe', 'abc']).get_parsed_arg()
+    with pytest.raises(SystemExit, match=r"2"):
+        Arguments(["backtesting --timeframe", "abc"]).get_parsed_arg()
 
 
 def test_parse_args_backtesting_custom() -> None:
     args = [
-        'backtesting',
-        '-c', 'test_conf.json',
-        '--timeframe', '1m',
-        '--strategy-list',
+        "backtesting",
+        "-c",
+        "test_conf.json",
+        "--timeframe",
+        "1m",
+        "--strategy-list",
         CURRENT_TEST_STRATEGY,
-        'SampleStrategy'
+        "SampleStrategy",
     ]
     call_args = Arguments(args).get_parsed_arg()
-    assert call_args['config'] == ['test_conf.json']
-    assert call_args['verbosity'] == 0
-    assert call_args['command'] == 'backtesting'
-    assert call_args['func'] is not None
-    assert call_args['timeframe'] == '1m'
-    assert isinstance(call_args['strategy_list'], list)
-    assert len(call_args['strategy_list']) == 2
+    assert call_args["config"] == ["test_conf.json"]
+    assert call_args["verbosity"] == 0
+    assert call_args["command"] == "backtesting"
+    assert call_args["func"] is not None
+    assert call_args["timeframe"] == "1m"
+    assert isinstance(call_args["strategy_list"], list)
+    assert len(call_args["strategy_list"]) == 2
 
 
 def test_parse_args_hyperopt_custom() -> None:
-    args = [
-        'hyperopt',
-        '-c', 'test_conf.json',
-        '--epochs', '20',
-        '--spaces', 'buy'
-    ]
+    args = ["hyperopt", "-c", "test_conf.json", "--epochs", "20", "--spaces", "buy"]
     call_args = Arguments(args).get_parsed_arg()
-    assert call_args['config'] == ['test_conf.json']
-    assert call_args['epochs'] == 20
-    assert call_args['verbosity'] == 0
-    assert call_args['command'] == 'hyperopt'
-    assert call_args['spaces'] == ['buy']
-    assert call_args['func'] is not None
-    assert callable(call_args['func'])
+    assert call_args["config"] == ["test_conf.json"]
+    assert call_args["epochs"] == 20
+    assert call_args["verbosity"] == 0
+    assert call_args["command"] == "hyperopt"
+    assert call_args["spaces"] == ["buy"]
+    assert call_args["func"] is not None
+    assert callable(call_args["func"])
 
 
 def test_download_data_options() -> None:
     args = [
-        'download-data',
-        '--datadir', 'datadir/directory',
-        '--pairs-file', 'file_with_pairs',
-        '--days', '30',
-        '--exchange', 'binance'
+        "download-data",
+        "--datadir",
+        "datadir/directory",
+        "--pairs-file",
+        "file_with_pairs",
+        "--days",
+        "30",
+        "--exchange",
+        "binance",
     ]
     pargs = Arguments(args).get_parsed_arg()
 
-    assert pargs['pairs_file'] == 'file_with_pairs'
-    assert pargs['datadir'] == 'datadir/directory'
-    assert pargs['days'] == 30
-    assert pargs['exchange'] == 'binance'
+    assert pargs["pairs_file"] == "file_with_pairs"
+    assert pargs["datadir"] == "datadir/directory"
+    assert pargs["days"] == 30
+    assert pargs["exchange"] == "binance"
 
 
 def test_plot_dataframe_options() -> None:
     args = [
-        'plot-dataframe',
-        '-c', 'tests/testdata/testconfigs/main_test_config.json',
-        '--indicators1', 'sma10', 'sma100',
-        '--indicators2', 'macd', 'fastd', 'fastk',
-        '--plot-limit', '30',
-        '-p', 'UNITTEST/BTC',
+        "plot-dataframe",
+        "-c",
+        "tests/testdata/testconfigs/main_test_config.json",
+        "--indicators1",
+        "sma10",
+        "sma100",
+        "--indicators2",
+        "macd",
+        "fastd",
+        "fastk",
+        "--plot-limit",
+        "30",
+        "-p",
+        "UNITTEST/BTC",
     ]
     pargs = Arguments(args).get_parsed_arg()
 
-    assert pargs['indicators1'] == ['sma10', 'sma100']
-    assert pargs['indicators2'] == ['macd', 'fastd', 'fastk']
-    assert pargs['plot_limit'] == 30
-    assert pargs['pairs'] == ['UNITTEST/BTC']
+    assert pargs["indicators1"] == ["sma10", "sma100"]
+    assert pargs["indicators2"] == ["macd", "fastd", "fastk"]
+    assert pargs["plot_limit"] == 30
+    assert pargs["pairs"] == ["UNITTEST/BTC"]
 
 
-@pytest.mark.parametrize('auto_open_arg', [True, False])
+@pytest.mark.parametrize("auto_open_arg", [True, False])
 def test_plot_profit_options(auto_open_arg: bool) -> None:
     args = [
-        'plot-profit',
-        '-p', 'UNITTEST/BTC',
-        '--trade-source', 'DB',
-        '--db-url', 'sqlite:///whatever.sqlite',
+        "plot-profit",
+        "-p",
+        "UNITTEST/BTC",
+        "--trade-source",
+        "DB",
+        "--db-url",
+        "sqlite:///whatever.sqlite",
     ]
     if auto_open_arg:
-        args.append('--auto-open')
+        args.append("--auto-open")
     pargs = Arguments(args).get_parsed_arg()
 
-    assert pargs['trade_source'] == 'DB'
-    assert pargs['pairs'] == ['UNITTEST/BTC']
-    assert pargs['db_url'] == 'sqlite:///whatever.sqlite'
-    assert pargs['plot_auto_open'] == auto_open_arg
+    assert pargs["trade_source"] == "DB"
+    assert pargs["pairs"] == ["UNITTEST/BTC"]
+    assert pargs["db_url"] == "sqlite:///whatever.sqlite"
+    assert pargs["plot_auto_open"] == auto_open_arg
 
 
 def test_config_notallowed(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=False))
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=False))
     args = [
-        'create-userdir',
+        "create-userdir",
     ]
     pargs = Arguments(args).get_parsed_arg()
 
-    assert 'config' not in pargs
+    assert "config" not in pargs
 
     # When file exists:
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=True))
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=True))
     args = [
-        'create-userdir',
+        "create-userdir",
     ]
     pargs = Arguments(args).get_parsed_arg()
     # config is not added even if it exists, since create-userdir is in the notallowed list
-    assert 'config' not in pargs
+    assert "config" not in pargs
 
 
 def test_config_notrequired(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(return_value=False))
+    mocker.patch.object(Path, "is_file", MagicMock(return_value=False))
     args = [
-        'download-data',
+        "download-data",
     ]
     pargs = Arguments(args).get_parsed_arg()
 
-    assert pargs['config'] is None
+    assert pargs["config"] is None
 
     # When file exists:
-    mocker.patch.object(Path, 'is_file', MagicMock(side_effect=[False, True]))
+    mocker.patch.object(Path, "is_file", MagicMock(side_effect=[False, True]))
     args = [
-        'download-data',
+        "download-data",
     ]
     pargs = Arguments(args).get_parsed_arg()
     # config is added if it exists
-    assert pargs['config'] == ['config.json']
+    assert pargs["config"] == ["config.json"]
 
 
 def test_check_int_positive() -> None:
-    assert check_int_positive('3') == 3
-    assert check_int_positive('1') == 1
-    assert check_int_positive('100') == 100
+    assert check_int_positive("3") == 3
+    assert check_int_positive("1") == 1
+    assert check_int_positive("100") == 100
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_positive('-2')
+        check_int_positive("-2")
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_positive('0')
+        check_int_positive("0")
 
     with pytest.raises(argparse.ArgumentTypeError):
         check_int_positive(0)
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_positive('3.5')
+        check_int_positive("3.5")
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_positive('DeadBeef')
+        check_int_positive("DeadBeef")
 
 
 def test_check_int_nonzero() -> None:
-    assert check_int_nonzero('3') == 3
-    assert check_int_nonzero('1') == 1
-    assert check_int_nonzero('100') == 100
+    assert check_int_nonzero("3") == 3
+    assert check_int_nonzero("1") == 1
+    assert check_int_nonzero("100") == 100
 
-    assert check_int_nonzero('-2') == -2
+    assert check_int_nonzero("-2") == -2
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_nonzero('0')
+        check_int_nonzero("0")
 
     with pytest.raises(argparse.ArgumentTypeError):
         check_int_nonzero(0)
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_nonzero('3.5')
+        check_int_nonzero("3.5")
 
     with pytest.raises(argparse.ArgumentTypeError):
-        check_int_nonzero('DeadBeef')
+        check_int_nonzero("DeadBeef")
diff --git a/tests/test_configuration.py b/tests/test_configuration.py
index 124258c04..d0e0aa2c6 100644
--- a/tests/test_configuration.py
+++ b/tests/test_configuration.py
@@ -12,18 +12,28 @@ from freqtrade.commands import Arguments
 from freqtrade.configuration import Configuration, validate_config_consistency
 from freqtrade.configuration.config_secrets import sanitize_config
 from freqtrade.configuration.config_validation import validate_config_schema
-from freqtrade.configuration.deprecated_settings import (check_conflicting_settings,
-                                                         process_deprecated_setting,
-                                                         process_removed_setting,
-                                                         process_temporary_deprecated_settings)
+from freqtrade.configuration.deprecated_settings import (
+    check_conflicting_settings,
+    process_deprecated_setting,
+    process_removed_setting,
+    process_temporary_deprecated_settings,
+)
 from freqtrade.configuration.environment_vars import _flat_vars_to_nested_dict
-from freqtrade.configuration.load_config import (load_config_file, load_file, load_from_files,
-                                                 log_config_error_range)
+from freqtrade.configuration.load_config import (
+    load_config_file,
+    load_file,
+    load_from_files,
+    log_config_error_range,
+)
 from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX
 from freqtrade.enums import RunMode
 from freqtrade.exceptions import OperationalException
-from tests.conftest import (CURRENT_TEST_STRATEGY, log_has, log_has_re,
-                            patched_configuration_load_config_file)
+from tests.conftest import (
+    CURRENT_TEST_STRATEGY,
+    log_has,
+    log_has_re,
+    patched_configuration_load_config_file,
+)
 
 
 @pytest.fixture(scope="function")
@@ -35,85 +45,86 @@ def all_conf():
 
 def test_load_config_missing_attributes(default_conf) -> None:
     conf = deepcopy(default_conf)
-    conf.pop('exchange')
+    conf.pop("exchange")
 
     with pytest.raises(ValidationError, match=r".*'exchange' is a required property.*"):
         validate_config_schema(conf)
 
     conf = deepcopy(default_conf)
-    conf.pop('stake_currency')
-    conf['runmode'] = RunMode.DRY_RUN
+    conf.pop("stake_currency")
+    conf["runmode"] = RunMode.DRY_RUN
     with pytest.raises(ValidationError, match=r".*'stake_currency' is a required property.*"):
         validate_config_schema(conf)
 
 
 def test_load_config_incorrect_stake_amount(default_conf) -> None:
-    default_conf['stake_amount'] = 'fake'
+    default_conf["stake_amount"] = "fake"
 
     with pytest.raises(ValidationError, match=r".*'fake' does not match 'unlimited'.*"):
         validate_config_schema(default_conf)
 
 
 def test_load_config_file(default_conf, mocker, caplog) -> None:
-    del default_conf['user_data_dir']
-    default_conf['datadir'] = str(default_conf['datadir'])
-    file_mock = mocker.patch('freqtrade.configuration.load_config.Path.open', mocker.mock_open(
-        read_data=json.dumps(default_conf)
-    ))
+    del default_conf["user_data_dir"]
+    default_conf["datadir"] = str(default_conf["datadir"])
+    file_mock = mocker.patch(
+        "freqtrade.configuration.load_config.Path.open",
+        mocker.mock_open(read_data=json.dumps(default_conf)),
+    )
 
-    validated_conf = load_config_file('somefile')
+    validated_conf = load_config_file("somefile")
     assert file_mock.call_count == 1
     assert validated_conf.items() >= default_conf.items()
 
 
 def test_load_config_file_error(default_conf, mocker, caplog) -> None:
-    del default_conf['user_data_dir']
-    default_conf['datadir'] = str(default_conf['datadir'])
-    filedata = json.dumps(default_conf).replace(
-        '"stake_amount": 0.001,', '"stake_amount": .001,')
-    mocker.patch('freqtrade.configuration.load_config.Path.open',
-                 mocker.mock_open(read_data=filedata))
+    del default_conf["user_data_dir"]
+    default_conf["datadir"] = str(default_conf["datadir"])
+    filedata = json.dumps(default_conf).replace('"stake_amount": 0.001,', '"stake_amount": .001,')
+    mocker.patch(
+        "freqtrade.configuration.load_config.Path.open", mocker.mock_open(read_data=filedata)
+    )
     mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
 
     with pytest.raises(OperationalException, match=r".*Please verify the following segment.*"):
-        load_config_file('somefile')
+        load_config_file("somefile")
 
 
 def test_load_config_file_error_range(default_conf, mocker, caplog) -> None:
-    del default_conf['user_data_dir']
-    default_conf['datadir'] = str(default_conf['datadir'])
-    filedata = json.dumps(default_conf).replace(
-        '"stake_amount": 0.001,', '"stake_amount": .001,')
+    del default_conf["user_data_dir"]
+    default_conf["datadir"] = str(default_conf["datadir"])
+    filedata = json.dumps(default_conf).replace('"stake_amount": 0.001,', '"stake_amount": .001,')
     mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
 
-    x = log_config_error_range('somefile', 'Parse error at offset 64: Invalid value.')
+    x = log_config_error_range("somefile", "Parse error at offset 64: Invalid value.")
     assert isinstance(x, str)
-    assert (x == '{"max_open_trades": 1, "stake_currency": "BTC", '
-            '"stake_amount": .001, "fiat_display_currency": "USD", '
-            '"timeframe": "5m", "dry_run": true, "cance')
+    assert (
+        x == '{"max_open_trades": 1, "stake_currency": "BTC", '
+        '"stake_amount": .001, "fiat_display_currency": "USD", '
+        '"timeframe": "5m", "dry_run": true, "cance'
+    )
 
     filedata = json.dumps(default_conf, indent=2).replace(
-        '"stake_amount": 0.001,', '"stake_amount": .001,')
+        '"stake_amount": 0.001,', '"stake_amount": .001,'
+    )
     mocker.patch.object(Path, "read_text", MagicMock(return_value=filedata))
 
-    x = log_config_error_range('somefile', 'Parse error at offset 4: Invalid value.')
+    x = log_config_error_range("somefile", "Parse error at offset 4: Invalid value.")
     assert isinstance(x, str)
-    assert (x == '  "max_open_trades": 1,\n  "stake_currency": "BTC",\n'
-            '  "stake_amount": .001,')
+    assert x == '  "max_open_trades": 1,\n  "stake_currency": "BTC",\n' '  "stake_amount": .001,'
 
-    x = log_config_error_range('-', '')
-    assert x == ''
+    x = log_config_error_range("-", "")
+    assert x == ""
 
 
 def test_load_file_error(tmp_path):
-    testpath = tmp_path / 'config.json'
+    testpath = tmp_path / "config.json"
     with pytest.raises(OperationalException, match=r"File .* not found!"):
         load_file(testpath)
 
 
 def test__args_to_config(caplog):
-
-    arg_list = ['trade', '--strategy-path', 'TestTest']
+    arg_list = ["trade", "--strategy-path", "TestTest"]
     args = Arguments(arg_list).get_parsed_arg()
     configuration = Configuration(args)
     config = {}
@@ -123,90 +134,94 @@ def test__args_to_config(caplog):
         configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef")
         assert len(w) == 0
         assert log_has("DeadBeef", caplog)
-        assert config['strategy_path'] == "TestTest"
+        assert config["strategy_path"] == "TestTest"
 
     configuration = Configuration(args)
     config = {}
     with warnings.catch_warnings(record=True) as w:
         warnings.simplefilter("always")
         # Deprecation warnings!
-        configuration._args_to_config(config, argname="strategy_path", logstring="DeadBeef",
-                                      deprecated_msg="Going away soon!")
+        configuration._args_to_config(
+            config, argname="strategy_path", logstring="DeadBeef", deprecated_msg="Going away soon!"
+        )
         assert len(w) == 1
         assert issubclass(w[-1].category, DeprecationWarning)
         assert "DEPRECATED: Going away soon!" in str(w[-1].message)
         assert log_has("DeadBeef", caplog)
-        assert config['strategy_path'] == "TestTest"
+        assert config["strategy_path"] == "TestTest"
 
 
 def test_load_config_max_open_trades_zero(default_conf, mocker, caplog) -> None:
-    default_conf['max_open_trades'] = 0
+    default_conf["max_open_trades"] = 0
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = Arguments(['trade']).get_parsed_arg()
+    args = Arguments(["trade"]).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf['max_open_trades'] == 0
-    assert 'internals' in validated_conf
+    assert validated_conf["max_open_trades"] == 0
+    assert "internals" in validated_conf
 
 
 def test_load_config_combine_dicts(default_conf, mocker, caplog) -> None:
     conf1 = deepcopy(default_conf)
     conf2 = deepcopy(default_conf)
-    del conf1['exchange']['key']
-    del conf1['exchange']['secret']
-    del conf2['exchange']['name']
-    conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
+    del conf1["exchange"]["key"]
+    del conf1["exchange"]["secret"]
+    del conf2["exchange"]["name"]
+    conf2["exchange"]["pair_whitelist"] += ["NANO/BTC"]
 
     config_files = [conf1, conf2]
 
     configsmock = MagicMock(side_effect=config_files)
-    mocker.patch(
-        'freqtrade.configuration.load_config.load_config_file',
-        configsmock
-    )
+    mocker.patch("freqtrade.configuration.load_config.load_config_file", configsmock)
 
-    arg_list = ['trade', '-c', 'test_conf.json', '--config', 'test2_conf.json', ]
+    arg_list = [
+        "trade",
+        "-c",
+        "test_conf.json",
+        "--config",
+        "test2_conf.json",
+    ]
     args = Arguments(arg_list).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    exchange_conf = default_conf['exchange']
-    assert validated_conf['exchange']['name'] == exchange_conf['name']
-    assert validated_conf['exchange']['key'] == exchange_conf['key']
-    assert validated_conf['exchange']['secret'] == exchange_conf['secret']
-    assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
-    assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
+    exchange_conf = default_conf["exchange"]
+    assert validated_conf["exchange"]["name"] == exchange_conf["name"]
+    assert validated_conf["exchange"]["key"] == exchange_conf["key"]
+    assert validated_conf["exchange"]["secret"] == exchange_conf["secret"]
+    assert validated_conf["exchange"]["pair_whitelist"] != conf1["exchange"]["pair_whitelist"]
+    assert validated_conf["exchange"]["pair_whitelist"] == conf2["exchange"]["pair_whitelist"]
 
-    assert 'internals' in validated_conf
+    assert "internals" in validated_conf
 
 
 def test_from_config(default_conf, mocker, caplog) -> None:
     conf1 = deepcopy(default_conf)
     conf2 = deepcopy(default_conf)
-    del conf1['exchange']['key']
-    del conf1['exchange']['secret']
-    del conf2['exchange']['name']
-    conf2['exchange']['pair_whitelist'] += ['NANO/BTC']
-    conf2['fiat_display_currency'] = "EUR"
+    del conf1["exchange"]["key"]
+    del conf1["exchange"]["secret"]
+    del conf2["exchange"]["name"]
+    conf2["exchange"]["pair_whitelist"] += ["NANO/BTC"]
+    conf2["fiat_display_currency"] = "EUR"
     config_files = [conf1, conf2]
-    mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
+    mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x)
 
     configsmock = MagicMock(side_effect=config_files)
-    mocker.patch('freqtrade.configuration.load_config.load_config_file', configsmock)
+    mocker.patch("freqtrade.configuration.load_config.load_config_file", configsmock)
 
-    validated_conf = Configuration.from_files(['test_conf.json', 'test2_conf.json'])
+    validated_conf = Configuration.from_files(["test_conf.json", "test2_conf.json"])
 
-    exchange_conf = default_conf['exchange']
-    assert validated_conf['exchange']['name'] == exchange_conf['name']
-    assert validated_conf['exchange']['key'] == exchange_conf['key']
-    assert validated_conf['exchange']['secret'] == exchange_conf['secret']
-    assert validated_conf['exchange']['pair_whitelist'] != conf1['exchange']['pair_whitelist']
-    assert validated_conf['exchange']['pair_whitelist'] == conf2['exchange']['pair_whitelist']
-    assert validated_conf['fiat_display_currency'] == "EUR"
-    assert 'internals' in validated_conf
-    assert isinstance(validated_conf['user_data_dir'], Path)
+    exchange_conf = default_conf["exchange"]
+    assert validated_conf["exchange"]["name"] == exchange_conf["name"]
+    assert validated_conf["exchange"]["key"] == exchange_conf["key"]
+    assert validated_conf["exchange"]["secret"] == exchange_conf["secret"]
+    assert validated_conf["exchange"]["pair_whitelist"] != conf1["exchange"]["pair_whitelist"]
+    assert validated_conf["exchange"]["pair_whitelist"] == conf2["exchange"]["pair_whitelist"]
+    assert validated_conf["fiat_display_currency"] == "EUR"
+    assert "internals" in validated_conf
+    assert isinstance(validated_conf["user_data_dir"], Path)
 
 
 def test_from_recursive_files(testdatadir) -> None:
@@ -216,20 +231,20 @@ def test_from_recursive_files(testdatadir) -> None:
 
     assert conf
     # Exchange comes from "the first config"
-    assert conf['exchange']
+    assert conf["exchange"]
     # Pricing comes from the 2nd config
-    assert conf['entry_pricing']
-    assert conf['entry_pricing']['price_side'] == "same"
-    assert conf['exit_pricing']
+    assert conf["entry_pricing"]
+    assert conf["entry_pricing"]["price_side"] == "same"
+    assert conf["exit_pricing"]
     # The other key comes from pricing2, which is imported by pricing.json.
     # pricing.json is a level higher, therefore wins.
-    assert conf['exit_pricing']['price_side'] == "same"
+    assert conf["exit_pricing"]["price_side"] == "same"
 
-    assert len(conf['config_files']) == 4
-    assert 'testconfig.json' in conf['config_files'][0]
-    assert 'test_pricing_conf.json' in conf['config_files'][1]
-    assert 'test_base_config.json' in conf['config_files'][2]
-    assert 'test_pricing2_conf.json' in conf['config_files'][3]
+    assert len(conf["config_files"]) == 4
+    assert "testconfig.json" in conf["config_files"][0]
+    assert "test_pricing_conf.json" in conf["config_files"][1]
+    assert "test_base_config.json" in conf["config_files"][2]
+    assert "test_pricing2_conf.json" in conf["config_files"][3]
 
     files = testdatadir / "testconfigs/recursive.json"
     with pytest.raises(OperationalException, match="Config loop detected."):
@@ -239,74 +254,77 @@ def test_from_recursive_files(testdatadir) -> None:
 def test_print_config(default_conf, mocker, caplog) -> None:
     conf1 = deepcopy(default_conf)
     # Delete non-json elements from default_conf
-    del conf1['user_data_dir']
-    conf1['datadir'] = str(conf1['datadir'])
+    del conf1["user_data_dir"]
+    conf1["datadir"] = str(conf1["datadir"])
     config_files = [conf1]
 
     configsmock = MagicMock(side_effect=config_files)
-    mocker.patch('freqtrade.configuration.configuration.create_datadir', lambda c, x: x)
-    mocker.patch('freqtrade.configuration.configuration.load_from_files', configsmock)
+    mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x)
+    mocker.patch("freqtrade.configuration.configuration.load_from_files", configsmock)
 
-    validated_conf = Configuration.from_files(['test_conf.json'])
+    validated_conf = Configuration.from_files(["test_conf.json"])
 
-    assert isinstance(validated_conf['user_data_dir'], Path)
+    assert isinstance(validated_conf["user_data_dir"], Path)
     assert "user_data_dir" in validated_conf
     assert "original_config" in validated_conf
-    assert isinstance(json.dumps(validated_conf['original_config']), str)
+    assert isinstance(json.dumps(validated_conf["original_config"]), str)
 
 
 def test_load_config_max_open_trades_minus_one(default_conf, mocker, caplog) -> None:
-    default_conf['max_open_trades'] = -1
+    default_conf["max_open_trades"] = -1
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = Arguments(['trade']).get_parsed_arg()
+    args = Arguments(["trade"]).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf['max_open_trades'] > 999999999
-    assert validated_conf['max_open_trades'] == float('inf')
+    assert validated_conf["max_open_trades"] > 999999999
+    assert validated_conf["max_open_trades"] == float("inf")
     assert "runmode" in validated_conf
-    assert validated_conf['runmode'] == RunMode.DRY_RUN
+    assert validated_conf["runmode"] == RunMode.DRY_RUN
 
 
 def test_load_config_file_exception(mocker) -> None:
     mocker.patch(
-        'freqtrade.configuration.configuration.Path.open',
-        MagicMock(side_effect=FileNotFoundError('File not found'))
+        "freqtrade.configuration.configuration.Path.open",
+        MagicMock(side_effect=FileNotFoundError("File not found")),
     )
 
     with pytest.raises(OperationalException, match=r'.*Config file "somefile" not found!*'):
-        load_config_file('somefile')
+        load_config_file("somefile")
 
 
 def test_load_config(default_conf, mocker) -> None:
-    del default_conf['strategy_path']
+    del default_conf["strategy_path"]
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = Arguments(['trade']).get_parsed_arg()
+    args = Arguments(["trade"]).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf.get('strategy_path') is None
-    assert 'edge' not in validated_conf
+    assert validated_conf.get("strategy_path") is None
+    assert "edge" not in validated_conf
 
 
 def test_load_config_with_params(default_conf, mocker) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--strategy-path', '/some/path',
-        '--db-url', 'sqlite:///someurl',
+        "trade",
+        "--strategy",
+        "TestStrategy",
+        "--strategy-path",
+        "/some/path",
+        "--db-url",
+        "sqlite:///someurl",
     ]
     args = Arguments(arglist).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf.get('strategy') == 'TestStrategy'
-    assert validated_conf.get('strategy_path') == '/some/path'
-    assert validated_conf.get('db_url') == 'sqlite:///someurl'
+    assert validated_conf.get("strategy") == "TestStrategy"
+    assert validated_conf.get("strategy_path") == "/some/path"
+    assert validated_conf.get("db_url") == "sqlite:///someurl"
 
     # Test conf provided db_url prod
     conf = default_conf.copy()
@@ -314,16 +332,12 @@ def test_load_config_with_params(default_conf, mocker) -> None:
     conf["db_url"] = "sqlite:///path/to/db.sqlite"
     patched_configuration_load_config_file(mocker, conf)
 
-    arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--strategy-path', '/some/path'
-    ]
+    arglist = ["trade", "--strategy", "TestStrategy", "--strategy-path", "/some/path"]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
-    assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
+    assert validated_conf.get("db_url") == "sqlite:///path/to/db.sqlite"
 
     # Test conf provided db_url dry_run
     conf = default_conf.copy()
@@ -331,16 +345,12 @@ def test_load_config_with_params(default_conf, mocker) -> None:
     conf["db_url"] = "sqlite:///path/to/db.sqlite"
     patched_configuration_load_config_file(mocker, conf)
 
-    arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--strategy-path', '/some/path'
-    ]
+    arglist = ["trade", "--strategy", "TestStrategy", "--strategy-path", "/some/path"]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
-    assert validated_conf.get('db_url') == "sqlite:///path/to/db.sqlite"
+    assert validated_conf.get("db_url") == "sqlite:///path/to/db.sqlite"
 
     # Test args provided db_url prod
     conf = default_conf.copy()
@@ -348,18 +358,14 @@ def test_load_config_with_params(default_conf, mocker) -> None:
     del conf["db_url"]
     patched_configuration_load_config_file(mocker, conf)
 
-    arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--strategy-path', '/some/path'
-    ]
+    arglist = ["trade", "--strategy", "TestStrategy", "--strategy-path", "/some/path"]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
-    assert validated_conf.get('db_url') == DEFAULT_DB_PROD_URL
+    assert validated_conf.get("db_url") == DEFAULT_DB_PROD_URL
     assert "runmode" in validated_conf
-    assert validated_conf['runmode'] == RunMode.LIVE
+    assert validated_conf["runmode"] == RunMode.LIVE
 
     # Test args provided db_url dry_run
     conf = default_conf.copy()
@@ -367,58 +373,60 @@ def test_load_config_with_params(default_conf, mocker) -> None:
     conf["db_url"] = DEFAULT_DB_PROD_URL
     patched_configuration_load_config_file(mocker, conf)
 
-    arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--strategy-path', '/some/path'
-    ]
+    arglist = ["trade", "--strategy", "TestStrategy", "--strategy-path", "/some/path"]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
-    assert validated_conf.get('db_url') == DEFAULT_DB_DRYRUN_URL
+    assert validated_conf.get("db_url") == DEFAULT_DB_DRYRUN_URL
 
 
-@pytest.mark.parametrize("config_value,expected,arglist", [
-    (True, True, ['trade', '--dry-run']),  # Leave config untouched
-    (False, True, ['trade', '--dry-run']),  # Override config untouched
-    (False, False, ['trade']),  # Leave config untouched
-    (True, True, ['trade']),  # Leave config untouched
-])
+@pytest.mark.parametrize(
+    "config_value,expected,arglist",
+    [
+        (True, True, ["trade", "--dry-run"]),  # Leave config untouched
+        (False, True, ["trade", "--dry-run"]),  # Override config untouched
+        (False, False, ["trade"]),  # Leave config untouched
+        (True, True, ["trade"]),  # Leave config untouched
+    ],
+)
 def test_load_dry_run(default_conf, mocker, config_value, expected, arglist) -> None:
-
-    default_conf['dry_run'] = config_value
+    default_conf["dry_run"] = config_value
     patched_configuration_load_config_file(mocker, default_conf)
 
     configuration = Configuration(Arguments(arglist).get_parsed_arg())
     validated_conf = configuration.load_config()
 
-    assert validated_conf['dry_run'] is expected
-    assert validated_conf['runmode'] == (RunMode.DRY_RUN if expected else RunMode.LIVE)
+    assert validated_conf["dry_run"] is expected
+    assert validated_conf["runmode"] == (RunMode.DRY_RUN if expected else RunMode.LIVE)
 
 
 def test_load_custom_strategy(default_conf, mocker) -> None:
-    default_conf.update({
-        'strategy': 'CustomStrategy',
-        'strategy_path': '/tmp/strategies',
-    })
+    default_conf.update(
+        {
+            "strategy": "CustomStrategy",
+            "strategy_path": "/tmp/strategies",
+        }
+    )
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = Arguments(['trade']).get_parsed_arg()
+    args = Arguments(["trade"]).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf.get('strategy') == 'CustomStrategy'
-    assert validated_conf.get('strategy_path') == '/tmp/strategies'
+    assert validated_conf.get("strategy") == "CustomStrategy"
+    assert validated_conf.get("strategy_path") == "/tmp/strategies"
 
 
 def test_show_info(default_conf, mocker, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'trade',
-        '--strategy', 'TestStrategy',
-        '--db-url', 'sqlite:///tmp/testdb',
+        "trade",
+        "--strategy",
+        "TestStrategy",
+        "--db-url",
+        "sqlite:///tmp/testdb",
     ]
     args = Arguments(arglist).get_parsed_arg()
 
@@ -426,95 +434,101 @@ def test_show_info(default_conf, mocker, caplog) -> None:
     configuration.get_config()
 
     assert log_has('Using DB: "sqlite:///tmp/testdb"', caplog)
-    assert log_has('Dry run is enabled', caplog)
+    assert log_has("Dry run is enabled", caplog)
 
 
 def test_setup_configuration_without_arguments(mocker, default_conf, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'backtesting',
-        '--config', 'config.json',
-        '--strategy', CURRENT_TEST_STRATEGY,
+        "backtesting",
+        "--config",
+        "config.json",
+        "--strategy",
+        CURRENT_TEST_STRATEGY,
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     config = configuration.get_config()
-    assert 'max_open_trades' in config
-    assert 'stake_currency' in config
-    assert 'stake_amount' in config
-    assert 'exchange' in config
-    assert 'pair_whitelist' in config['exchange']
-    assert 'datadir' in config
-    assert 'user_data_dir' in config
-    assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
-    assert 'timeframe' in config
-    assert not log_has('Parameter -i/--timeframe detected ...', caplog)
+    assert "max_open_trades" in config
+    assert "stake_currency" in config
+    assert "stake_amount" in config
+    assert "exchange" in config
+    assert "pair_whitelist" in config["exchange"]
+    assert "datadir" in config
+    assert "user_data_dir" in config
+    assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog)
+    assert "timeframe" in config
+    assert not log_has("Parameter -i/--timeframe detected ...", caplog)
 
-    assert 'position_stacking' not in config
-    assert not log_has('Parameter --enable-position-stacking detected ...', caplog)
+    assert "position_stacking" not in config
+    assert not log_has("Parameter --enable-position-stacking detected ...", caplog)
 
-    assert 'timerange' not in config
+    assert "timerange" not in config
 
 
 def test_setup_configuration_with_arguments(mocker, default_conf, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
+    mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x)
     mocker.patch(
-        'freqtrade.configuration.configuration.create_datadir',
-        lambda c, x: x
-    )
-    mocker.patch(
-        'freqtrade.configuration.configuration.create_userdata_dir',
-        lambda x, *args, **kwargs: Path(x)
+        "freqtrade.configuration.configuration.create_userdata_dir",
+        lambda x, *args, **kwargs: Path(x),
     )
     arglist = [
-        'backtesting',
-        '--config', 'config.json',
-        '--strategy', CURRENT_TEST_STRATEGY,
-        '--datadir', '/foo/bar',
-        '--userdir', "/tmp/freqtrade",
-        '--timeframe', '1m',
-        '--enable-position-stacking',
-        '--disable-max-market-positions',
-        '--timerange', ':100',
-        '--export', 'trades',
-        '--stake-amount', 'unlimited'
+        "backtesting",
+        "--config",
+        "config.json",
+        "--strategy",
+        CURRENT_TEST_STRATEGY,
+        "--datadir",
+        "/foo/bar",
+        "--userdir",
+        "/tmp/freqtrade",
+        "--timeframe",
+        "1m",
+        "--enable-position-stacking",
+        "--disable-max-market-positions",
+        "--timerange",
+        ":100",
+        "--export",
+        "trades",
+        "--stake-amount",
+        "unlimited",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     config = configuration.get_config()
-    assert 'max_open_trades' in config
-    assert 'stake_currency' in config
-    assert 'stake_amount' in config
-    assert 'exchange' in config
-    assert 'pair_whitelist' in config['exchange']
-    assert 'datadir' in config
-    assert log_has('Using data directory: {} ...'.format("/foo/bar"), caplog)
-    assert log_has('Using user-data directory: {} ...'.format(Path("/tmp/freqtrade")), caplog)
-    assert 'user_data_dir' in config
+    assert "max_open_trades" in config
+    assert "stake_currency" in config
+    assert "stake_amount" in config
+    assert "exchange" in config
+    assert "pair_whitelist" in config["exchange"]
+    assert "datadir" in config
+    assert log_has("Using data directory: {} ...".format("/foo/bar"), caplog)
+    assert log_has("Using user-data directory: {} ...".format(Path("/tmp/freqtrade")), caplog)
+    assert "user_data_dir" in config
 
-    assert 'timeframe' in config
-    assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
-                   caplog)
+    assert "timeframe" in config
+    assert log_has("Parameter -i/--timeframe detected ... Using timeframe: 1m ...", caplog)
 
-    assert 'position_stacking' in config
-    assert log_has('Parameter --enable-position-stacking detected ...', caplog)
+    assert "position_stacking" in config
+    assert log_has("Parameter --enable-position-stacking detected ...", caplog)
 
-    assert 'use_max_market_positions' in config
-    assert log_has('Parameter --disable-max-market-positions detected ...', caplog)
-    assert log_has('max_open_trades set to unlimited ...', caplog)
+    assert "use_max_market_positions" in config
+    assert log_has("Parameter --disable-max-market-positions detected ...", caplog)
+    assert log_has("max_open_trades set to unlimited ...", caplog)
 
-    assert 'timerange' in config
-    assert log_has('Parameter --timerange detected: {} ...'.format(config['timerange']), caplog)
+    assert "timerange" in config
+    assert log_has("Parameter --timerange detected: {} ...".format(config["timerange"]), caplog)
 
-    assert 'export' in config
-    assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
-    assert 'stake_amount' in config
-    assert config['stake_amount'] == 'unlimited'
+    assert "export" in config
+    assert log_has("Parameter --export detected: {} ...".format(config["export"]), caplog)
+    assert "stake_amount" in config
+    assert config["stake_amount"] == "unlimited"
 
 
 def test_setup_configuration_with_stratlist(mocker, default_conf, caplog) -> None:
@@ -524,82 +538,87 @@ def test_setup_configuration_with_stratlist(mocker, default_conf, caplog) -> Non
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'backtesting',
-        '--config', 'config.json',
-        '--timeframe', '1m',
-        '--export', 'trades',
-        '--strategy-list',
+        "backtesting",
+        "--config",
+        "config.json",
+        "--timeframe",
+        "1m",
+        "--export",
+        "trades",
+        "--strategy-list",
         CURRENT_TEST_STRATEGY,
-        'TestStrategy'
+        "TestStrategy",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args, RunMode.BACKTEST)
     config = configuration.get_config()
-    assert config['runmode'] == RunMode.BACKTEST
-    assert 'max_open_trades' in config
-    assert 'stake_currency' in config
-    assert 'stake_amount' in config
-    assert 'exchange' in config
-    assert 'pair_whitelist' in config['exchange']
-    assert 'datadir' in config
-    assert log_has('Using data directory: {} ...'.format(config['datadir']), caplog)
-    assert 'timeframe' in config
-    assert log_has('Parameter -i/--timeframe detected ... Using timeframe: 1m ...',
-                   caplog)
+    assert config["runmode"] == RunMode.BACKTEST
+    assert "max_open_trades" in config
+    assert "stake_currency" in config
+    assert "stake_amount" in config
+    assert "exchange" in config
+    assert "pair_whitelist" in config["exchange"]
+    assert "datadir" in config
+    assert log_has("Using data directory: {} ...".format(config["datadir"]), caplog)
+    assert "timeframe" in config
+    assert log_has("Parameter -i/--timeframe detected ... Using timeframe: 1m ...", caplog)
 
-    assert 'strategy_list' in config
-    assert log_has('Using strategy list of 2 strategies', caplog)
+    assert "strategy_list" in config
+    assert log_has("Using strategy list of 2 strategies", caplog)
 
-    assert 'position_stacking' not in config
+    assert "position_stacking" not in config
 
-    assert 'use_max_market_positions' not in config
+    assert "use_max_market_positions" not in config
 
-    assert 'timerange' not in config
+    assert "timerange" not in config
 
-    assert 'export' in config
-    assert log_has('Parameter --export detected: {} ...'.format(config['export']), caplog)
+    assert "export" in config
+    assert log_has("Parameter --export detected: {} ...".format(config["export"]), caplog)
 
 
 def test_hyperopt_with_arguments(mocker, default_conf, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'hyperopt',
-        '--epochs', '10',
-        '--spaces', 'all',
+        "hyperopt",
+        "--epochs",
+        "10",
+        "--spaces",
+        "all",
     ]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args, RunMode.HYPEROPT)
     config = configuration.get_config()
 
-    assert 'epochs' in config
-    assert int(config['epochs']) == 10
-    assert log_has('Parameter --epochs detected ... Will run Hyperopt with for 10 epochs ...',
-                   caplog)
+    assert "epochs" in config
+    assert int(config["epochs"]) == 10
+    assert log_has(
+        "Parameter --epochs detected ... Will run Hyperopt with for 10 epochs ...", caplog
+    )
 
-    assert 'spaces' in config
-    assert config['spaces'] == ['all']
+    assert "spaces" in config
+    assert config["spaces"] == ["all"]
     assert log_has("Parameter -s/--spaces detected: ['all']", caplog)
     assert "runmode" in config
-    assert config['runmode'] == RunMode.HYPEROPT
+    assert config["runmode"] == RunMode.HYPEROPT
 
 
 def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
 
     # Prevent setting loggers
-    mocker.patch('freqtrade.loggers.set_loggers', MagicMock)
-    arglist = ['trade', '-vvv']
+    mocker.patch("freqtrade.loggers.set_loggers", MagicMock)
+    arglist = ["trade", "-vvv"]
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf.get('verbosity') == 3
-    assert log_has('Verbosity set to 3', caplog)
+    assert validated_conf.get("verbosity") == 3
+    assert log_has("Verbosity set to 3", caplog)
 
 
 def test_set_logfile(default_conf, mocker, tmp_path):
@@ -607,13 +626,15 @@ def test_set_logfile(default_conf, mocker, tmp_path):
     f = tmp_path / "test_file.log"
     assert not f.is_file()
     arglist = [
-        'trade', '--logfile', str(f),
+        "trade",
+        "--logfile",
+        str(f),
     ]
     args = Arguments(arglist).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf['logfile'] == str(f)
+    assert validated_conf["logfile"] == str(f)
     assert f.is_file()
     try:
         f.unlink()
@@ -622,15 +643,15 @@ def test_set_logfile(default_conf, mocker, tmp_path):
 
 
 def test_load_config_warn_forcebuy(default_conf, mocker, caplog) -> None:
-    default_conf['force_entry_enable'] = True
+    default_conf["force_entry_enable"] = True
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = Arguments(['trade']).get_parsed_arg()
+    args = Arguments(["trade"]).get_parsed_arg()
     configuration = Configuration(args)
     validated_conf = configuration.load_config()
 
-    assert validated_conf.get('force_entry_enable')
-    assert log_has('`force_entry_enable` RPC message enabled.', caplog)
+    assert validated_conf.get("force_entry_enable")
+    assert log_has("`force_entry_enable` RPC message enabled.", caplog)
 
 
 def test_validate_default_conf(default_conf) -> None:
@@ -638,16 +659,28 @@ def test_validate_default_conf(default_conf) -> None:
     validate_config_schema(default_conf)
 
 
+@pytest.mark.parametrize("fiat", ["EUR", "USD", "", None])
+def test_validate_fiat_currency_options(default_conf, fiat) -> None:
+    # Validate via our validator - we allow setting defaults!
+    if fiat is not None:
+        default_conf["fiat_display_currency"] = fiat
+    else:
+        del default_conf["fiat_display_currency"]
+    validate_config_schema(default_conf)
+
+
 def test_validate_max_open_trades(default_conf):
-    default_conf['max_open_trades'] = float('inf')
-    default_conf['stake_amount'] = 'unlimited'
-    with pytest.raises(OperationalException, match='`max_open_trades` and `stake_amount` '
-                                                   'cannot both be unlimited.'):
+    default_conf["max_open_trades"] = float("inf")
+    default_conf["stake_amount"] = "unlimited"
+    with pytest.raises(
+        OperationalException,
+        match="`max_open_trades` and `stake_amount` cannot both be unlimited.",
+    ):
         validate_config_consistency(default_conf)
 
 
 def test_validate_price_side(default_conf):
-    default_conf['order_types'] = {
+    default_conf["order_types"] = {
         "entry": "limit",
         "exit": "limit",
         "stoploss": "limit",
@@ -657,116 +690,161 @@ def test_validate_price_side(default_conf):
     validate_config_consistency(default_conf)
 
     conf = deepcopy(default_conf)
-    conf['order_types']['entry'] = 'market'
-    with pytest.raises(OperationalException,
-                       match='Market entry orders require entry_pricing.price_side = "other".'):
+    conf["order_types"]["entry"] = "market"
+    with pytest.raises(
+        OperationalException,
+        match='Market entry orders require entry_pricing.price_side = "other".',
+    ):
         validate_config_consistency(conf)
 
     conf = deepcopy(default_conf)
-    conf['order_types']['exit'] = 'market'
-    with pytest.raises(OperationalException,
-                       match='Market exit orders require exit_pricing.price_side = "other".'):
+    conf["order_types"]["exit"] = "market"
+    with pytest.raises(
+        OperationalException, match='Market exit orders require exit_pricing.price_side = "other".'
+    ):
         validate_config_consistency(conf)
 
     # Validate inversed case
     conf = deepcopy(default_conf)
-    conf['order_types']['exit'] = 'market'
-    conf['order_types']['entry'] = 'market'
-    conf['exit_pricing']['price_side'] = 'bid'
-    conf['entry_pricing']['price_side'] = 'ask'
+    conf["order_types"]["exit"] = "market"
+    conf["order_types"]["entry"] = "market"
+    conf["exit_pricing"]["price_side"] = "bid"
+    conf["entry_pricing"]["price_side"] = "ask"
 
     validate_config_consistency(conf)
 
 
 def test_validate_tsl(default_conf):
-    default_conf['stoploss'] = 0.0
-    with pytest.raises(OperationalException, match='The config stoploss needs to be different '
-                                                   'from 0 to avoid problems with sell orders.'):
+    default_conf["stoploss"] = 0.0
+    with pytest.raises(
+        OperationalException,
+        match="The config stoploss needs to be different "
+        "from 0 to avoid problems with sell orders.",
+    ):
         validate_config_consistency(default_conf)
-    default_conf['stoploss'] = -0.10
+    default_conf["stoploss"] = -0.10
 
-    default_conf['trailing_stop'] = True
-    default_conf['trailing_stop_positive'] = 0
-    default_conf['trailing_stop_positive_offset'] = 0
+    default_conf["trailing_stop"] = True
+    default_conf["trailing_stop_positive"] = 0
+    default_conf["trailing_stop_positive_offset"] = 0
 
-    default_conf['trailing_only_offset_is_reached'] = True
-    with pytest.raises(OperationalException,
-                       match=r'The config trailing_only_offset_is_reached needs '
-                       'trailing_stop_positive_offset to be more than 0 in your config.'):
+    default_conf["trailing_only_offset_is_reached"] = True
+    with pytest.raises(
+        OperationalException,
+        match=r"The config trailing_only_offset_is_reached needs "
+        "trailing_stop_positive_offset to be more than 0 in your config.",
+    ):
         validate_config_consistency(default_conf)
 
-    default_conf['trailing_stop_positive_offset'] = 0.01
-    default_conf['trailing_stop_positive'] = 0.015
-    with pytest.raises(OperationalException,
-                       match=r'The config trailing_stop_positive_offset needs '
-                       'to be greater than trailing_stop_positive in your config.'):
+    default_conf["trailing_stop_positive_offset"] = 0.01
+    default_conf["trailing_stop_positive"] = 0.015
+    with pytest.raises(
+        OperationalException,
+        match=r"The config trailing_stop_positive_offset needs "
+        "to be greater than trailing_stop_positive in your config.",
+    ):
         validate_config_consistency(default_conf)
 
-    default_conf['trailing_stop_positive'] = 0.01
-    default_conf['trailing_stop_positive_offset'] = 0.015
+    default_conf["trailing_stop_positive"] = 0.01
+    default_conf["trailing_stop_positive_offset"] = 0.015
     validate_config_consistency(default_conf)
 
     # 0 trailing stop positive - results in "Order would trigger immediately"
-    default_conf['trailing_stop_positive'] = 0
-    default_conf['trailing_stop_positive_offset'] = 0.02
-    default_conf['trailing_only_offset_is_reached'] = False
-    with pytest.raises(OperationalException,
-                       match='The config trailing_stop_positive needs to be different from 0 '
-                       'to avoid problems with sell orders'):
+    default_conf["trailing_stop_positive"] = 0
+    default_conf["trailing_stop_positive_offset"] = 0.02
+    default_conf["trailing_only_offset_is_reached"] = False
+    with pytest.raises(
+        OperationalException,
+        match="The config trailing_stop_positive needs to be different from 0 "
+        "to avoid problems with sell orders",
+    ):
         validate_config_consistency(default_conf)
 
 
 def test_validate_edge2(edge_conf):
-    edge_conf.update({
-        "use_exit_signal": True,
-    })
+    edge_conf.update(
+        {
+            "use_exit_signal": True,
+        }
+    )
     # Passes test
     validate_config_consistency(edge_conf)
 
-    edge_conf.update({
-        "use_exit_signal": False,
-    })
-    with pytest.raises(OperationalException, match="Edge requires `use_exit_signal` to be True, "
-                       "otherwise no sells will happen."):
+    edge_conf.update(
+        {
+            "use_exit_signal": False,
+        }
+    )
+    with pytest.raises(
+        OperationalException,
+        match="Edge requires `use_exit_signal` to be True, otherwise no sells will happen.",
+    ):
         validate_config_consistency(edge_conf)
 
 
 def test_validate_whitelist(default_conf):
-    default_conf['runmode'] = RunMode.DRY_RUN
+    default_conf["runmode"] = RunMode.DRY_RUN
     # Test regular case - has whitelist and uses StaticPairlist
     validate_config_consistency(default_conf)
     conf = deepcopy(default_conf)
-    del conf['exchange']['pair_whitelist']
+    del conf["exchange"]["pair_whitelist"]
     # Test error case
-    with pytest.raises(OperationalException,
-                       match="StaticPairList requires pair_whitelist to be set."):
-
+    with pytest.raises(
+        OperationalException, match="StaticPairList requires pair_whitelist to be set."
+    ):
         validate_config_consistency(conf)
 
     conf = deepcopy(default_conf)
 
-    conf.update({"pairlists": [{
-        "method": "VolumePairList",
-    }]})
+    conf.update(
+        {
+            "pairlists": [
+                {
+                    "method": "VolumePairList",
+                }
+            ]
+        }
+    )
     # Dynamic whitelist should not care about pair_whitelist
     validate_config_consistency(conf)
-    del conf['exchange']['pair_whitelist']
+    del conf["exchange"]["pair_whitelist"]
 
     validate_config_consistency(conf)
 
 
-@pytest.mark.parametrize('protconf,expected', [
-    ([], None),
-    ([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
-    ([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
-    ([{"method": "StoplossGuard", "lookback_period_candles": 20, "lookback_period": 2000,
-       "stop_duration": 10}], r'Protections must specify either `lookback_period`.*'),
-    ([{"method": "StoplossGuard", "lookback_period": 20, "stop_duration": 10,
-       "stop_duration_candles": 10}], r'Protections must specify either `stop_duration`.*'),
-])
+@pytest.mark.parametrize(
+    "protconf,expected",
+    [
+        ([], None),
+        ([{"method": "StoplossGuard", "lookback_period": 2000, "stop_duration_candles": 10}], None),
+        ([{"method": "StoplossGuard", "lookback_period_candles": 20, "stop_duration": 10}], None),
+        (
+            [
+                {
+                    "method": "StoplossGuard",
+                    "lookback_period_candles": 20,
+                    "lookback_period": 2000,
+                    "stop_duration": 10,
+                }
+            ],
+            r"Protections must specify either `lookback_period`.*",
+        ),
+        (
+            [
+                {
+                    "method": "StoplossGuard",
+                    "lookback_period": 20,
+                    "stop_duration": 10,
+                    "stop_duration_candles": 10,
+                }
+            ],
+            r"Protections must specify either `stop_duration`.*",
+        ),
+    ],
+)
 def test_validate_protections(default_conf, protconf, expected):
     conf = deepcopy(default_conf)
-    conf['protections'] = protconf
+    conf["protections"] = protconf
     if expected:
         with pytest.raises(OperationalException, match=expected):
             validate_config_consistency(conf)
@@ -776,144 +854,150 @@ def test_validate_protections(default_conf, protconf, expected):
 
 def test_validate_ask_orderbook(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf['exit_pricing']['use_order_book'] = True
-    conf['exit_pricing']['order_book_min'] = 2
-    conf['exit_pricing']['order_book_max'] = 2
+    conf["exit_pricing"]["use_order_book"] = True
+    conf["exit_pricing"]["order_book_min"] = 2
+    conf["exit_pricing"]["order_book_max"] = 2
 
     validate_config_consistency(conf)
     assert log_has_re(r"DEPRECATED: Please use `order_book_top` instead of.*", caplog)
-    assert conf['exit_pricing']['order_book_top'] == 2
+    assert conf["exit_pricing"]["order_book_top"] == 2
 
-    conf['exit_pricing']['order_book_max'] = 5
+    conf["exit_pricing"]["order_book_max"] = 5
 
-    with pytest.raises(OperationalException,
-                       match=r"Using order_book_max != order_book_min in exit_pricing.*"):
+    with pytest.raises(
+        OperationalException, match=r"Using order_book_max != order_book_min in exit_pricing.*"
+    ):
         validate_config_consistency(conf)
 
 
 def test_validate_time_in_force(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf['order_time_in_force'] = {
-        'buy': 'gtc',
-        'sell': 'GTC',
+    conf["order_time_in_force"] = {
+        "buy": "gtc",
+        "sell": "GTC",
     }
     validate_config_consistency(conf)
     assert log_has_re(r"DEPRECATED: Using 'buy' and 'sell' for time_in_force is.*", caplog)
-    assert conf['order_time_in_force']['entry'] == 'gtc'
-    assert conf['order_time_in_force']['exit'] == 'GTC'
+    assert conf["order_time_in_force"]["entry"] == "gtc"
+    assert conf["order_time_in_force"]["exit"] == "GTC"
 
     conf = deepcopy(default_conf)
-    conf['order_time_in_force'] = {
-        'buy': 'GTC',
-        'sell': 'GTC',
+    conf["order_time_in_force"] = {
+        "buy": "GTC",
+        "sell": "GTC",
     }
-    conf['trading_mode'] = 'futures'
-    with pytest.raises(OperationalException,
-                       match=r"Please migrate your time_in_force settings .* 'entry' and 'exit'\."):
+    conf["trading_mode"] = "futures"
+    with pytest.raises(
+        OperationalException,
+        match=r"Please migrate your time_in_force settings .* 'entry' and 'exit'\.",
+    ):
         validate_config_consistency(conf)
 
 
 def test__validate_order_types(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf['order_types'] = {
-        'buy': 'limit',
-        'sell': 'market',
-        'forcesell': 'market',
-        'forcebuy': 'limit',
-        'stoploss': 'market',
-        'stoploss_on_exchange': False,
+    conf["order_types"] = {
+        "buy": "limit",
+        "sell": "market",
+        "forcesell": "market",
+        "forcebuy": "limit",
+        "stoploss": "market",
+        "stoploss_on_exchange": False,
     }
     validate_config_consistency(conf)
     assert log_has_re(r"DEPRECATED: Using 'buy' and 'sell' for order_types is.*", caplog)
-    assert conf['order_types']['entry'] == 'limit'
-    assert conf['order_types']['exit'] == 'market'
-    assert conf['order_types']['force_entry'] == 'limit'
-    assert 'buy' not in conf['order_types']
-    assert 'sell' not in conf['order_types']
-    assert 'forcebuy' not in conf['order_types']
-    assert 'forcesell' not in conf['order_types']
+    assert conf["order_types"]["entry"] == "limit"
+    assert conf["order_types"]["exit"] == "market"
+    assert conf["order_types"]["force_entry"] == "limit"
+    assert "buy" not in conf["order_types"]
+    assert "sell" not in conf["order_types"]
+    assert "forcebuy" not in conf["order_types"]
+    assert "forcesell" not in conf["order_types"]
 
     conf = deepcopy(default_conf)
-    conf['order_types'] = {
-        'buy': 'limit',
-        'sell': 'market',
-        'forcesell': 'market',
-        'forcebuy': 'limit',
-        'stoploss': 'market',
-        'stoploss_on_exchange': False,
+    conf["order_types"] = {
+        "buy": "limit",
+        "sell": "market",
+        "forcesell": "market",
+        "forcebuy": "limit",
+        "stoploss": "market",
+        "stoploss_on_exchange": False,
     }
-    conf['trading_mode'] = 'futures'
-    with pytest.raises(OperationalException,
-                       match=r"Please migrate your order_types settings to use the new wording\."):
+    conf["trading_mode"] = "futures"
+    with pytest.raises(
+        OperationalException,
+        match=r"Please migrate your order_types settings to use the new wording\.",
+    ):
         validate_config_consistency(conf)
 
 
 def test__validate_unfilledtimeout(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf['unfilledtimeout'] = {
-        'buy': 30,
-        'sell': 35,
+    conf["unfilledtimeout"] = {
+        "buy": 30,
+        "sell": 35,
     }
     validate_config_consistency(conf)
     assert log_has_re(r"DEPRECATED: Using 'buy' and 'sell' for unfilledtimeout is.*", caplog)
-    assert conf['unfilledtimeout']['entry'] == 30
-    assert conf['unfilledtimeout']['exit'] == 35
-    assert 'buy' not in conf['unfilledtimeout']
-    assert 'sell' not in conf['unfilledtimeout']
+    assert conf["unfilledtimeout"]["entry"] == 30
+    assert conf["unfilledtimeout"]["exit"] == 35
+    assert "buy" not in conf["unfilledtimeout"]
+    assert "sell" not in conf["unfilledtimeout"]
 
     conf = deepcopy(default_conf)
-    conf['unfilledtimeout'] = {
-        'buy': 30,
-        'sell': 35,
+    conf["unfilledtimeout"] = {
+        "buy": 30,
+        "sell": 35,
     }
-    conf['trading_mode'] = 'futures'
+    conf["trading_mode"] = "futures"
     with pytest.raises(
-            OperationalException,
-            match=r"Please migrate your unfilledtimeout settings to use the new wording\."):
+        OperationalException,
+        match=r"Please migrate your unfilledtimeout settings to use the new wording\.",
+    ):
         validate_config_consistency(conf)
 
 
 def test__validate_pricing_rules(default_conf, caplog) -> None:
     def_conf = deepcopy(default_conf)
-    del def_conf['entry_pricing']
-    del def_conf['exit_pricing']
+    del def_conf["entry_pricing"]
+    del def_conf["exit_pricing"]
 
-    def_conf['ask_strategy'] = {
-        'price_side': 'ask',
-        'use_order_book': True,
-        'bid_last_balance': 0.5
+    def_conf["ask_strategy"] = {
+        "price_side": "ask",
+        "use_order_book": True,
+        "bid_last_balance": 0.5,
     }
-    def_conf['bid_strategy'] = {
-        'price_side': 'bid',
-        'use_order_book': False,
-        'ask_last_balance': 0.7
+    def_conf["bid_strategy"] = {
+        "price_side": "bid",
+        "use_order_book": False,
+        "ask_last_balance": 0.7,
     }
     conf = deepcopy(def_conf)
 
     validate_config_consistency(conf)
-    assert log_has_re(
-        r"DEPRECATED: Using 'ask_strategy' and 'bid_strategy' is.*", caplog)
-    assert conf['exit_pricing']['price_side'] == 'ask'
-    assert conf['exit_pricing']['use_order_book'] is True
-    assert conf['exit_pricing']['price_last_balance'] == 0.5
-    assert conf['entry_pricing']['price_side'] == 'bid'
-    assert conf['entry_pricing']['use_order_book'] is False
-    assert conf['entry_pricing']['price_last_balance'] == 0.7
-    assert 'ask_strategy' not in conf
-    assert 'bid_strategy' not in conf
+    assert log_has_re(r"DEPRECATED: Using 'ask_strategy' and 'bid_strategy' is.*", caplog)
+    assert conf["exit_pricing"]["price_side"] == "ask"
+    assert conf["exit_pricing"]["use_order_book"] is True
+    assert conf["exit_pricing"]["price_last_balance"] == 0.5
+    assert conf["entry_pricing"]["price_side"] == "bid"
+    assert conf["entry_pricing"]["use_order_book"] is False
+    assert conf["entry_pricing"]["price_last_balance"] == 0.7
+    assert "ask_strategy" not in conf
+    assert "bid_strategy" not in conf
 
     conf = deepcopy(def_conf)
 
-    conf['trading_mode'] = 'futures'
+    conf["trading_mode"] = "futures"
     with pytest.raises(
-            OperationalException,
-            match=r"Please migrate your pricing settings to use the new wording\."):
+        OperationalException, match=r"Please migrate your pricing settings to use the new wording\."
+    ):
         validate_config_consistency(conf)
 
 
 def test__validate_freqai_include_timeframes(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf.update({
+    conf.update(
+        {
             "freqai": {
                 "enabled": True,
                 "feature_parameters": {
@@ -921,74 +1005,81 @@ def test__validate_freqai_include_timeframes(default_conf, caplog) -> None:
                     "include_corr_pairlist": [],
                 },
                 "data_split_parameters": {},
-                "model_training_parameters": {}
+                "model_training_parameters": {},
             }
-    })
+        }
+    )
     with pytest.raises(OperationalException, match=r"Main timeframe of .*"):
         validate_config_consistency(conf)
     # Validation pass
-    conf.update({'timeframe': '1m'})
+    conf.update({"timeframe": "1m"})
     validate_config_consistency(conf)
 
     # Ensure base timeframe is in include_timeframes
-    conf['freqai']['feature_parameters']['include_timeframes'] = ["5m", "15m"]
+    conf["freqai"]["feature_parameters"]["include_timeframes"] = ["5m", "15m"]
     validate_config_consistency(conf)
-    assert conf['freqai']['feature_parameters']['include_timeframes'] == ["1m", "5m", "15m"]
+    assert conf["freqai"]["feature_parameters"]["include_timeframes"] == ["1m", "5m", "15m"]
 
-    conf.update({'analyze_per_epoch': True})
-    with pytest.raises(OperationalException,
-                       match=r"Using analyze-per-epoch .* not supported with a FreqAI strategy."):
+    conf.update({"analyze_per_epoch": True})
+    with pytest.raises(
+        OperationalException,
+        match=r"Using analyze-per-epoch .* not supported with a FreqAI strategy.",
+    ):
         validate_config_consistency(conf)
 
 
 def test__validate_consumers(default_conf, caplog) -> None:
     conf = deepcopy(default_conf)
-    conf.update({
+    conf.update({"external_message_consumer": {"enabled": True, "producers": []}})
+    with pytest.raises(
+        OperationalException, match="You must specify at least 1 Producer to connect to."
+    ):
+        validate_config_consistency(conf)
+
+    conf = deepcopy(default_conf)
+    conf.update(
+        {
             "external_message_consumer": {
                 "enabled": True,
-                "producers": []
-                }
-            })
-    with pytest.raises(OperationalException,
-                       match="You must specify at least 1 Producer to connect to."):
+                "producers": [
+                    {
+                        "name": "default",
+                        "host": "127.0.0.1",
+                        "port": 8081,
+                        "ws_token": "secret_ws_t0ken.",
+                    },
+                    {
+                        "name": "default",
+                        "host": "127.0.0.1",
+                        "port": 8080,
+                        "ws_token": "secret_ws_t0ken.",
+                    },
+                ],
+            }
+        }
+    )
+    with pytest.raises(
+        OperationalException, match="Producer names must be unique. Duplicate: default"
+    ):
         validate_config_consistency(conf)
 
     conf = deepcopy(default_conf)
-    conf.update({
-        "external_message_consumer": {
-            "enabled": True,
-            "producers": [
-                {
-                    "name": "default",
-                    "host": "127.0.0.1",
-                    "port": 8081,
-                    "ws_token": "secret_ws_t0ken."
-                }, {
-                    "name": "default",
-                    "host": "127.0.0.1",
-                    "port": 8080,
-                    "ws_token": "secret_ws_t0ken."
-                }
-            ]}
-        })
-    with pytest.raises(OperationalException,
-                       match="Producer names must be unique. Duplicate: default"):
-        validate_config_consistency(conf)
-
-    conf = deepcopy(default_conf)
-    conf.update({
-        "process_only_new_candles": True,
-        "external_message_consumer": {
-            "enabled": True,
-            "producers": [
-                {
-                    "name": "default",
-                    "host": "127.0.0.1",
-                    "port": 8081,
-                    "ws_token": "secret_ws_t0ken."
-                }
-            ]}
-        })
+    conf.update(
+        {
+            "process_only_new_candles": True,
+            "external_message_consumer": {
+                "enabled": True,
+                "producers": [
+                    {
+                        "name": "default",
+                        "host": "127.0.0.1",
+                        "port": 8081,
+                        "ws_token": "secret_ws_t0ken.",
+                    }
+                ],
+            },
+        }
+    )
     validate_config_consistency(conf)
     assert log_has_re("To receive best performance with external data.*", caplog)
 
@@ -1008,12 +1099,11 @@ def test_load_config_default_exchange(all_conf) -> None:
     config['exchange'] subtree has required options in it
     so it cannot be omitted in the config
     """
-    del all_conf['exchange']
+    del all_conf["exchange"]
 
-    assert 'exchange' not in all_conf
+    assert "exchange" not in all_conf
 
-    with pytest.raises(ValidationError,
-                       match=r"'exchange' is a required property"):
+    with pytest.raises(ValidationError, match=r"'exchange' is a required property"):
         validate_config_schema(all_conf)
 
 
@@ -1022,27 +1112,29 @@ def test_load_config_default_exchange_name(all_conf) -> None:
     config['exchange']['name'] option is required
     so it cannot be omitted in the config
     """
-    del all_conf['exchange']['name']
+    del all_conf["exchange"]["name"]
 
-    assert 'name' not in all_conf['exchange']
+    assert "name" not in all_conf["exchange"]
 
-    with pytest.raises(ValidationError,
-                       match=r"'name' is a required property"):
+    with pytest.raises(ValidationError, match=r"'name' is a required property"):
         validate_config_schema(all_conf)
 
 
 def test_load_config_stoploss_exchange_limit_ratio(all_conf) -> None:
-    all_conf['order_types']['stoploss_on_exchange_limit_ratio'] = 1.15
+    all_conf["order_types"]["stoploss_on_exchange_limit_ratio"] = 1.15
 
-    with pytest.raises(ValidationError,
-                       match=r"1.15 is greater than the maximum"):
+    with pytest.raises(ValidationError, match=r"1.15 is greater than the maximum"):
         validate_config_schema(all_conf)
 
 
-@pytest.mark.parametrize("keys", [("exchange", "key", ""),
-                                  ("exchange", "secret", ""),
-                                  ("exchange", "password", ""),
-                                  ])
+@pytest.mark.parametrize(
+    "keys",
+    [
+        ("exchange", "key", ""),
+        ("exchange", "secret", ""),
+        ("exchange", "password", ""),
+    ],
+)
 def test_load_config_default_subkeys(all_conf, keys) -> None:
     """
     Test for parameters with default values in sub-paths
@@ -1064,27 +1156,24 @@ def test_load_config_default_subkeys(all_conf, keys) -> None:
 
 
 def test_pairlist_resolving():
-    arglist = [
-        'download-data',
-        '--pairs', 'ETH/BTC', 'XRP/BTC',
-        '--exchange', 'binance'
-    ]
+    arglist = ["download-data", "--pairs", "ETH/BTC", "XRP/BTC", "--exchange", "binance"]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args, RunMode.OTHER)
     config = configuration.get_config()
 
-    assert config['pairs'] == ['ETH/BTC', 'XRP/BTC']
-    assert config['exchange']['pair_whitelist'] == ['ETH/BTC', 'XRP/BTC']
-    assert config['exchange']['name'] == 'binance'
+    assert config["pairs"] == ["ETH/BTC", "XRP/BTC"]
+    assert config["exchange"]["pair_whitelist"] == ["ETH/BTC", "XRP/BTC"]
+    assert config["exchange"]["name"] == "binance"
 
 
 def test_pairlist_resolving_with_config(mocker, default_conf):
     patched_configuration_load_config_file(mocker, default_conf)
     arglist = [
-        'download-data',
-        '--config', 'config.json',
+        "download-data",
+        "--config",
+        "config.json",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
@@ -1092,14 +1181,17 @@ def test_pairlist_resolving_with_config(mocker, default_conf):
     configuration = Configuration(args)
     config = configuration.get_config()
 
-    assert config['pairs'] == default_conf['exchange']['pair_whitelist']
-    assert config['exchange']['name'] == default_conf['exchange']['name']
+    assert config["pairs"] == default_conf["exchange"]["pair_whitelist"]
+    assert config["exchange"]["name"] == default_conf["exchange"]["name"]
 
     # Override pairs
     arglist = [
-        'download-data',
-        '--config', 'config.json',
-        '--pairs', 'ETH/BTC', 'XRP/BTC',
+        "download-data",
+        "--config",
+        "config.json",
+        "--pairs",
+        "ETH/BTC",
+        "XRP/BTC",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
@@ -1107,36 +1199,40 @@ def test_pairlist_resolving_with_config(mocker, default_conf):
     configuration = Configuration(args)
     config = configuration.get_config()
 
-    assert config['pairs'] == ['ETH/BTC', 'XRP/BTC']
-    assert config['exchange']['name'] == default_conf['exchange']['name']
+    assert config["pairs"] == ["ETH/BTC", "XRP/BTC"]
+    assert config["exchange"]["name"] == default_conf["exchange"]["name"]
 
 
 def test_pairlist_resolving_with_config_pl(mocker, default_conf):
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'download-data',
-        '--config', 'config.json',
-        '--pairs-file', 'tests/testdata/pairs.json',
+        "download-data",
+        "--config",
+        "config.json",
+        "--pairs-file",
+        "tests/testdata/pairs.json",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     config = configuration.get_config()
-    assert len(config['pairs']) == 23
-    assert 'ETH/BTC' in config['pairs']
-    assert 'XRP/BTC' in config['pairs']
-    assert config['exchange']['name'] == default_conf['exchange']['name']
+    assert len(config["pairs"]) == 23
+    assert "ETH/BTC" in config["pairs"]
+    assert "XRP/BTC" in config["pairs"]
+    assert config["exchange"]["name"] == default_conf["exchange"]["name"]
 
 
 def test_pairlist_resolving_with_config_pl_not_exists(mocker, default_conf):
     patched_configuration_load_config_file(mocker, default_conf)
 
     arglist = [
-        'download-data',
-        '--config', 'config.json',
-        '--pairs-file', 'tests/testdata/pairs_doesnotexist.json',
+        "download-data",
+        "--config",
+        "config.json",
+        "--pairs-file",
+        "tests/testdata/pairs_doesnotexist.json",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
@@ -1149,31 +1245,38 @@ def test_pairlist_resolving_with_config_pl_not_exists(mocker, default_conf):
 def test_pairlist_resolving_fallback(mocker, tmp_path):
     mocker.patch.object(Path, "exists", MagicMock(return_value=True))
     mocker.patch.object(Path, "open", MagicMock(return_value=MagicMock()))
-    mocker.patch("freqtrade.configuration.configuration.load_file",
-                 MagicMock(return_value=['XRP/BTC', 'ETH/BTC']))
-    arglist = [
-        'download-data',
-        '--exchange', 'binance'
-    ]
+    mocker.patch(
+        "freqtrade.configuration.configuration.load_file",
+        MagicMock(return_value=["XRP/BTC", "ETH/BTC"]),
+    )
+    arglist = ["download-data", "--exchange", "binance"]
 
     args = Arguments(arglist).get_parsed_arg()
     # Fix flaky tests if config.json exists
-    args['config'] = None
+    args["config"] = None
 
     configuration = Configuration(args, RunMode.OTHER)
     config = configuration.get_config()
 
-    assert config['pairs'] == ['ETH/BTC', 'XRP/BTC']
-    assert config['exchange']['name'] == 'binance'
-    assert config['datadir'] == tmp_path / "user_data/data/binance"
+    assert config["pairs"] == ["ETH/BTC", "XRP/BTC"]
+    assert config["exchange"]["name"] == "binance"
+    assert config["datadir"] == tmp_path / "user_data/data/binance"
 
 
-@pytest.mark.parametrize("setting", [
-    ("webhook", "webhookbuy", 'testWEbhook',
-     "webhook", "webhookentry", 'testWEbhook'),
-    ("ask_strategy", "ignore_buying_expired_candle_after", 5,
-     None, "ignore_buying_expired_candle_after", 6),
-])
+@pytest.mark.parametrize(
+    "setting",
+    [
+        ("webhook", "webhookbuy", "testWEbhook", "webhook", "webhookentry", "testWEbhook"),
+        (
+            "ask_strategy",
+            "ignore_buying_expired_candle_after",
+            5,
+            None,
+            "ignore_buying_expired_candle_after",
+            6,
+        ),
+    ],
+)
 def test_process_temporary_deprecated_settings(mocker, default_conf, setting, caplog):
     patched_configuration_load_config_file(mocker, default_conf)
 
@@ -1191,7 +1294,7 @@ def test_process_temporary_deprecated_settings(mocker, default_conf, setting, ca
         default_conf[setting[4]] = setting[5]
 
     # New and deprecated settings are conflicting ones
-    with pytest.raises(OperationalException, match=r'DEPRECATED'):
+    with pytest.raises(OperationalException, match=r"DEPRECATED"):
         process_temporary_deprecated_settings(default_conf)
 
     caplog.clear()
@@ -1203,7 +1306,7 @@ def test_process_temporary_deprecated_settings(mocker, default_conf, setting, ca
         del default_conf[setting[4]]
 
     process_temporary_deprecated_settings(default_conf)
-    assert log_has_re('DEPRECATED', caplog)
+    assert log_has_re("DEPRECATED", caplog)
     # The value of the new setting shall have been set to the
     # value of the deprecated one
     if setting[3]:
@@ -1212,11 +1315,14 @@ def test_process_temporary_deprecated_settings(mocker, default_conf, setting, ca
         assert default_conf[setting[4]] == setting[2]
 
 
-@pytest.mark.parametrize("setting", [
-    ("experimental", "use_sell_signal", False),
-    ("experimental", "sell_profit_only", True),
-    ("experimental", "ignore_roi_if_buy_signal", True),
-])
+@pytest.mark.parametrize(
+    "setting",
+    [
+        ("experimental", "use_sell_signal", False),
+        ("experimental", "sell_profit_only", True),
+        ("experimental", "ignore_roi_if_buy_signal", True),
+    ],
+)
 def test_process_removed_settings(mocker, default_conf, setting):
     patched_configuration_load_config_file(mocker, default_conf)
 
@@ -1227,20 +1333,24 @@ def test_process_removed_settings(mocker, default_conf, setting):
     default_conf[setting[0]][setting[1]] = setting[2]
 
     # New and deprecated settings are conflicting ones
-    with pytest.raises(OperationalException,
-                       match=r'Setting .* has been moved'):
+    with pytest.raises(OperationalException, match=r"Setting .* has been moved"):
         process_temporary_deprecated_settings(default_conf)
 
 
 def test_process_deprecated_setting_edge(mocker, edge_conf):
     patched_configuration_load_config_file(mocker, edge_conf)
-    edge_conf.update({'edge': {
-        'enabled': True,
-        'capital_available_percentage': 0.5,
-    }})
+    edge_conf.update(
+        {
+            "edge": {
+                "enabled": True,
+                "capital_available_percentage": 0.5,
+            }
+        }
+    )
 
-    with pytest.raises(OperationalException,
-                       match=r"DEPRECATED.*Using 'edge.capital_available_percentage'*"):
+    with pytest.raises(
+        OperationalException, match=r"DEPRECATED.*Using 'edge.capital_available_percentage'*"
+    ):
         process_temporary_deprecated_settings(edge_conf)
 
 
@@ -1249,40 +1359,40 @@ def test_check_conflicting_settings(mocker, default_conf, caplog):
 
     # Create sections for new and deprecated settings
     # (they may not exist in the config)
-    default_conf['sectionA'] = {}
-    default_conf['sectionB'] = {}
+    default_conf["sectionA"] = {}
+    default_conf["sectionB"] = {}
     # Assign new setting
-    default_conf['sectionA']['new_setting'] = 'valA'
+    default_conf["sectionA"]["new_setting"] = "valA"
     # Assign deprecated setting
-    default_conf['sectionB']['deprecated_setting'] = 'valB'
+    default_conf["sectionB"]["deprecated_setting"] = "valB"
 
     # New and deprecated settings are conflicting ones
-    with pytest.raises(OperationalException, match=r'DEPRECATED'):
-        check_conflicting_settings(default_conf,
-                                   'sectionB', 'deprecated_setting',
-                                   'sectionA', 'new_setting')
+    with pytest.raises(OperationalException, match=r"DEPRECATED"):
+        check_conflicting_settings(
+            default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+        )
 
     caplog.clear()
 
     # Delete new setting (deprecated exists)
-    del default_conf['sectionA']['new_setting']
-    check_conflicting_settings(default_conf,
-                               'sectionB', 'deprecated_setting',
-                               'sectionA', 'new_setting')
-    assert not log_has_re('DEPRECATED', caplog)
-    assert 'new_setting' not in default_conf['sectionA']
+    del default_conf["sectionA"]["new_setting"]
+    check_conflicting_settings(
+        default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+    )
+    assert not log_has_re("DEPRECATED", caplog)
+    assert "new_setting" not in default_conf["sectionA"]
 
     caplog.clear()
 
     # Assign new setting
-    default_conf['sectionA']['new_setting'] = 'valA'
+    default_conf["sectionA"]["new_setting"] = "valA"
     # Delete deprecated setting
-    del default_conf['sectionB']['deprecated_setting']
-    check_conflicting_settings(default_conf,
-                               'sectionB', 'deprecated_setting',
-                               'sectionA', 'new_setting')
-    assert not log_has_re('DEPRECATED', caplog)
-    assert default_conf['sectionA']['new_setting'] == 'valA'
+    del default_conf["sectionB"]["deprecated_setting"]
+    check_conflicting_settings(
+        default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+    )
+    assert not log_has_re("DEPRECATED", caplog)
+    assert default_conf["sectionA"]["new_setting"] == "valA"
 
 
 def test_process_deprecated_setting(mocker, default_conf, caplog):
@@ -1290,56 +1400,54 @@ def test_process_deprecated_setting(mocker, default_conf, caplog):
 
     # Create sections for new and deprecated settings
     # (they may not exist in the config)
-    default_conf['sectionA'] = {}
-    default_conf['sectionB'] = {}
+    default_conf["sectionA"] = {}
+    default_conf["sectionB"] = {}
     # Assign deprecated setting
-    default_conf['sectionB']['deprecated_setting'] = 'valB'
+    default_conf["sectionB"]["deprecated_setting"] = "valB"
 
     # Both new and deprecated settings exists
-    process_deprecated_setting(default_conf,
-                               'sectionB', 'deprecated_setting',
-                               'sectionA', 'new_setting')
-    assert log_has_re('DEPRECATED', caplog)
+    process_deprecated_setting(
+        default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+    )
+    assert log_has_re("DEPRECATED", caplog)
     # The value of the new setting shall have been set to the
     # value of the deprecated one
-    assert default_conf['sectionA']['new_setting'] == 'valB'
+    assert default_conf["sectionA"]["new_setting"] == "valB"
     # Old setting is removed
-    assert 'deprecated_setting' not in default_conf['sectionB']
+    assert "deprecated_setting" not in default_conf["sectionB"]
 
     caplog.clear()
 
     # Delete new setting (deprecated exists)
-    del default_conf['sectionA']['new_setting']
-    default_conf['sectionB']['deprecated_setting'] = 'valB'
-    process_deprecated_setting(default_conf,
-                               'sectionB', 'deprecated_setting',
-                               'sectionA', 'new_setting')
-    assert log_has_re('DEPRECATED', caplog)
+    del default_conf["sectionA"]["new_setting"]
+    default_conf["sectionB"]["deprecated_setting"] = "valB"
+    process_deprecated_setting(
+        default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+    )
+    assert log_has_re("DEPRECATED", caplog)
     # The value of the new setting shall have been set to the
     # value of the deprecated one
-    assert default_conf['sectionA']['new_setting'] == 'valB'
+    assert default_conf["sectionA"]["new_setting"] == "valB"
 
     caplog.clear()
 
     # Assign new setting
-    default_conf['sectionA']['new_setting'] = 'valA'
+    default_conf["sectionA"]["new_setting"] = "valA"
     # Delete deprecated setting
-    default_conf['sectionB'].pop('deprecated_setting', None)
-    process_deprecated_setting(default_conf,
-                               'sectionB', 'deprecated_setting',
-                               'sectionA', 'new_setting')
-    assert not log_has_re('DEPRECATED', caplog)
-    assert default_conf['sectionA']['new_setting'] == 'valA'
+    default_conf["sectionB"].pop("deprecated_setting", None)
+    process_deprecated_setting(
+        default_conf, "sectionB", "deprecated_setting", "sectionA", "new_setting"
+    )
+    assert not log_has_re("DEPRECATED", caplog)
+    assert default_conf["sectionA"]["new_setting"] == "valA"
 
     caplog.clear()
     # Test moving to root
-    default_conf['sectionB']['deprecated_setting2'] = "DeadBeef"
-    process_deprecated_setting(default_conf,
-                               'sectionB', 'deprecated_setting2',
-                               None, 'new_setting')
+    default_conf["sectionB"]["deprecated_setting2"] = "DeadBeef"
+    process_deprecated_setting(default_conf, "sectionB", "deprecated_setting2", None, "new_setting")
 
-    assert log_has_re('DEPRECATED', caplog)
-    assert default_conf['new_setting']
+    assert log_has_re("DEPRECATED", caplog)
+    assert default_conf["new_setting"]
 
 
 def test_process_removed_setting(mocker, default_conf, caplog):
@@ -1347,23 +1455,18 @@ def test_process_removed_setting(mocker, default_conf, caplog):
 
     # Create sections for new and deprecated settings
     # (they may not exist in the config)
-    default_conf['sectionA'] = {}
-    default_conf['sectionB'] = {}
+    default_conf["sectionA"] = {}
+    default_conf["sectionB"] = {}
     # Assign new setting
-    default_conf['sectionB']['somesetting'] = 'valA'
+    default_conf["sectionB"]["somesetting"] = "valA"
 
     # Only new setting exists (nothing should happen)
-    process_removed_setting(default_conf,
-                            'sectionA', 'somesetting',
-                            'sectionB', 'somesetting')
+    process_removed_setting(default_conf, "sectionA", "somesetting", "sectionB", "somesetting")
     # Assign removed setting
-    default_conf['sectionA']['somesetting'] = 'valB'
+    default_conf["sectionA"]["somesetting"] = "valB"
 
-    with pytest.raises(OperationalException,
-                       match=r"Setting .* has been moved"):
-        process_removed_setting(default_conf,
-                                'sectionA', 'somesetting',
-                                'sectionB', 'somesetting')
+    with pytest.raises(OperationalException, match=r"Setting .* has been moved"):
+        process_removed_setting(default_conf, "sectionA", "somesetting", "sectionB", "somesetting")
 
 
 def test_process_deprecated_ticker_interval(default_conf, caplog):
@@ -1373,10 +1476,11 @@ def test_process_deprecated_ticker_interval(default_conf, caplog):
     process_temporary_deprecated_settings(config)
     assert not log_has(message, caplog)
 
-    del config['timeframe']
-    config['ticker_interval'] = '15m'
-    with pytest.raises(OperationalException,
-                       match=r"DEPRECATED: 'ticker_interval' detected. Please use.*"):
+    del config["timeframe"]
+    config["ticker_interval"] = "15m"
+    with pytest.raises(
+        OperationalException, match=r"DEPRECATED: 'ticker_interval' detected. Please use.*"
+    ):
         process_temporary_deprecated_settings(config)
 
 
@@ -1386,39 +1490,36 @@ def test_process_deprecated_protections(default_conf, caplog):
     process_temporary_deprecated_settings(config)
     assert not log_has(message, caplog)
 
-    config['protections'] = []
+    config["protections"] = []
     process_temporary_deprecated_settings(config)
     assert log_has(message, caplog)
 
 
 def test_flat_vars_to_nested_dict(caplog):
-
     test_args = {
-        'FREQTRADE__EXCHANGE__SOME_SETTING': 'true',
-        'FREQTRADE__EXCHANGE__SOME_FALSE_SETTING': 'false',
-        'FREQTRADE__EXCHANGE__CONFIG__whatever': 'sometime',
-        'FREQTRADE__EXIT_PRICING__PRICE_SIDE': 'bid',
-        'FREQTRADE__EXIT_PRICING__cccc': '500',
-        'FREQTRADE__STAKE_AMOUNT': '200.05',
-        'FREQTRADE__TELEGRAM__CHAT_ID': '2151',
-        'NOT_RELEVANT': '200.0',  # Will be ignored
+        "FREQTRADE__EXCHANGE__SOME_SETTING": "true",
+        "FREQTRADE__EXCHANGE__SOME_FALSE_SETTING": "false",
+        "FREQTRADE__EXCHANGE__CONFIG__whatever": "sometime",
+        "FREQTRADE__EXIT_PRICING__PRICE_SIDE": "bid",
+        "FREQTRADE__EXIT_PRICING__cccc": "500",
+        "FREQTRADE__STAKE_AMOUNT": "200.05",
+        "FREQTRADE__TELEGRAM__CHAT_ID": "2151",
+        "NOT_RELEVANT": "200.0",  # Will be ignored
     }
     expected = {
-        'stake_amount': 200.05,
-        'exit_pricing': {
-            'price_side': 'bid',
-            'cccc': 500,
+        "stake_amount": 200.05,
+        "exit_pricing": {
+            "price_side": "bid",
+            "cccc": 500,
         },
-        'exchange': {
-            'config': {
-                'whatever': 'sometime',
+        "exchange": {
+            "config": {
+                "whatever": "sometime",
             },
-            'some_setting': True,
-            'some_false_setting': False,
+            "some_setting": True,
+            "some_false_setting": False,
         },
-        'telegram': {
-            'chat_id': '2151'
-        }
+        "telegram": {"chat_id": "2151"},
     }
     res = _flat_vars_to_nested_dict(test_args, ENV_VAR_PREFIX)
     assert res == expected
@@ -1429,31 +1530,29 @@ def test_flat_vars_to_nested_dict(caplog):
 
 def test_setup_hyperopt_freqai(mocker, default_conf) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
+    mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x)
     mocker.patch(
-        'freqtrade.configuration.configuration.create_datadir',
-        lambda c, x: x
-    )
-    mocker.patch(
-        'freqtrade.configuration.configuration.create_userdata_dir',
-        lambda x, *args, **kwargs: Path(x)
+        "freqtrade.configuration.configuration.create_userdata_dir",
+        lambda x, *args, **kwargs: Path(x),
     )
     arglist = [
-        'hyperopt',
-        '--config', 'config.json',
-        '--strategy', CURRENT_TEST_STRATEGY,
-        '--timerange', '20220801-20220805',
+        "hyperopt",
+        "--config",
+        "config.json",
+        "--strategy",
+        CURRENT_TEST_STRATEGY,
+        "--timerange",
+        "20220801-20220805",
         "--freqaimodel",
         "LightGBMRegressorMultiTarget",
-        "--analyze-per-epoch"
+        "--analyze-per-epoch",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     config = configuration.get_config()
-    config['freqai'] = {
-        "enabled": True
-    }
+    config["freqai"] = {"enabled": True}
     with pytest.raises(
         OperationalException, match=r".*analyze-per-epoch parameter is not supported.*"
     ):
@@ -1462,29 +1561,29 @@ def test_setup_hyperopt_freqai(mocker, default_conf) -> None:
 
 def test_setup_freqai_backtesting(mocker, default_conf) -> None:
     patched_configuration_load_config_file(mocker, default_conf)
+    mocker.patch("freqtrade.configuration.configuration.create_datadir", lambda c, x: x)
     mocker.patch(
-        'freqtrade.configuration.configuration.create_datadir',
-        lambda c, x: x
-    )
-    mocker.patch(
-        'freqtrade.configuration.configuration.create_userdata_dir',
-        lambda x, *args, **kwargs: Path(x)
+        "freqtrade.configuration.configuration.create_userdata_dir",
+        lambda x, *args, **kwargs: Path(x),
     )
     arglist = [
-        'backtesting',
-        '--config', 'config.json',
-        '--strategy', CURRENT_TEST_STRATEGY,
-        '--timerange', '20220801-20220805',
+        "backtesting",
+        "--config",
+        "config.json",
+        "--strategy",
+        CURRENT_TEST_STRATEGY,
+        "--timerange",
+        "20220801-20220805",
         "--freqaimodel",
         "LightGBMRegressorMultiTarget",
-        "--freqai-backtest-live-models"
+        "--freqai-backtest-live-models",
     ]
 
     args = Arguments(arglist).get_parsed_arg()
 
     configuration = Configuration(args)
     config = configuration.get_config()
-    config['runmode'] = RunMode.BACKTEST
+    config["runmode"] = RunMode.BACKTEST
 
     with pytest.raises(
         OperationalException, match=r".*--freqai-backtest-live-models parameter is only.*"
@@ -1492,16 +1591,14 @@ def test_setup_freqai_backtesting(mocker, default_conf) -> None:
         validate_config_consistency(config)
 
     conf = deepcopy(config)
-    conf['freqai'] = {
-        "enabled": True
-    }
+    conf["freqai"] = {"enabled": True}
     with pytest.raises(
         OperationalException, match=r".* timerange parameter is not supported with .*"
     ):
         validate_config_consistency(conf)
 
-    conf['timerange'] = None
-    conf['freqai_backtest_live_models'] = False
+    conf["timerange"] = None
+    conf["freqai_backtest_live_models"] = False
 
     with pytest.raises(
         OperationalException, match=r".* pass --timerange if you intend to use FreqAI .*"
@@ -1510,14 +1607,14 @@ def test_setup_freqai_backtesting(mocker, default_conf) -> None:
 
 
 def test_sanitize_config(default_conf_usdt):
-    assert default_conf_usdt['exchange']['key'] != 'REDACTED'
+    assert default_conf_usdt["exchange"]["key"] != "REDACTED"
     res = sanitize_config(default_conf_usdt)
     # Didn't modify original dict
-    assert default_conf_usdt['exchange']['key'] != 'REDACTED'
+    assert default_conf_usdt["exchange"]["key"] != "REDACTED"
 
-    assert res['exchange']['key'] == 'REDACTED'
-    assert res['exchange']['secret'] == 'REDACTED'
+    assert res["exchange"]["key"] == "REDACTED"
+    assert res["exchange"]["secret"] == "REDACTED"
 
     res = sanitize_config(default_conf_usdt, show_sensitive=True)
-    assert res['exchange']['key'] == default_conf_usdt['exchange']['key']
-    assert res['exchange']['secret'] == default_conf_usdt['exchange']['secret']
+    assert res["exchange"]["key"] == default_conf_usdt["exchange"]["key"]
+    assert res["exchange"]["secret"] == default_conf_usdt["exchange"]["secret"]
diff --git a/tests/test_directory_operations.py b/tests/test_directory_operations.py
index 8bd07f18a..45297fba8 100644
--- a/tests/test_directory_operations.py
+++ b/tests/test_directory_operations.py
@@ -5,101 +5,112 @@ from unittest.mock import MagicMock
 
 import pytest
 
-from freqtrade.configuration.directory_operations import (chown_user_directory, copy_sample_files,
-                                                          create_datadir, create_userdata_dir)
+from freqtrade.configuration.directory_operations import (
+    chown_user_directory,
+    copy_sample_files,
+    create_datadir,
+    create_userdata_dir,
+)
 from freqtrade.exceptions import OperationalException
 from tests.conftest import log_has, log_has_re
 
 
 def test_create_datadir(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=False))
-    md = mocker.patch.object(Path, 'mkdir', MagicMock())
+    md = mocker.patch.object(Path, "mkdir", MagicMock())
 
-    create_datadir(default_conf, '/foo/bar')
-    assert md.call_args[1]['parents'] is True
-    assert log_has('Created data directory: /foo/bar', caplog)
+    create_datadir(default_conf, "/foo/bar")
+    assert md.call_args[1]["parents"] is True
+    assert log_has("Created data directory: /foo/bar", caplog)
 
 
 def test_create_userdata_dir(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=False))
-    md = mocker.patch.object(Path, 'mkdir', MagicMock())
+    md = mocker.patch.object(Path, "mkdir", MagicMock())
 
-    x = create_userdata_dir('/tmp/bar', create_dir=True)
+    x = create_userdata_dir("/tmp/bar", create_dir=True)
     assert md.call_count == 10
-    assert md.call_args[1]['parents'] is False
+    assert md.call_args[1]["parents"] is False
     assert log_has(f'Created user-data directory: {Path("/tmp/bar")}', caplog)
     assert isinstance(x, Path)
     assert str(x) == str(Path("/tmp/bar"))
 
 
 def test_create_userdata_dir_and_chown(mocker, tmp_path, caplog) -> None:
-    sp_mock = mocker.patch('subprocess.check_output')
-    path = tmp_path / 'bar'
+    sp_mock = mocker.patch("subprocess.check_output")
+    path = tmp_path / "bar"
     assert not path.is_dir()
 
     x = create_userdata_dir(str(path), create_dir=True)
     assert sp_mock.call_count == 0
-    assert log_has(f'Created user-data directory: {path}', caplog)
+    assert log_has(f"Created user-data directory: {path}", caplog)
     assert isinstance(x, Path)
     assert path.is_dir()
-    assert (path / 'data').is_dir()
+    assert (path / "data").is_dir()
 
-    os.environ['FT_APP_ENV'] = 'docker'
-    chown_user_directory(path / 'data')
+    os.environ["FT_APP_ENV"] = "docker"
+    chown_user_directory(path / "data")
     assert sp_mock.call_count == 1
-    del os.environ['FT_APP_ENV']
+    del os.environ["FT_APP_ENV"]
 
 
 def test_create_userdata_dir_exists(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=True))
-    md = mocker.patch.object(Path, 'mkdir', MagicMock())
+    md = mocker.patch.object(Path, "mkdir", MagicMock())
 
-    create_userdata_dir('/tmp/bar')
+    create_userdata_dir("/tmp/bar")
     assert md.call_count == 0
 
 
 def test_create_userdata_dir_exists_exception(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=False))
-    md = mocker.patch.object(Path, 'mkdir', MagicMock())
+    md = mocker.patch.object(Path, "mkdir", MagicMock())
 
-    with pytest.raises(OperationalException,
-                       match=r'Directory `.{1,2}tmp.{1,2}bar` does not exist.*'):
-        create_userdata_dir('/tmp/bar',  create_dir=False)
+    with pytest.raises(
+        OperationalException, match=r"Directory `.{1,2}tmp.{1,2}bar` does not exist.*"
+    ):
+        create_userdata_dir("/tmp/bar", create_dir=False)
     assert md.call_count == 0
 
 
 def test_copy_sample_files(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=True))
     mocker.patch.object(Path, "exists", MagicMock(return_value=False))
-    copymock = mocker.patch('shutil.copy', MagicMock())
+    copymock = mocker.patch("shutil.copy", MagicMock())
 
-    copy_sample_files(Path('/tmp/bar'))
+    copy_sample_files(Path("/tmp/bar"))
     assert copymock.call_count == 3
     assert copymock.call_args_list[0][0][1] == str(
-        Path('/tmp/bar') / 'strategies/sample_strategy.py')
+        Path("/tmp/bar") / "strategies/sample_strategy.py"
+    )
     assert copymock.call_args_list[1][0][1] == str(
-        Path('/tmp/bar') / 'hyperopts/sample_hyperopt_loss.py')
+        Path("/tmp/bar") / "hyperopts/sample_hyperopt_loss.py"
+    )
     assert copymock.call_args_list[2][0][1] == str(
-        Path('/tmp/bar') / 'notebooks/strategy_analysis_example.ipynb')
+        Path("/tmp/bar") / "notebooks/strategy_analysis_example.ipynb"
+    )
 
 
 def test_copy_sample_files_errors(mocker, default_conf, caplog) -> None:
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=False))
     mocker.patch.object(Path, "exists", MagicMock(return_value=False))
-    mocker.patch('shutil.copy', MagicMock())
-    with pytest.raises(OperationalException,
-                       match=r"Directory `.{1,2}tmp.{1,2}bar` does not exist\."):
-        copy_sample_files(Path('/tmp/bar'))
+    mocker.patch("shutil.copy", MagicMock())
+    with pytest.raises(
+        OperationalException, match=r"Directory `.{1,2}tmp.{1,2}bar` does not exist\."
+    ):
+        copy_sample_files(Path("/tmp/bar"))
 
     mocker.patch.object(Path, "is_dir", MagicMock(side_effect=[True, False]))
 
-    with pytest.raises(OperationalException,
-                       match=r"Directory `.{1,2}tmp.{1,2}bar.{1,2}strategies` does not exist\."):
-        copy_sample_files(Path('/tmp/bar'))
+    with pytest.raises(
+        OperationalException,
+        match=r"Directory `.{1,2}tmp.{1,2}bar.{1,2}strategies` does not exist\.",
+    ):
+        copy_sample_files(Path("/tmp/bar"))
     mocker.patch.object(Path, "is_dir", MagicMock(return_value=True))
     mocker.patch.object(Path, "exists", MagicMock(return_value=True))
-    copy_sample_files(Path('/tmp/bar'))
+    copy_sample_files(Path("/tmp/bar"))
     assert log_has_re(r"File `.*` exists already, not deploying sample file\.", caplog)
     caplog.clear()
-    copy_sample_files(Path('/tmp/bar'), overwrite=True)
+    copy_sample_files(Path("/tmp/bar"), overwrite=True)
     assert log_has_re(r"File `.*` exists already, overwriting\.", caplog)
diff --git a/tests/test_log_setup.py b/tests/test_log_setup.py
index 4310b8f09..142134b34 100644
--- a/tests/test_log_setup.py
+++ b/tests/test_log_setup.py
@@ -4,50 +4,57 @@ import sys
 import pytest
 
 from freqtrade.exceptions import OperationalException
-from freqtrade.loggers import (FTBufferingHandler, FTStdErrStreamHandler, set_loggers,
-                               setup_logging, setup_logging_pre)
-from freqtrade.loggers.set_log_levels import (reduce_verbosity_for_bias_tester,
-                                              restore_verbosity_for_bias_tester)
+from freqtrade.loggers import (
+    FTBufferingHandler,
+    FTStdErrStreamHandler,
+    set_loggers,
+    setup_logging,
+    setup_logging_pre,
+)
+from freqtrade.loggers.set_log_levels import (
+    reduce_verbosity_for_bias_tester,
+    restore_verbosity_for_bias_tester,
+)
 
 
 def test_set_loggers() -> None:
     # Reset Logging to Debug, otherwise this fails randomly as it's set globally
-    logging.getLogger('requests').setLevel(logging.DEBUG)
+    logging.getLogger("requests").setLevel(logging.DEBUG)
     logging.getLogger("urllib3").setLevel(logging.DEBUG)
-    logging.getLogger('ccxt.base.exchange').setLevel(logging.DEBUG)
-    logging.getLogger('telegram').setLevel(logging.DEBUG)
+    logging.getLogger("ccxt.base.exchange").setLevel(logging.DEBUG)
+    logging.getLogger("telegram").setLevel(logging.DEBUG)
 
-    previous_value1 = logging.getLogger('requests').level
-    previous_value2 = logging.getLogger('ccxt.base.exchange').level
-    previous_value3 = logging.getLogger('telegram').level
+    previous_value1 = logging.getLogger("requests").level
+    previous_value2 = logging.getLogger("ccxt.base.exchange").level
+    previous_value3 = logging.getLogger("telegram").level
 
     set_loggers()
 
-    value1 = logging.getLogger('requests').level
+    value1 = logging.getLogger("requests").level
     assert previous_value1 is not value1
     assert value1 is logging.INFO
 
-    value2 = logging.getLogger('ccxt.base.exchange').level
+    value2 = logging.getLogger("ccxt.base.exchange").level
     assert previous_value2 is not value2
     assert value2 is logging.INFO
 
-    value3 = logging.getLogger('telegram').level
+    value3 = logging.getLogger("telegram").level
     assert previous_value3 is not value3
     assert value3 is logging.INFO
 
     set_loggers(verbosity=2)
 
-    assert logging.getLogger('requests').level is logging.DEBUG
-    assert logging.getLogger('ccxt.base.exchange').level is logging.INFO
-    assert logging.getLogger('telegram').level is logging.INFO
-    assert logging.getLogger('werkzeug').level is logging.INFO
+    assert logging.getLogger("requests").level is logging.DEBUG
+    assert logging.getLogger("ccxt.base.exchange").level is logging.INFO
+    assert logging.getLogger("telegram").level is logging.INFO
+    assert logging.getLogger("werkzeug").level is logging.INFO
 
-    set_loggers(verbosity=3, api_verbosity='error')
+    set_loggers(verbosity=3, api_verbosity="error")
 
-    assert logging.getLogger('requests').level is logging.DEBUG
-    assert logging.getLogger('ccxt.base.exchange').level is logging.DEBUG
-    assert logging.getLogger('telegram').level is logging.INFO
-    assert logging.getLogger('werkzeug').level is logging.ERROR
+    assert logging.getLogger("requests").level is logging.DEBUG
+    assert logging.getLogger("ccxt.base.exchange").level is logging.DEBUG
+    assert logging.getLogger("telegram").level is logging.INFO
+    assert logging.getLogger("werkzeug").level is logging.ERROR
 
 
 @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
@@ -56,9 +63,10 @@ def test_set_loggers_syslog():
     orig_handlers = logger.handlers
     logger.handlers = []
 
-    config = {'verbosity': 2,
-              'logfile': 'syslog:/dev/log',
-              }
+    config = {
+        "verbosity": 2,
+        "logfile": "syslog:/dev/log",
+    }
 
     setup_logging_pre()
     setup_logging(config)
@@ -78,10 +86,11 @@ def test_set_loggers_Filehandler(tmp_path):
     logger = logging.getLogger()
     orig_handlers = logger.handlers
     logger.handlers = []
-    logfile = tmp_path / 'ft_logfile.log'
-    config = {'verbosity': 2,
-              'logfile': str(logfile),
-              }
+    logfile = tmp_path / "ft_logfile.log"
+    config = {
+        "verbosity": 2,
+        "logfile": str(logfile),
+    }
 
     setup_logging_pre()
     setup_logging(config)
@@ -104,9 +113,10 @@ def test_set_loggers_journald(mocker):
     orig_handlers = logger.handlers
     logger.handlers = []
 
-    config = {'verbosity': 2,
-              'logfile': 'journald',
-              }
+    config = {
+        "verbosity": 2,
+        "logfile": "journald",
+    }
 
     setup_logging_pre()
     setup_logging(config)
@@ -122,11 +132,11 @@ def test_set_loggers_journald_importerror(import_fails):
     orig_handlers = logger.handlers
     logger.handlers = []
 
-    config = {'verbosity': 2,
-              'logfile': 'journald',
-              }
-    with pytest.raises(OperationalException,
-                       match=r'You need the cysystemd python package.*'):
+    config = {
+        "verbosity": 2,
+        "logfile": "journald",
+    }
+    with pytest.raises(OperationalException, match=r"You need the cysystemd python package.*"):
         setup_logging(config)
     logger.handlers = orig_handlers
 
@@ -134,16 +144,16 @@ def test_set_loggers_journald_importerror(import_fails):
 def test_reduce_verbosity():
     setup_logging_pre()
     reduce_verbosity_for_bias_tester()
-    prior_level = logging.getLogger('freqtrade').getEffectiveLevel()
+    prior_level = logging.getLogger("freqtrade").getEffectiveLevel()
 
-    assert logging.getLogger('freqtrade.resolvers').getEffectiveLevel() == logging.WARNING
-    assert logging.getLogger('freqtrade.strategy.hyper').getEffectiveLevel() == logging.WARNING
+    assert logging.getLogger("freqtrade.resolvers").getEffectiveLevel() == logging.WARNING
+    assert logging.getLogger("freqtrade.strategy.hyper").getEffectiveLevel() == logging.WARNING
     # base level wasn't changed
-    assert logging.getLogger('freqtrade').getEffectiveLevel() == prior_level
+    assert logging.getLogger("freqtrade").getEffectiveLevel() == prior_level
 
     restore_verbosity_for_bias_tester()
 
-    assert logging.getLogger('freqtrade.resolvers').getEffectiveLevel() == prior_level
-    assert logging.getLogger('freqtrade.strategy.hyper').getEffectiveLevel() == prior_level
-    assert logging.getLogger('freqtrade').getEffectiveLevel() == prior_level
+    assert logging.getLogger("freqtrade.resolvers").getEffectiveLevel() == prior_level
+    assert logging.getLogger("freqtrade.strategy.hyper").getEffectiveLevel() == prior_level
+    assert logging.getLogger("freqtrade").getEffectiveLevel() == prior_level
     # base level wasn't changed
diff --git a/tests/test_main.py b/tests/test_main.py
index 4b28c094f..b230d4e99 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -12,8 +12,12 @@ from freqtrade.exceptions import ConfigurationError, FreqtradeException, Operati
 from freqtrade.freqtradebot import FreqtradeBot
 from freqtrade.main import main
 from freqtrade.worker import Worker
-from tests.conftest import (log_has, log_has_re, patch_exchange,
-                            patched_configuration_load_config_file)
+from tests.conftest import (
+    log_has,
+    log_has_re,
+    patch_exchange,
+    patched_configuration_load_config_file,
+)
 
 
 def test_parse_args_None(caplog) -> None:
@@ -28,161 +32,162 @@ def test_parse_args_backtesting(mocker) -> None:
     further argument parsing is done in test_arguments.py
     """
     mocker.patch.object(Path, "is_file", MagicMock(side_effect=[False, True]))
-    backtesting_mock = mocker.patch('freqtrade.commands.start_backtesting')
+    backtesting_mock = mocker.patch("freqtrade.commands.start_backtesting")
     backtesting_mock.__name__ = PropertyMock("start_backtesting")
     # it's sys.exit(0) at the end of backtesting
     with pytest.raises(SystemExit):
-        main(['backtesting'])
+        main(["backtesting"])
     assert backtesting_mock.call_count == 1
     call_args = backtesting_mock.call_args[0][0]
-    assert call_args['config'] == ['config.json']
-    assert call_args['verbosity'] == 0
-    assert call_args['command'] == 'backtesting'
-    assert call_args['func'] is not None
-    assert callable(call_args['func'])
-    assert call_args['timeframe'] is None
+    assert call_args["config"] == ["config.json"]
+    assert call_args["verbosity"] == 0
+    assert call_args["command"] == "backtesting"
+    assert call_args["func"] is not None
+    assert callable(call_args["func"])
+    assert call_args["timeframe"] is None
 
 
 def test_main_start_hyperopt(mocker) -> None:
-    mocker.patch.object(Path, 'is_file', MagicMock(side_effect=[False, True]))
-    hyperopt_mock = mocker.patch('freqtrade.commands.start_hyperopt', MagicMock())
-    hyperopt_mock.__name__ = PropertyMock('start_hyperopt')
+    mocker.patch.object(Path, "is_file", MagicMock(side_effect=[False, True]))
+    hyperopt_mock = mocker.patch("freqtrade.commands.start_hyperopt", MagicMock())
+    hyperopt_mock.__name__ = PropertyMock("start_hyperopt")
     # it's sys.exit(0) at the end of hyperopt
     with pytest.raises(SystemExit):
-        main(['hyperopt'])
+        main(["hyperopt"])
     assert hyperopt_mock.call_count == 1
     call_args = hyperopt_mock.call_args[0][0]
-    assert call_args['config'] == ['config.json']
-    assert call_args['verbosity'] == 0
-    assert call_args['command'] == 'hyperopt'
-    assert call_args['func'] is not None
-    assert callable(call_args['func'])
+    assert call_args["config"] == ["config.json"]
+    assert call_args["verbosity"] == 0
+    assert call_args["command"] == "hyperopt"
+    assert call_args["func"] is not None
+    assert callable(call_args["func"])
 
 
 def test_main_fatal_exception(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cleanup', MagicMock())
-    mocker.patch('freqtrade.worker.Worker._worker', MagicMock(side_effect=Exception))
+    mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cleanup", MagicMock())
+    mocker.patch("freqtrade.worker.Worker._worker", MagicMock(side_effect=Exception))
     patched_configuration_load_config_file(mocker, default_conf)
-    mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.init_db', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.init_db", MagicMock())
 
-    args = ['trade', '-c', 'tests/testdata/testconfigs/main_test_config.json']
+    args = ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"]
 
     # Test Main + the KeyboardInterrupt exception
     with pytest.raises(SystemExit):
         main(args)
-    assert log_has('Using config: tests/testdata/testconfigs/main_test_config.json ...', caplog)
-    assert log_has('Fatal exception!', caplog)
+    assert log_has("Using config: tests/testdata/testconfigs/main_test_config.json ...", caplog)
+    assert log_has("Fatal exception!", caplog)
 
 
 def test_main_keyboard_interrupt(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cleanup', MagicMock())
-    mocker.patch('freqtrade.worker.Worker._worker', MagicMock(side_effect=KeyboardInterrupt))
+    mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cleanup", MagicMock())
+    mocker.patch("freqtrade.worker.Worker._worker", MagicMock(side_effect=KeyboardInterrupt))
     patched_configuration_load_config_file(mocker, default_conf)
-    mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
-    mocker.patch('freqtrade.wallets.Wallets.update', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.init_db', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock())
+    mocker.patch("freqtrade.wallets.Wallets.update", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.init_db", MagicMock())
 
-    args = ['trade', '-c', 'tests/testdata/testconfigs/main_test_config.json']
+    args = ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"]
 
     # Test Main + the KeyboardInterrupt exception
     with pytest.raises(SystemExit):
         main(args)
-    assert log_has('Using config: tests/testdata/testconfigs/main_test_config.json ...', caplog)
-    assert log_has('SIGINT received, aborting ...', caplog)
+    assert log_has("Using config: tests/testdata/testconfigs/main_test_config.json ...", caplog)
+    assert log_has("SIGINT received, aborting ...", caplog)
 
 
 def test_main_operational_exception(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cleanup', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cleanup", MagicMock())
     mocker.patch(
-        'freqtrade.worker.Worker._worker',
-        MagicMock(side_effect=FreqtradeException('Oh snap!'))
+        "freqtrade.worker.Worker._worker", MagicMock(side_effect=FreqtradeException("Oh snap!"))
     )
     patched_configuration_load_config_file(mocker, default_conf)
-    mocker.patch('freqtrade.wallets.Wallets.update', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.init_db', MagicMock())
+    mocker.patch("freqtrade.wallets.Wallets.update", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.init_db", MagicMock())
 
-    args = ['trade', '-c', 'tests/testdata/testconfigs/main_test_config.json']
+    args = ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"]
 
     # Test Main + the KeyboardInterrupt exception
     with pytest.raises(SystemExit):
         main(args)
-    assert log_has('Using config: tests/testdata/testconfigs/main_test_config.json ...', caplog)
-    assert log_has('Oh snap!', caplog)
+    assert log_has("Using config: tests/testdata/testconfigs/main_test_config.json ...", caplog)
+    assert log_has("Oh snap!", caplog)
 
 
 def test_main_operational_exception1(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
     mocker.patch(
-        'freqtrade.commands.list_commands.list_available_exchanges',
-        MagicMock(side_effect=ValueError('Oh snap!'))
+        "freqtrade.commands.list_commands.list_available_exchanges",
+        MagicMock(side_effect=ValueError("Oh snap!")),
     )
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = ['list-exchanges']
+    args = ["list-exchanges"]
 
     # Test Main + the KeyboardInterrupt exception
     with pytest.raises(SystemExit):
         main(args)
 
-    assert log_has('Fatal exception!', caplog)
-    assert not log_has_re(r'SIGINT.*', caplog)
+    assert log_has("Fatal exception!", caplog)
+    assert not log_has_re(r"SIGINT.*", caplog)
     mocker.patch(
-        'freqtrade.commands.list_commands.list_available_exchanges',
-        MagicMock(side_effect=KeyboardInterrupt)
+        "freqtrade.commands.list_commands.list_available_exchanges",
+        MagicMock(side_effect=KeyboardInterrupt),
     )
     with pytest.raises(SystemExit):
         main(args)
 
-    assert log_has_re(r'SIGINT.*', caplog)
+    assert log_has_re(r"SIGINT.*", caplog)
 
 
 def test_main_ConfigurationError(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
     mocker.patch(
-        'freqtrade.commands.list_commands.list_available_exchanges',
-        MagicMock(side_effect=ConfigurationError('Oh snap!'))
+        "freqtrade.commands.list_commands.list_available_exchanges",
+        MagicMock(side_effect=ConfigurationError("Oh snap!")),
     )
     patched_configuration_load_config_file(mocker, default_conf)
 
-    args = ['list-exchanges']
+    args = ["list-exchanges"]
 
     # Test Main + the KeyboardInterrupt exception
     with pytest.raises(SystemExit):
         main(args)
-    assert log_has_re('Configuration error: Oh snap!', caplog)
+    assert log_has_re("Configuration error: Oh snap!", caplog)
 
 
 def test_main_reload_config(mocker, default_conf, caplog) -> None:
     patch_exchange(mocker)
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cleanup', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cleanup", MagicMock())
     # Simulate Running, reload, running workflow
-    worker_mock = MagicMock(side_effect=[State.RUNNING,
-                                         State.RELOAD_CONFIG,
-                                         State.RUNNING,
-                                         OperationalException("Oh snap!")])
-    mocker.patch('freqtrade.worker.Worker._worker', worker_mock)
+    worker_mock = MagicMock(
+        side_effect=[
+            State.RUNNING,
+            State.RELOAD_CONFIG,
+            State.RUNNING,
+            OperationalException("Oh snap!"),
+        ]
+    )
+    mocker.patch("freqtrade.worker.Worker._worker", worker_mock)
     patched_configuration_load_config_file(mocker, default_conf)
-    mocker.patch('freqtrade.wallets.Wallets.update', MagicMock())
-    reconfigure_mock = mocker.patch('freqtrade.worker.Worker._reconfigure', MagicMock())
+    mocker.patch("freqtrade.wallets.Wallets.update", MagicMock())
+    reconfigure_mock = mocker.patch("freqtrade.worker.Worker._reconfigure", MagicMock())
 
-    mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.init_db', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.init_db", MagicMock())
 
-    args = Arguments([
-        'trade',
-        '-c',
-        'tests/testdata/testconfigs/main_test_config.json'
-    ]).get_parsed_arg()
+    args = Arguments(
+        ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"]
+    ).get_parsed_arg()
     worker = Worker(args=args, config=default_conf)
     with pytest.raises(SystemExit):
-        main(['trade', '-c', 'tests/testdata/testconfigs/main_test_config.json'])
+        main(["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"])
 
-    assert log_has('Using config: tests/testdata/testconfigs/main_test_config.json ...', caplog)
+    assert log_has("Using config: tests/testdata/testconfigs/main_test_config.json ...", caplog)
     assert worker_mock.call_count == 4
     assert reconfigure_mock.call_count == 1
     assert isinstance(worker.freqtrade, FreqtradeBot)
@@ -190,27 +195,24 @@ def test_main_reload_config(mocker, default_conf, caplog) -> None:
 
 def test_reconfigure(mocker, default_conf) -> None:
     patch_exchange(mocker)
-    mocker.patch('freqtrade.freqtradebot.FreqtradeBot.cleanup', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.FreqtradeBot.cleanup", MagicMock())
     mocker.patch(
-        'freqtrade.worker.Worker._worker',
-        MagicMock(side_effect=OperationalException('Oh snap!'))
+        "freqtrade.worker.Worker._worker", MagicMock(side_effect=OperationalException("Oh snap!"))
     )
-    mocker.patch('freqtrade.wallets.Wallets.update', MagicMock())
+    mocker.patch("freqtrade.wallets.Wallets.update", MagicMock())
     patched_configuration_load_config_file(mocker, default_conf)
-    mocker.patch('freqtrade.freqtradebot.RPCManager', MagicMock())
-    mocker.patch('freqtrade.freqtradebot.init_db', MagicMock())
+    mocker.patch("freqtrade.freqtradebot.RPCManager", MagicMock())
+    mocker.patch("freqtrade.freqtradebot.init_db", MagicMock())
 
-    args = Arguments([
-        'trade',
-        '-c',
-        'tests/testdata/testconfigs/main_test_config.json'
-    ]).get_parsed_arg()
+    args = Arguments(
+        ["trade", "-c", "tests/testdata/testconfigs/main_test_config.json"]
+    ).get_parsed_arg()
     worker = Worker(args=args, config=default_conf)
     freqtrade = worker.freqtrade
 
     # Renew mock to return modified data
     conf = deepcopy(default_conf)
-    conf['stake_amount'] += 1
+    conf["stake_amount"] += 1
     patched_configuration_load_config_file(mocker, conf)
 
     worker._config = conf
@@ -220,4 +222,4 @@ def test_reconfigure(mocker, default_conf) -> None:
 
     # Verify we have a new instance with the new config
     assert freqtrade is not freqtrade2
-    assert freqtrade.config['stake_amount'] + 1 == freqtrade2.config['stake_amount']
+    assert freqtrade.config["stake_amount"] + 1 == freqtrade2.config["stake_amount"]
diff --git a/tests/test_misc.py b/tests/test_misc.py
index c9a196259..f213d6759 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -7,40 +7,47 @@ from unittest.mock import MagicMock
 import pandas as pd
 import pytest
 
-from freqtrade.misc import (dataframe_to_json, deep_merge_dicts, file_dump_json, file_load_json,
-                            is_file_in_dir, json_to_dataframe, pair_to_filename,
-                            parse_db_uri_for_logging, plural, safe_value_fallback,
-                            safe_value_fallback2)
+from freqtrade.misc import (
+    dataframe_to_json,
+    deep_merge_dicts,
+    file_dump_json,
+    file_load_json,
+    is_file_in_dir,
+    json_to_dataframe,
+    pair_to_filename,
+    parse_db_uri_for_logging,
+    plural,
+    safe_value_fallback,
+    safe_value_fallback2,
+)
 
 
 def test_file_dump_json(mocker) -> None:
-    file_open = mocker.patch('freqtrade.misc.Path.open', MagicMock())
-    json_dump = mocker.patch('rapidjson.dump', MagicMock())
-    file_dump_json(Path('somefile'), [1, 2, 3])
+    file_open = mocker.patch("freqtrade.misc.Path.open", MagicMock())
+    json_dump = mocker.patch("rapidjson.dump", MagicMock())
+    file_dump_json(Path("somefile"), [1, 2, 3])
     assert file_open.call_count == 1
     assert json_dump.call_count == 1
-    file_open = mocker.patch('freqtrade.misc.gzip.open', MagicMock())
-    json_dump = mocker.patch('rapidjson.dump', MagicMock())
-    file_dump_json(Path('somefile'), [1, 2, 3], True)
+    file_open = mocker.patch("freqtrade.misc.gzip.open", MagicMock())
+    json_dump = mocker.patch("rapidjson.dump", MagicMock())
+    file_dump_json(Path("somefile"), [1, 2, 3], True)
     assert file_open.call_count == 1
     assert json_dump.call_count == 1
 
 
 def test_file_load_json(mocker, testdatadir) -> None:
-
     # 7m .json does not exist
-    ret = file_load_json(testdatadir / 'UNITTEST_BTC-7m.json')
+    ret = file_load_json(testdatadir / "UNITTEST_BTC-7m.json")
     assert not ret
     # 1m json exists (but no .gz exists)
-    ret = file_load_json(testdatadir / 'UNITTEST_BTC-1m.json')
+    ret = file_load_json(testdatadir / "UNITTEST_BTC-1m.json")
     assert ret
     # 8 .json is empty and will fail if it's loaded. .json.gz is a copy of 1.json
-    ret = file_load_json(testdatadir / 'UNITTEST_BTC-8m.json')
+    ret = file_load_json(testdatadir / "UNITTEST_BTC-8m.json")
     assert ret
 
 
 def test_is_file_in_dir(tmp_path):
-
     # Create a temporary directory and file
     dir_path = tmp_path / "subdir"
     dir_path.mkdir()
@@ -57,69 +64,72 @@ def test_is_file_in_dir(tmp_path):
     assert is_file_in_dir(file_path2, tmp_path) is False
 
 
-@pytest.mark.parametrize("pair,expected_result", [
-    ("ETH/BTC", 'ETH_BTC'),
-    ("ETH/USDT", 'ETH_USDT'),
-    ("ETH/USDT:USDT", 'ETH_USDT_USDT'),  # swap with USDT as settlement currency
-    ("ETH/USD:USD", 'ETH_USD_USD'),  # swap with USD as settlement currency
-    ("AAVE/USD:USD", 'AAVE_USD_USD'),  # swap with USDT as settlement currency
-    ("ETH/USDT:USDT-210625", 'ETH_USDT_USDT-210625'),  # expiring futures
-    ("Fabric Token/ETH", 'Fabric_Token_ETH'),
-    ("ETHH20", 'ETHH20'),
-    (".XBTBON2H", '_XBTBON2H'),
-    ("ETHUSD.d", 'ETHUSD_d'),
-    ("ADA-0327", 'ADA-0327'),
-    ("BTC-USD-200110", 'BTC-USD-200110'),
-    ("BTC-PERP:USDT", 'BTC-PERP_USDT'),
-    ("F-AKRO/USDT", 'F-AKRO_USDT'),
-    ("LC+/ETH", 'LC__ETH'),
-    ("CMT@18/ETH", 'CMT_18_ETH'),
-    ("LBTC:1022/SAI", 'LBTC_1022_SAI'),
-    ("$PAC/BTC", '_PAC_BTC'),
-    ("ACC_OLD/BTC", 'ACC_OLD_BTC'),
-])
+@pytest.mark.parametrize(
+    "pair,expected_result",
+    [
+        ("ETH/BTC", "ETH_BTC"),
+        ("ETH/USDT", "ETH_USDT"),
+        ("ETH/USDT:USDT", "ETH_USDT_USDT"),  # swap with USDT as settlement currency
+        ("ETH/USD:USD", "ETH_USD_USD"),  # swap with USD as settlement currency
+        ("AAVE/USD:USD", "AAVE_USD_USD"),  # swap with USDT as settlement currency
+        ("ETH/USDT:USDT-210625", "ETH_USDT_USDT-210625"),  # expiring futures
+        ("Fabric Token/ETH", "Fabric_Token_ETH"),
+        ("ETHH20", "ETHH20"),
+        (".XBTBON2H", "_XBTBON2H"),
+        ("ETHUSD.d", "ETHUSD_d"),
+        ("ADA-0327", "ADA-0327"),
+        ("BTC-USD-200110", "BTC-USD-200110"),
+        ("BTC-PERP:USDT", "BTC-PERP_USDT"),
+        ("F-AKRO/USDT", "F-AKRO_USDT"),
+        ("LC+/ETH", "LC__ETH"),
+        ("CMT@18/ETH", "CMT_18_ETH"),
+        ("LBTC:1022/SAI", "LBTC_1022_SAI"),
+        ("$PAC/BTC", "_PAC_BTC"),
+        ("ACC_OLD/BTC", "ACC_OLD_BTC"),
+    ],
+)
 def test_pair_to_filename(pair, expected_result):
     pair_s = pair_to_filename(pair)
     assert pair_s == expected_result
 
 
 def test_safe_value_fallback():
-    dict1 = {'keya': None, 'keyb': 2, 'keyc': 5, 'keyd': None}
-    assert safe_value_fallback(dict1, 'keya', 'keyb') == 2
-    assert safe_value_fallback(dict1, 'keyb', 'keya') == 2
+    dict1 = {"keya": None, "keyb": 2, "keyc": 5, "keyd": None}
+    assert safe_value_fallback(dict1, "keya", "keyb") == 2
+    assert safe_value_fallback(dict1, "keyb", "keya") == 2
 
-    assert safe_value_fallback(dict1, 'keyb', 'keyc') == 2
-    assert safe_value_fallback(dict1, 'keya', 'keyc') == 5
+    assert safe_value_fallback(dict1, "keyb", "keyc") == 2
+    assert safe_value_fallback(dict1, "keya", "keyc") == 5
 
-    assert safe_value_fallback(dict1, 'keyc', 'keyb') == 5
+    assert safe_value_fallback(dict1, "keyc", "keyb") == 5
 
-    assert safe_value_fallback(dict1, 'keya', 'keyd') is None
+    assert safe_value_fallback(dict1, "keya", "keyd") is None
 
-    assert safe_value_fallback(dict1, 'keyNo', 'keyNo') is None
-    assert safe_value_fallback(dict1, 'keyNo', 'keyNo', 55) == 55
-    assert safe_value_fallback(dict1, 'keyNo', default_value=55) == 55
-    assert safe_value_fallback(dict1, 'keyNo', None, default_value=55) == 55
+    assert safe_value_fallback(dict1, "keyNo", "keyNo") is None
+    assert safe_value_fallback(dict1, "keyNo", "keyNo", 55) == 55
+    assert safe_value_fallback(dict1, "keyNo", default_value=55) == 55
+    assert safe_value_fallback(dict1, "keyNo", None, default_value=55) == 55
 
 
 def test_safe_value_fallback2():
-    dict1 = {'keya': None, 'keyb': 2, 'keyc': 5, 'keyd': None}
-    dict2 = {'keya': 20, 'keyb': None, 'keyc': 6, 'keyd': None}
-    assert safe_value_fallback2(dict1, dict2, 'keya', 'keya') == 20
-    assert safe_value_fallback2(dict2, dict1, 'keya', 'keya') == 20
+    dict1 = {"keya": None, "keyb": 2, "keyc": 5, "keyd": None}
+    dict2 = {"keya": 20, "keyb": None, "keyc": 6, "keyd": None}
+    assert safe_value_fallback2(dict1, dict2, "keya", "keya") == 20
+    assert safe_value_fallback2(dict2, dict1, "keya", "keya") == 20
 
-    assert safe_value_fallback2(dict1, dict2, 'keyb', 'keyb') == 2
-    assert safe_value_fallback2(dict2, dict1, 'keyb', 'keyb') == 2
+    assert safe_value_fallback2(dict1, dict2, "keyb", "keyb") == 2
+    assert safe_value_fallback2(dict2, dict1, "keyb", "keyb") == 2
 
-    assert safe_value_fallback2(dict1, dict2, 'keyc', 'keyc') == 5
-    assert safe_value_fallback2(dict2, dict1, 'keyc', 'keyc') == 6
+    assert safe_value_fallback2(dict1, dict2, "keyc", "keyc") == 5
+    assert safe_value_fallback2(dict2, dict1, "keyc", "keyc") == 6
 
-    assert safe_value_fallback2(dict1, dict2, 'keyd', 'keyd') is None
-    assert safe_value_fallback2(dict2, dict1, 'keyd', 'keyd') is None
-    assert safe_value_fallback2(dict2, dict1, 'keyd', 'keyd', 1234) == 1234
+    assert safe_value_fallback2(dict1, dict2, "keyd", "keyd") is None
+    assert safe_value_fallback2(dict2, dict1, "keyd", "keyd") is None
+    assert safe_value_fallback2(dict2, dict1, "keyd", "keyd", 1234) == 1234
 
-    assert safe_value_fallback2(dict1, dict2, 'keyNo', 'keyNo') is None
-    assert safe_value_fallback2(dict2, dict1, 'keyNo', 'keyNo') is None
-    assert safe_value_fallback2(dict2, dict1, 'keyNo', 'keyNo', 1234) == 1234
+    assert safe_value_fallback2(dict1, dict2, "keyNo", "keyNo") is None
+    assert safe_value_fallback2(dict2, dict1, "keyNo", "keyNo") is None
+    assert safe_value_fallback2(dict2, dict1, "keyNo", "keyNo", 1234) == 1234
 
 
 def test_plural() -> None:
@@ -154,38 +164,51 @@ def test_plural() -> None:
     assert plural(-1.5, "ox", "oxen") == "oxen"
 
 
-@pytest.mark.parametrize('conn_url,expected', [
-    ("postgresql+psycopg2://scott123:scott123@host:1245/dbname",
-     "postgresql+psycopg2://scott123:*****@host:1245/dbname"),
-    ("postgresql+psycopg2://scott123:scott123@host.name.com/dbname",
-     "postgresql+psycopg2://scott123:*****@host.name.com/dbname"),
-    ("mariadb+mariadbconnector://app_user:Password123!@127.0.0.1:3306/company",
-     "mariadb+mariadbconnector://app_user:*****@127.0.0.1:3306/company"),
-    ("mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4",
-     "mysql+pymysql://user:*****@some_mariadb/dbname?charset=utf8mb4"),
-    ("sqlite:////freqtrade/user_data/tradesv3.sqlite",
-     "sqlite:////freqtrade/user_data/tradesv3.sqlite"),
-])
+@pytest.mark.parametrize(
+    "conn_url,expected",
+    [
+        (
+            "postgresql+psycopg2://scott123:scott123@host:1245/dbname",
+            "postgresql+psycopg2://scott123:*****@host:1245/dbname",
+        ),
+        (
+            "postgresql+psycopg2://scott123:scott123@host.name.com/dbname",
+            "postgresql+psycopg2://scott123:*****@host.name.com/dbname",
+        ),
+        (
+            "mariadb+mariadbconnector://app_user:Password123!@127.0.0.1:3306/company",
+            "mariadb+mariadbconnector://app_user:*****@127.0.0.1:3306/company",
+        ),
+        (
+            "mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4",
+            "mysql+pymysql://user:*****@some_mariadb/dbname?charset=utf8mb4",
+        ),
+        (
+            "sqlite:////freqtrade/user_data/tradesv3.sqlite",
+            "sqlite:////freqtrade/user_data/tradesv3.sqlite",
+        ),
+    ],
+)
 def test_parse_db_uri_for_logging(conn_url, expected) -> None:
-
     assert parse_db_uri_for_logging(conn_url) == expected
 
 
 def test_deep_merge_dicts():
-    a = {'first': {'rows': {'pass': 'dog', 'number': '1', 'test': None}}}
-    b = {'first': {'rows': {'fail': 'cat', 'number': '5', 'test': 'asdf'}}}
-    res = {'first': {'rows': {'pass': 'dog', 'fail': 'cat', 'number': '5', 'test': 'asdf'}}}
-    res2 = {'first': {'rows': {'pass': 'dog', 'fail': 'cat', 'number': '1', 'test': None}}}
+    a = {"first": {"rows": {"pass": "dog", "number": "1", "test": None}}}
+    b = {"first": {"rows": {"fail": "cat", "number": "5", "test": "asdf"}}}
+    res = {"first": {"rows": {"pass": "dog", "fail": "cat", "number": "5", "test": "asdf"}}}
+    res2 = {"first": {"rows": {"pass": "dog", "fail": "cat", "number": "1", "test": None}}}
     assert deep_merge_dicts(b, deepcopy(a)) == res
 
     assert deep_merge_dicts(a, deepcopy(b)) == res2
 
-    res2['first']['rows']['test'] = 'asdf'
+    res2["first"]["rows"]["test"] = "asdf"
     assert deep_merge_dicts(a, deepcopy(b), allow_null_overrides=False) == res2
 
 
 def test_dataframe_json(ohlcv_history):
     from pandas.testing import assert_frame_equal
+
     json = dataframe_to_json(ohlcv_history)
     dataframe = json_to_dataframe(json)
 
@@ -193,7 +216,7 @@ def test_dataframe_json(ohlcv_history):
     assert len(ohlcv_history) == len(dataframe)
 
     assert_frame_equal(ohlcv_history, dataframe)
-    ohlcv_history.at[1, 'date'] = pd.NaT
+    ohlcv_history.at[1, "date"] = pd.NaT
     json = dataframe_to_json(ohlcv_history)
 
     dataframe = json_to_dataframe(json)
diff --git a/tests/test_plotting.py b/tests/test_plotting.py
index 185bfeaf1..4ce976b59 100644
--- a/tests/test_plotting.py
+++ b/tests/test_plotting.py
@@ -12,16 +12,26 @@ from freqtrade.data import history
 from freqtrade.data.btanalysis import load_backtest_data
 from freqtrade.data.metrics import create_cum_profit
 from freqtrade.exceptions import OperationalException
-from freqtrade.plot.plotting import (add_areas, add_indicators, add_profit, create_plotconfig,
-                                     generate_candlestick_graph, generate_plot_filename,
-                                     generate_profit_graph, init_plotscript, load_and_plot_trades,
-                                     plot_profit, plot_trades, store_plot_file)
+from freqtrade.plot.plotting import (
+    add_areas,
+    add_indicators,
+    add_profit,
+    create_plotconfig,
+    generate_candlestick_graph,
+    generate_plot_filename,
+    generate_profit_graph,
+    init_plotscript,
+    load_and_plot_trades,
+    plot_profit,
+    plot_trades,
+    store_plot_file,
+)
 from freqtrade.resolvers import StrategyResolver
 from tests.conftest import get_args, log_has, log_has_re, patch_exchange
 
 
 def fig_generating_mock(fig, *args, **kwargs):
-    """ Return Fig - used to mock add_indicators and plot_trades"""
+    """Return Fig - used to mock add_indicators and plot_trades"""
     return fig
 
 
@@ -41,18 +51,18 @@ def generate_empty_figure():
 
 
 def test_init_plotscript(default_conf, mocker, testdatadir):
-    default_conf['timerange'] = "20180110-20180112"
-    default_conf['trade_source'] = "file"
-    default_conf['timeframe'] = "5m"
-    default_conf['exportfilename'] = testdatadir / "backtest-result.json"
+    default_conf["timerange"] = "20180110-20180112"
+    default_conf["trade_source"] = "file"
+    default_conf["timeframe"] = "5m"
+    default_conf["exportfilename"] = testdatadir / "backtest-result.json"
     supported_markets = ["TRX/BTC", "ADA/BTC"]
     ret = init_plotscript(default_conf, supported_markets)
     assert "ohlcv" in ret
     assert "trades" in ret
     assert "pairs" in ret
-    assert 'timerange' in ret
+    assert "timerange" in ret
 
-    default_conf['pairs'] = ["TRX/BTC", "ADA/BTC"]
+    default_conf["pairs"] = ["TRX/BTC", "ADA/BTC"]
     ret = init_plotscript(default_conf, supported_markets, 20)
     assert "ohlcv" in ret
     assert "TRX/BTC" in ret["ohlcv"]
@@ -63,15 +73,16 @@ def test_add_indicators(default_conf, testdatadir, caplog):
     pair = "UNITTEST/BTC"
     timerange = TimeRange()
 
-    data = history.load_pair_history(pair=pair, timeframe='1m',
-                                     datadir=testdatadir, timerange=timerange)
+    data = history.load_pair_history(
+        pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange
+    )
     indicators1 = {"ema10": {}}
     indicators2 = {"macd": {"color": "red"}}
 
     strategy = StrategyResolver.load_strategy(default_conf)
 
     # Generate entry/exit signals and indicators
-    data = strategy.analyze_ticker(data, {'pair': pair})
+    data = strategy.analyze_ticker(data, {"pair": pair})
     fig = generate_empty_figure()
 
     # Row 1
@@ -89,39 +100,43 @@ def test_add_indicators(default_conf, testdatadir, caplog):
     assert macd.line.color == "red"
 
     # No indicator found
-    fig3 = add_indicators(fig=deepcopy(fig), row=3, indicators={'no_indicator': {}}, data=data)
+    fig3 = add_indicators(fig=deepcopy(fig), row=3, indicators={"no_indicator": {}}, data=data)
     assert fig == fig3
     assert log_has_re(r'Indicator "no_indicator" ignored\..*', caplog)
 
 
 def test_add_areas(default_conf, testdatadir, caplog):
     pair = "UNITTEST/BTC"
-    timerange = TimeRange(None, 'line', 0, -1000)
+    timerange = TimeRange(None, "line", 0, -1000)
 
-    data = history.load_pair_history(pair=pair, timeframe='1m',
-                                     datadir=testdatadir, timerange=timerange)
-    indicators = {"macd": {"color": "red",
-                           "fill_color": "black",
-                           "fill_to": "macdhist",
-                           "fill_label": "MACD Fill"}}
+    data = history.load_pair_history(
+        pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange
+    )
+    indicators = {
+        "macd": {
+            "color": "red",
+            "fill_color": "black",
+            "fill_to": "macdhist",
+            "fill_label": "MACD Fill",
+        }
+    }
 
-    ind_no_label = {"macd": {"fill_color": "red",
-                             "fill_to": "macdhist"}}
+    ind_no_label = {"macd": {"fill_color": "red", "fill_to": "macdhist"}}
 
     ind_plain = {"macd": {"fill_to": "macdhist"}}
     strategy = StrategyResolver.load_strategy(default_conf)
 
     # Generate entry/exit signals and indicators
-    data = strategy.analyze_ticker(data, {'pair': pair})
+    data = strategy.analyze_ticker(data, {"pair": pair})
     fig = generate_empty_figure()
 
     # indicator mentioned in fill_to does not exist
-    fig1 = add_areas(fig, 1, data, {'ema10': {'fill_to': 'no_fill_indicator'}})
+    fig1 = add_areas(fig, 1, data, {"ema10": {"fill_to": "no_fill_indicator"}})
     assert fig == fig1
     assert log_has_re(r'fill_to: "no_fill_indicator" ignored\..*', caplog)
 
     # indicator does not exist
-    fig2 = add_areas(fig, 1, data, {'no_indicator': {'fill_to': 'ema10'}})
+    fig2 = add_areas(fig, 1, data, {"no_indicator": {"fill_to": "ema10"}})
     assert fig == fig2
     assert log_has_re(r'Indicator "no_indicator" ignored\..*', caplog)
 
@@ -158,56 +173,60 @@ def test_plot_trades(testdatadir, caplog):
     pair = "ADA/BTC"
     filename = testdatadir / "backtest_results/backtest-result.json"
     trades = load_backtest_data(filename)
-    trades = trades.loc[trades['pair'] == pair]
+    trades = trades.loc[trades["pair"] == pair]
 
     fig = plot_trades(fig, trades)
     figure = fig1.layout.figure
 
     # Check entry - color, should be in first graph, ...
-    trade_entries = find_trace_in_fig_data(figure.data, 'Trade entry')
+    trade_entries = find_trace_in_fig_data(figure.data, "Trade entry")
     assert isinstance(trade_entries, go.Scatter)
-    assert trade_entries.yaxis == 'y'
+    assert trade_entries.yaxis == "y"
     assert len(trades) == len(trade_entries.x)
-    assert trade_entries.marker.color == 'cyan'
-    assert trade_entries.marker.symbol == 'circle-open'
-    assert trade_entries.text[0] == '3.99%, buy_tag, roi, 15 min'
+    assert trade_entries.marker.color == "cyan"
+    assert trade_entries.marker.symbol == "circle-open"
+    assert trade_entries.text[0] == "3.99%, buy_tag, roi, 15 min"
 
-    trade_exit = find_trace_in_fig_data(figure.data, 'Exit - Profit')
+    trade_exit = find_trace_in_fig_data(figure.data, "Exit - Profit")
     assert isinstance(trade_exit, go.Scatter)
-    assert trade_exit.yaxis == 'y'
-    assert len(trades.loc[trades['profit_ratio'] > 0]) == len(trade_exit.x)
-    assert trade_exit.marker.color == 'green'
-    assert trade_exit.marker.symbol == 'square-open'
-    assert trade_exit.text[0] == '3.99%, buy_tag, roi, 15 min'
+    assert trade_exit.yaxis == "y"
+    assert len(trades.loc[trades["profit_ratio"] > 0]) == len(trade_exit.x)
+    assert trade_exit.marker.color == "green"
+    assert trade_exit.marker.symbol == "square-open"
+    assert trade_exit.text[0] == "3.99%, buy_tag, roi, 15 min"
 
-    trade_sell_loss = find_trace_in_fig_data(figure.data, 'Exit - Loss')
+    trade_sell_loss = find_trace_in_fig_data(figure.data, "Exit - Loss")
     assert isinstance(trade_sell_loss, go.Scatter)
-    assert trade_sell_loss.yaxis == 'y'
-    assert len(trades.loc[trades['profit_ratio'] <= 0]) == len(trade_sell_loss.x)
-    assert trade_sell_loss.marker.color == 'red'
-    assert trade_sell_loss.marker.symbol == 'square-open'
-    assert trade_sell_loss.text[5] == '-10.45%, stop_loss, 720 min'
+    assert trade_sell_loss.yaxis == "y"
+    assert len(trades.loc[trades["profit_ratio"] <= 0]) == len(trade_sell_loss.x)
+    assert trade_sell_loss.marker.color == "red"
+    assert trade_sell_loss.marker.symbol == "square-open"
+    assert trade_sell_loss.text[5] == "-10.45%, stop_loss, 720 min"
 
 
 def test_generate_candlestick_graph_no_signals_no_trades(default_conf, mocker, testdatadir, caplog):
-    row_mock = mocker.patch('freqtrade.plot.plotting.add_indicators',
-                            MagicMock(side_effect=fig_generating_mock))
-    trades_mock = mocker.patch('freqtrade.plot.plotting.plot_trades',
-                               MagicMock(side_effect=fig_generating_mock))
+    row_mock = mocker.patch(
+        "freqtrade.plot.plotting.add_indicators", MagicMock(side_effect=fig_generating_mock)
+    )
+    trades_mock = mocker.patch(
+        "freqtrade.plot.plotting.plot_trades", MagicMock(side_effect=fig_generating_mock)
+    )
 
     pair = "UNITTEST/BTC"
-    timerange = TimeRange(None, 'line', 0, -1000)
-    data = history.load_pair_history(pair=pair, timeframe='1m',
-                                     datadir=testdatadir, timerange=timerange)
-    data['enter_long'] = 0
-    data['exit_long'] = 0
-    data['enter_short'] = 0
-    data['exit_short'] = 0
+    timerange = TimeRange(None, "line", 0, -1000)
+    data = history.load_pair_history(
+        pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange
+    )
+    data["enter_long"] = 0
+    data["exit_long"] = 0
+    data["enter_short"] = 0
+    data["exit_short"] = 0
 
     indicators1 = []
     indicators2 = []
-    fig = generate_candlestick_graph(pair=pair, data=data, trades=None,
-                                     indicators1=indicators1, indicators2=indicators2)
+    fig = generate_candlestick_graph(
+        pair=pair, data=data, trades=None, indicators1=indicators1, indicators2=indicators2
+    )
     assert isinstance(fig, go.Figure)
     assert fig.layout.title.text == pair
     figure = fig.layout.figure
@@ -230,24 +249,28 @@ def test_generate_candlestick_graph_no_signals_no_trades(default_conf, mocker, t
 
 
 def test_generate_candlestick_graph_no_trades(default_conf, mocker, testdatadir):
-    row_mock = mocker.patch('freqtrade.plot.plotting.add_indicators',
-                            MagicMock(side_effect=fig_generating_mock))
-    trades_mock = mocker.patch('freqtrade.plot.plotting.plot_trades',
-                               MagicMock(side_effect=fig_generating_mock))
-    pair = 'UNITTEST/BTC'
-    timerange = TimeRange(None, 'line', 0, -1000)
-    data = history.load_pair_history(pair=pair, timeframe='1m',
-                                     datadir=testdatadir, timerange=timerange)
+    row_mock = mocker.patch(
+        "freqtrade.plot.plotting.add_indicators", MagicMock(side_effect=fig_generating_mock)
+    )
+    trades_mock = mocker.patch(
+        "freqtrade.plot.plotting.plot_trades", MagicMock(side_effect=fig_generating_mock)
+    )
+    pair = "UNITTEST/BTC"
+    timerange = TimeRange(None, "line", 0, -1000)
+    data = history.load_pair_history(
+        pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange
+    )
 
     strategy = StrategyResolver.load_strategy(default_conf)
 
     # Generate buy/sell signals and indicators
-    data = strategy.analyze_ticker(data, {'pair': pair})
+    data = strategy.analyze_ticker(data, {"pair": pair})
 
     indicators1 = []
     indicators2 = []
-    fig = generate_candlestick_graph(pair=pair, data=data, trades=None,
-                                     indicators1=indicators1, indicators2=indicators2)
+    fig = generate_candlestick_graph(
+        pair=pair, data=data, trades=None, indicators1=indicators1, indicators2=indicators2
+    )
     assert isinstance(fig, go.Figure)
     assert fig.layout.title.text == pair
     figure = fig.layout.figure
@@ -263,12 +286,12 @@ def test_generate_candlestick_graph_no_trades(default_conf, mocker, testdatadir)
     enter_long = find_trace_in_fig_data(figure.data, "enter_long")
     assert isinstance(enter_long, go.Scatter)
     # All buy-signals should be plotted
-    assert int(data['enter_long'].sum()) == len(enter_long.x)
+    assert int(data["enter_long"].sum()) == len(enter_long.x)
 
     exit_long = find_trace_in_fig_data(figure.data, "exit_long")
     assert isinstance(exit_long, go.Scatter)
     # All exit-signals should be plotted
-    assert int(data['exit_long'].sum()) == len(exit_long.x)
+    assert int(data["exit_long"].sum()) == len(exit_long.x)
 
     assert find_trace_in_fig_data(figure.data, "Bollinger Band")
 
@@ -284,16 +307,15 @@ def test_generate_Plot_filename():
 def test_generate_plot_file(mocker, caplog, user_dir):
     fig = generate_empty_figure()
     plot_mock = mocker.patch("freqtrade.plot.plotting.plot", MagicMock())
-    store_plot_file(fig, filename="freqtrade-plot-UNITTEST_BTC-5m.html",
-                    directory=user_dir / "plot")
+    store_plot_file(
+        fig, filename="freqtrade-plot-UNITTEST_BTC-5m.html", directory=user_dir / "plot"
+    )
 
     expected_fn = str(user_dir / "plot/freqtrade-plot-UNITTEST_BTC-5m.html")
     assert plot_mock.call_count == 1
     assert plot_mock.call_args[0][0] == fig
-    assert (plot_mock.call_args_list[0][1]['filename']
-            == expected_fn)
-    assert log_has(f"Stored plot as {expected_fn}",
-                   caplog)
+    assert plot_mock.call_args_list[0][1]["filename"] == expected_fn
+    assert log_has(f"Stored plot as {expected_fn}", caplog)
 
 
 def test_add_profit(testdatadir):
@@ -301,15 +323,16 @@ def test_add_profit(testdatadir):
     bt_data = load_backtest_data(filename)
     timerange = TimeRange.parse_timerange("20180110-20180112")
 
-    df = history.load_pair_history(pair="TRX/BTC", timeframe='5m',
-                                   datadir=testdatadir, timerange=timerange)
+    df = history.load_pair_history(
+        pair="TRX/BTC", timeframe="5m", datadir=testdatadir, timerange=timerange
+    )
     fig = generate_empty_figure()
 
-    cum_profits = create_cum_profit(df.set_index('date'),
-                                    bt_data[bt_data["pair"] == 'TRX/BTC'],
-                                    "cum_profits", timeframe="5m")
+    cum_profits = create_cum_profit(
+        df.set_index("date"), bt_data[bt_data["pair"] == "TRX/BTC"], "cum_profits", timeframe="5m"
+    )
 
-    fig1 = add_profit(fig, row=2, data=cum_profits, column='cum_profits', name='Profits')
+    fig1 = add_profit(fig, row=2, data=cum_profits, column="cum_profits", name="Profits")
     figure = fig1.layout.figure
     profits = find_trace_in_fig_data(figure.data, "Profits")
     assert isinstance(profits, go.Scatter)
@@ -321,22 +344,15 @@ def test_generate_profit_graph(testdatadir):
     trades = load_backtest_data(filename)
     timerange = TimeRange.parse_timerange("20180110-20180112")
     pairs = ["TRX/BTC", "XLM/BTC"]
-    trades = trades[trades['close_date'] < pd.Timestamp('2018-01-12', tz='UTC')]
+    trades = trades[trades["close_date"] < pd.Timestamp("2018-01-12", tz="UTC")]
 
-    data = history.load_data(datadir=testdatadir,
-                             pairs=pairs,
-                             timeframe='5m',
-                             timerange=timerange)
+    data = history.load_data(datadir=testdatadir, pairs=pairs, timeframe="5m", timerange=timerange)
 
-    trades = trades[trades['pair'].isin(pairs)]
+    trades = trades[trades["pair"].isin(pairs)]
 
     fig = generate_profit_graph(
-        pairs,
-        data,
-        trades,
-        timeframe="5m",
-        stake_currency='BTC',
-        starting_balance=0)
+        pairs, data, trades, timeframe="5m", stake_currency="BTC", starting_balance=0
+    )
     assert isinstance(fig, go.Figure)
 
     assert fig.layout.title.text == "Freqtrade Profit plot"
@@ -369,40 +385,48 @@ def test_generate_profit_graph(testdatadir):
 
     with pytest.raises(OperationalException, match=r"No trades found.*"):
         # Pair cannot be empty - so it's an empty dataframe.
-        generate_profit_graph(pairs, data, trades.loc[trades['pair'].isnull()], timeframe="5m",
-                              stake_currency='BTC', starting_balance=0)
+        generate_profit_graph(
+            pairs,
+            data,
+            trades.loc[trades["pair"].isnull()],
+            timeframe="5m",
+            stake_currency="BTC",
+            starting_balance=0,
+        )
 
 
 def test_start_plot_dataframe(mocker):
     aup = mocker.patch("freqtrade.plot.plotting.load_and_plot_trades", MagicMock())
     args = [
         "plot-dataframe",
-        "--config", "tests/testdata/testconfigs/main_test_config.json",
-        "--pairs", "ETH/BTC"
+        "--config",
+        "tests/testdata/testconfigs/main_test_config.json",
+        "--pairs",
+        "ETH/BTC",
     ]
     start_plot_dataframe(get_args(args))
 
     assert aup.call_count == 1
     called_config = aup.call_args_list[0][0][0]
     assert "pairs" in called_config
-    assert called_config['pairs'] == ["ETH/BTC"]
+    assert called_config["pairs"] == ["ETH/BTC"]
 
 
 def test_load_and_plot_trades(default_conf, mocker, caplog, testdatadir):
     patch_exchange(mocker)
 
-    default_conf['trade_source'] = 'file'
-    default_conf['exportfilename'] = testdatadir / "backtest-result.json"
-    default_conf['indicators1'] = ["sma5", "ema10"]
-    default_conf['indicators2'] = ["macd"]
-    default_conf['pairs'] = ["ETH/BTC", "LTC/BTC"]
+    default_conf["trade_source"] = "file"
+    default_conf["exportfilename"] = testdatadir / "backtest-result.json"
+    default_conf["indicators1"] = ["sma5", "ema10"]
+    default_conf["indicators2"] = ["macd"]
+    default_conf["pairs"] = ["ETH/BTC", "LTC/BTC"]
 
     candle_mock = MagicMock()
     store_mock = MagicMock()
     mocker.patch.multiple(
         "freqtrade.plot.plotting",
         generate_candlestick_graph=candle_mock,
-        store_plot_file=store_mock
+        store_plot_file=store_mock,
     )
     load_and_plot_trades(default_conf)
 
@@ -410,8 +434,8 @@ def test_load_and_plot_trades(default_conf, mocker, caplog, testdatadir):
     assert candle_mock.call_count == 2
     assert store_mock.call_count == 2
 
-    assert candle_mock.call_args_list[0][1]['indicators1'] == ['sma5', 'ema10']
-    assert candle_mock.call_args_list[0][1]['indicators2'] == ['macd']
+    assert candle_mock.call_args_list[0][1]["indicators1"] == ["sma5", "ema10"]
+    assert candle_mock.call_args_list[0][1]["indicators2"] == ["macd"]
 
     assert log_has("End of plotting process. 2 plots generated", caplog)
 
@@ -420,49 +444,46 @@ def test_start_plot_profit(mocker):
     aup = mocker.patch("freqtrade.plot.plotting.plot_profit", MagicMock())
     args = [
         "plot-profit",
-        "--config", "tests/testdata/testconfigs/main_test_config.json",
-        "--pairs", "ETH/BTC"
+        "--config",
+        "tests/testdata/testconfigs/main_test_config.json",
+        "--pairs",
+        "ETH/BTC",
     ]
     start_plot_profit(get_args(args))
 
     assert aup.call_count == 1
     called_config = aup.call_args_list[0][0][0]
     assert "pairs" in called_config
-    assert called_config['pairs'] == ["ETH/BTC"]
+    assert called_config["pairs"] == ["ETH/BTC"]
 
 
 def test_start_plot_profit_error(mocker):
-
-    args = [
-        'plot-profit',
-        '--pairs', 'ETH/BTC'
-    ]
+    args = ["plot-profit", "--pairs", "ETH/BTC"]
     argsp = get_args(args)
     # Make sure we use no config. Details: #2241
     # not resetting config causes random failures if config.json exists
-    argsp['config'] = []
+    argsp["config"] = []
     with pytest.raises(OperationalException):
         start_plot_profit(argsp)
 
 
 def test_plot_profit(default_conf, mocker, testdatadir):
     patch_exchange(mocker)
-    default_conf['trade_source'] = 'file'
-    default_conf['exportfilename'] = testdatadir / 'backtest-result_test_nofile.json'
-    default_conf['pairs'] = ['ETH/BTC', 'LTC/BTC']
+    default_conf["trade_source"] = "file"
+    default_conf["exportfilename"] = testdatadir / "backtest-result_test_nofile.json"
+    default_conf["pairs"] = ["ETH/BTC", "LTC/BTC"]
 
     profit_mock = MagicMock()
     store_mock = MagicMock()
     mocker.patch.multiple(
-        "freqtrade.plot.plotting",
-        generate_profit_graph=profit_mock,
-        store_plot_file=store_mock
+        "freqtrade.plot.plotting", generate_profit_graph=profit_mock, store_plot_file=store_mock
     )
-    with pytest.raises(OperationalException,
-                       match=r"No trades found, cannot generate Profit-plot.*"):
+    with pytest.raises(
+        OperationalException, match=r"No trades found, cannot generate Profit-plot.*"
+    ):
         plot_profit(default_conf)
 
-    default_conf['exportfilename'] = testdatadir / "backtest_results/backtest-result.json"
+    default_conf["exportfilename"] = testdatadir / "backtest_results/backtest-result.json"
 
     plot_profit(default_conf)
 
@@ -470,53 +491,78 @@ def test_plot_profit(default_conf, mocker, testdatadir):
     assert profit_mock.call_count == 1
     assert store_mock.call_count == 1
 
-    assert profit_mock.call_args_list[0][0][0] == default_conf['pairs']
-    assert store_mock.call_args_list[0][1]['auto_open'] is False
+    assert profit_mock.call_args_list[0][0][0] == default_conf["pairs"]
+    assert store_mock.call_args_list[0][1]["auto_open"] is False
 
-    del default_conf['timeframe']
+    del default_conf["timeframe"]
     with pytest.raises(OperationalException, match=r"Timeframe must be set.*--timeframe.*"):
         plot_profit(default_conf)
 
 
-@pytest.mark.parametrize("ind1,ind2,plot_conf,exp", [
-    # No indicators, use plot_conf
-    ([], [], {},
-     {'main_plot': {'sma': {}, 'ema3': {}, 'ema5': {}},
-      'subplots': {'Other': {'macd': {}, 'macdsignal': {}}}}),
-    # use indicators
-    (['sma', 'ema3'], ['macd'], {},
-     {'main_plot': {'sma': {}, 'ema3': {}}, 'subplots': {'Other': {'macd': {}}}}),
-    # only main_plot - adds empty subplots
-    ([], [], {'main_plot': {'sma': {}}},
-     {'main_plot': {'sma': {}}, 'subplots': {}}),
-    # Main and subplots
-    ([], [], {'main_plot': {'sma': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}},
-     {'main_plot': {'sma': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}}),
-    # no main_plot, adds empty main_plot
-    ([], [], {'subplots': {'RSI': {'rsi': {'color': 'red'}}}},
-     {'main_plot': {}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}}),
-    # indicator 1 / 2 should have prevalence
-    (['sma', 'ema3'], ['macd'],
-     {'main_plot': {'sma': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}},
-     {'main_plot': {'sma': {}, 'ema3': {}}, 'subplots': {'Other': {'macd': {}}}}
-     ),
-    # indicator 1 - overrides plot_config main_plot
-    (['sma', 'ema3'], [],
-     {'main_plot': {'sma': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}},
-     {'main_plot': {'sma': {}, 'ema3': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}}
-     ),
-    # indicator 2 - overrides plot_config subplots
-    ([], ['macd', 'macd_signal'],
-     {'main_plot': {'sma': {}}, 'subplots': {'RSI': {'rsi': {'color': 'red'}}}},
-     {'main_plot': {'sma': {}}, 'subplots': {'Other': {'macd': {}, 'macd_signal': {}}}}
-     ),
-])
+@pytest.mark.parametrize(
+    "ind1,ind2,plot_conf,exp",
+    [
+        # No indicators, use plot_conf
+        (
+            [],
+            [],
+            {},
+            {
+                "main_plot": {"sma": {}, "ema3": {}, "ema5": {}},
+                "subplots": {"Other": {"macd": {}, "macdsignal": {}}},
+            },
+        ),
+        # use indicators
+        (
+            ["sma", "ema3"],
+            ["macd"],
+            {},
+            {"main_plot": {"sma": {}, "ema3": {}}, "subplots": {"Other": {"macd": {}}}},
+        ),
+        # only main_plot - adds empty subplots
+        ([], [], {"main_plot": {"sma": {}}}, {"main_plot": {"sma": {}}, "subplots": {}}),
+        # Main and subplots
+        (
+            [],
+            [],
+            {"main_plot": {"sma": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+            {"main_plot": {"sma": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+        ),
+        # no main_plot, adds empty main_plot
+        (
+            [],
+            [],
+            {"subplots": {"RSI": {"rsi": {"color": "red"}}}},
+            {"main_plot": {}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+        ),
+        # indicator 1 / 2 should have prevalence
+        (
+            ["sma", "ema3"],
+            ["macd"],
+            {"main_plot": {"sma": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+            {"main_plot": {"sma": {}, "ema3": {}}, "subplots": {"Other": {"macd": {}}}},
+        ),
+        # indicator 1 - overrides plot_config main_plot
+        (
+            ["sma", "ema3"],
+            [],
+            {"main_plot": {"sma": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+            {"main_plot": {"sma": {}, "ema3": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+        ),
+        # indicator 2 - overrides plot_config subplots
+        (
+            [],
+            ["macd", "macd_signal"],
+            {"main_plot": {"sma": {}}, "subplots": {"RSI": {"rsi": {"color": "red"}}}},
+            {"main_plot": {"sma": {}}, "subplots": {"Other": {"macd": {}, "macd_signal": {}}}},
+        ),
+    ],
+)
 def test_create_plotconfig(ind1, ind2, plot_conf, exp):
-
     res = create_plotconfig(ind1, ind2, plot_conf)
-    assert 'main_plot' in res
-    assert 'subplots' in res
-    assert isinstance(res['main_plot'], dict)
-    assert isinstance(res['subplots'], dict)
+    assert "main_plot" in res
+    assert "subplots" in res
+    assert isinstance(res["main_plot"], dict)
+    assert isinstance(res["subplots"], dict)
 
     assert res == exp
diff --git a/tests/test_strategy_updater.py b/tests/test_strategy_updater.py
index 7f4ae4349..a53c6c9b7 100644
--- a/tests/test_strategy_updater.py
+++ b/tests/test_strategy_updater.py
@@ -18,43 +18,37 @@ if sys.version_info < (3, 9):
 
 def test_strategy_updater_start(user_dir, capsys) -> None:
     # Effective test without mocks.
-    teststrats = Path(__file__).parent / 'strategy/strats'
+    teststrats = Path(__file__).parent / "strategy/strats"
     tmpdirp = Path(user_dir) / "strategies"
     tmpdirp.mkdir(parents=True, exist_ok=True)
-    shutil.copy(teststrats / 'strategy_test_v2.py', tmpdirp)
-    old_code = (teststrats / 'strategy_test_v2.py').read_text()
+    shutil.copy(teststrats / "strategy_test_v2.py", tmpdirp)
+    old_code = (teststrats / "strategy_test_v2.py").read_text()
 
-    args = [
-        "strategy-updater",
-        "--userdir",
-        str(user_dir),
-        "--strategy-list",
-        "StrategyTestV2"
-         ]
+    args = ["strategy-updater", "--userdir", str(user_dir), "--strategy-list", "StrategyTestV2"]
     pargs = get_args(args)
-    pargs['config'] = None
+    pargs["config"] = None
 
     start_strategy_update(pargs)
 
     assert Path(user_dir / "strategies_orig_updater").exists()
     # Backup file exists
-    assert Path(user_dir / "strategies_orig_updater" / 'strategy_test_v2.py').exists()
+    assert Path(user_dir / "strategies_orig_updater" / "strategy_test_v2.py").exists()
     # updated file exists
-    new_file = tmpdirp / 'strategy_test_v2.py'
+    new_file = tmpdirp / "strategy_test_v2.py"
     assert new_file.exists()
     new_code = new_file.read_text()
-    assert 'INTERFACE_VERSION = 3' in new_code
-    assert 'INTERFACE_VERSION = 2' in old_code
+    assert "INTERFACE_VERSION = 3" in new_code
+    assert "INTERFACE_VERSION = 2" in old_code
     captured = capsys.readouterr()
 
-    assert 'Conversion of strategy_test_v2.py started.' in captured.out
-    assert re.search(r'Conversion of strategy_test_v2\.py took .* seconds', captured.out)
+    assert "Conversion of strategy_test_v2.py started." in captured.out
+    assert re.search(r"Conversion of strategy_test_v2\.py took .* seconds", captured.out)
 
 
 def test_strategy_updater_methods(default_conf, caplog) -> None:
-
     instance_strategy_updater = StrategyUpdater()
-    modified_code1 = instance_strategy_updater.update_code("""
+    modified_code1 = instance_strategy_updater.update_code(
+        """
 class testClass(IStrategy):
     def populate_buy_trend():
         pass
@@ -66,7 +60,8 @@ class testClass(IStrategy):
         pass
     def custom_sell():
         pass
-""")
+"""
+    )
 
     assert "populate_entry_trend" in modified_code1
     assert "populate_exit_trend" in modified_code1
@@ -79,11 +74,13 @@ class testClass(IStrategy):
 def test_strategy_updater_params(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
 
-    modified_code2 = instance_strategy_updater.update_code("""
+    modified_code2 = instance_strategy_updater.update_code(
+        """
 ticker_interval = '15m'
 buy_some_parameter = IntParameter(space='buy')
 sell_some_parameter = IntParameter(space='sell')
-""")
+"""
+    )
 
     assert "timeframe" in modified_code2
     # check for not editing hyperopt spaces
@@ -93,13 +90,15 @@ sell_some_parameter = IntParameter(space='sell')
 
 def test_strategy_updater_constants(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code3 = instance_strategy_updater.update_code("""
+    modified_code3 = instance_strategy_updater.update_code(
+        """
 use_sell_signal = True
 sell_profit_only = True
 sell_profit_offset = True
 ignore_roi_if_buy_signal = True
 forcebuy_enable = True
-""")
+"""
+    )
 
     assert "use_exit_signal" in modified_code3
     assert "exit_profit_only" in modified_code3
@@ -110,10 +109,12 @@ forcebuy_enable = True
 
 def test_strategy_updater_df_columns(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 dataframe.loc[reduce(lambda x, y: x & y, conditions), ["buy", "buy_tag"]] = (1, "buy_signal_1")
 dataframe.loc[reduce(lambda x, y: x & y, conditions), 'sell'] = 1
-""")
+"""
+    )
 
     assert "enter_long" in modified_code
     assert "exit_long" in modified_code
@@ -122,18 +123,21 @@ dataframe.loc[reduce(lambda x, y: x & y, conditions), 'sell'] = 1
 
 def test_strategy_updater_method_params(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 def confirm_trade_exit(sell_reason: str):
     nr_orders = trade.nr_of_successful_buys
     pass
-    """)
+    """
+    )
     assert "exit_reason" in modified_code
     assert "nr_orders = trade.nr_of_successful_entries" in modified_code
 
 
 def test_strategy_updater_dicts(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 order_time_in_force = {
     'buy': 'gtc',
     'sell': 'ioc'
@@ -148,7 +152,8 @@ unfilledtimeout = {
     'buy': 1,
     'sell': 2
 }
-""")
+"""
+    )
 
     assert "'entry': 'gtc'" in modified_code
     assert "'exit': 'ioc'" in modified_code
@@ -160,11 +165,13 @@ unfilledtimeout = {
 
 def test_strategy_updater_comparisons(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 def confirm_trade_exit(sell_reason):
     if (sell_reason == 'stop_loss'):
         pass
-""")
+"""
+    )
     assert "exit_reason" in modified_code
     assert "exit_reason == 'stop_loss'" in modified_code
 
@@ -172,11 +179,13 @@ def confirm_trade_exit(sell_reason):
 def test_strategy_updater_strings(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
 
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 sell_reason == 'sell_signal'
 sell_reason == 'force_sell'
 sell_reason == 'emergency_sell'
-""")
+"""
+    )
 
     # those tests currently don't work, next in line.
     assert "exit_signal" in modified_code
@@ -187,7 +196,8 @@ sell_reason == 'emergency_sell'
 
 def test_strategy_updater_comments(default_conf, caplog) -> None:
     instance_strategy_updater = StrategyUpdater()
-    modified_code = instance_strategy_updater.update_code("""
+    modified_code = instance_strategy_updater.update_code(
+        """
 # This is the 1st comment
 import talib.abstract as ta
 # This is the 2nd comment
@@ -204,7 +214,8 @@ class someStrategy(IStrategy):
 
     # This is the 4th comment
     stoploss = -0.1
-""")
+"""
+    )
 
     assert "This is the 1st comment" in modified_code
     assert "This is the 2nd comment" in modified_code
diff --git a/tests/test_talib.py b/tests/test_talib.py
index f526fdd4d..97551bec9 100644
--- a/tests/test_talib.py
+++ b/tests/test_talib.py
@@ -3,12 +3,14 @@ import talib.abstract as ta
 
 
 def test_talib_bollingerbands_near_zero_values():
-    inputs = pd.DataFrame([
-        {'close': 0.00000010},
-        {'close': 0.00000011},
-        {'close': 0.00000012},
-        {'close': 0.00000013},
-        {'close': 0.00000014}
-    ])
+    inputs = pd.DataFrame(
+        [
+            {"close": 0.00000010},
+            {"close": 0.00000011},
+            {"close": 0.00000012},
+            {"close": 0.00000013},
+            {"close": 0.00000014},
+        ]
+    )
     bollinger = ta.BBANDS(inputs, matype=0, timeperiod=2)
-    assert bollinger['upperband'][3] != bollinger['middleband'][3]
+    assert bollinger["upperband"][3] != bollinger["middleband"][3]
diff --git a/tests/test_timerange.py b/tests/test_timerange.py
index d1c61704f..c7471e263 100644
--- a/tests/test_timerange.py
+++ b/tests/test_timerange.py
@@ -8,61 +8,61 @@ from freqtrade.exceptions import OperationalException
 
 
 def test_parse_timerange_incorrect():
-
-    timerange = TimeRange.parse_timerange('')
+    timerange = TimeRange.parse_timerange("")
     assert timerange == TimeRange(None, None, 0, 0)
-    timerange = TimeRange.parse_timerange('20100522-')
-    assert TimeRange('date', None, 1274486400, 0) == timerange
-    assert timerange.timerange_str == '20100522-'
-    timerange = TimeRange.parse_timerange('-20100522')
-    assert TimeRange(None, 'date', 0, 1274486400) == timerange
-    assert timerange.timerange_str == '-20100522'
-    timerange = TimeRange.parse_timerange('20100522-20150730')
-    assert timerange == TimeRange('date', 'date', 1274486400, 1438214400)
-    assert timerange.timerange_str == '20100522-20150730'
-    assert timerange.start_fmt == '2010-05-22 00:00:00'
-    assert timerange.stop_fmt == '2015-07-30 00:00:00'
+    timerange = TimeRange.parse_timerange("20100522-")
+    assert TimeRange("date", None, 1274486400, 0) == timerange
+    assert timerange.timerange_str == "20100522-"
+    timerange = TimeRange.parse_timerange("-20100522")
+    assert TimeRange(None, "date", 0, 1274486400) == timerange
+    assert timerange.timerange_str == "-20100522"
+    timerange = TimeRange.parse_timerange("20100522-20150730")
+    assert timerange == TimeRange("date", "date", 1274486400, 1438214400)
+    assert timerange.timerange_str == "20100522-20150730"
+    assert timerange.start_fmt == "2010-05-22 00:00:00"
+    assert timerange.stop_fmt == "2015-07-30 00:00:00"
 
     # Added test for unix timestamp - BTC genesis date
-    assert TimeRange('date', None, 1231006505, 0) == TimeRange.parse_timerange('1231006505-')
-    assert TimeRange(None, 'date', 0, 1233360000) == TimeRange.parse_timerange('-1233360000')
-    timerange = TimeRange.parse_timerange('1231006505-1233360000')
-    assert TimeRange('date', 'date', 1231006505, 1233360000) == timerange
+    assert TimeRange("date", None, 1231006505, 0) == TimeRange.parse_timerange("1231006505-")
+    assert TimeRange(None, "date", 0, 1233360000) == TimeRange.parse_timerange("-1233360000")
+    timerange = TimeRange.parse_timerange("1231006505-1233360000")
+    assert TimeRange("date", "date", 1231006505, 1233360000) == timerange
     assert isinstance(timerange.startdt, datetime)
     assert isinstance(timerange.stopdt, datetime)
     assert timerange.startdt == datetime.fromtimestamp(1231006505, tz=timezone.utc)
     assert timerange.stopdt == datetime.fromtimestamp(1233360000, tz=timezone.utc)
-    assert timerange.timerange_str == '20090103-20090131'
+    assert timerange.timerange_str == "20090103-20090131"
 
-    timerange = TimeRange.parse_timerange('1231006505000-1233360000000')
-    assert TimeRange('date', 'date', 1231006505, 1233360000) == timerange
+    timerange = TimeRange.parse_timerange("1231006505000-1233360000000")
+    assert TimeRange("date", "date", 1231006505, 1233360000) == timerange
 
-    timerange = TimeRange.parse_timerange('1231006505000-')
-    assert TimeRange('date', None, 1231006505, 0) == timerange
+    timerange = TimeRange.parse_timerange("1231006505000-")
+    assert TimeRange("date", None, 1231006505, 0) == timerange
 
-    timerange = TimeRange.parse_timerange('-1231006505000')
-    assert TimeRange(None, 'date', 0, 1231006505) == timerange
+    timerange = TimeRange.parse_timerange("-1231006505000")
+    assert TimeRange(None, "date", 0, 1231006505) == timerange
 
-    with pytest.raises(OperationalException, match=r'Incorrect syntax.*'):
-        TimeRange.parse_timerange('-')
+    with pytest.raises(OperationalException, match=r"Incorrect syntax.*"):
+        TimeRange.parse_timerange("-")
 
-    with pytest.raises(OperationalException,
-                       match=r'Start date is after stop date for timerange.*'):
-        TimeRange.parse_timerange('20100523-20100522')
+    with pytest.raises(
+        OperationalException, match=r"Start date is after stop date for timerange.*"
+    ):
+        TimeRange.parse_timerange("20100523-20100522")
 
 
 def test_subtract_start():
-    x = TimeRange('date', 'date', 1274486400, 1438214400)
+    x = TimeRange("date", "date", 1274486400, 1438214400)
     x.subtract_start(300)
     assert x.startts == 1274486400 - 300
 
     # Do nothing if no startdate exists
-    x = TimeRange(None, 'date', 0, 1438214400)
+    x = TimeRange(None, "date", 0, 1438214400)
     x.subtract_start(300)
     assert not x.startts
     assert not x.startdt
 
-    x = TimeRange('date', None, 1274486400, 0)
+    x = TimeRange("date", None, 1274486400, 0)
     x.subtract_start(300)
     assert x.startts == 1274486400 - 300
 
@@ -70,17 +70,17 @@ def test_subtract_start():
 def test_adjust_start_if_necessary():
     min_date = datetime(2017, 11, 14, 21, 15, 00, tzinfo=timezone.utc)
 
-    x = TimeRange('date', 'date', 1510694100, 1510780500)
+    x = TimeRange("date", "date", 1510694100, 1510780500)
     # Adjust by 20 candles - min_date == startts
     x.adjust_start_if_necessary(300, 20, min_date)
     assert x.startts == 1510694100 + (20 * 300)
 
-    x = TimeRange('date', 'date', 1510700100, 1510780500)
+    x = TimeRange("date", "date", 1510700100, 1510780500)
     # Do nothing, startup is set and different min_date
     x.adjust_start_if_necessary(300, 20, min_date)
     assert x.startts == 1510694100 + (20 * 300)
 
-    x = TimeRange(None, 'date', 0, 1510780500)
+    x = TimeRange(None, "date", 0, 1510780500)
     # Adjust by 20 candles = 20 * 5m
     x.adjust_start_if_necessary(300, 20, min_date)
     assert x.startts == 1510694100 + (20 * 300)
diff --git a/tests/test_wallets.py b/tests/test_wallets.py
index 0d0ada1b7..f33222b7c 100644
--- a/tests/test_wallets.py
+++ b/tests/test_wallets.py
@@ -8,190 +8,189 @@ from sqlalchemy import select
 from freqtrade.constants import UNLIMITED_STAKE_AMOUNT
 from freqtrade.exceptions import DependencyException
 from freqtrade.persistence import Trade
-from tests.conftest import (EXMS, create_mock_trades, create_mock_trades_usdt,
-                            get_patched_freqtradebot, patch_wallet)
+from tests.conftest import (
+    EXMS,
+    create_mock_trades,
+    create_mock_trades_usdt,
+    get_patched_freqtradebot,
+    patch_wallet,
+)
 
 
 def test_sync_wallet_at_boot(mocker, default_conf):
-    default_conf['dry_run'] = False
+    default_conf["dry_run"] = False
     mocker.patch.multiple(
         EXMS,
-        get_balances=MagicMock(return_value={
-            "BNT": {
-                "free": 1.0,
-                "used": 2.0,
-                "total": 3.0
-            },
-            "GAS": {
-                "free": 0.260739,
-                "used": 0.0,
-                "total": 0.260739
-            },
-            "USDT": {
-                "free": 20,
-                "used": 20,
-                "total": 40
-            },
-        })
+        get_balances=MagicMock(
+            return_value={
+                "BNT": {"free": 1.0, "used": 2.0, "total": 3.0},
+                "GAS": {"free": 0.260739, "used": 0.0, "total": 0.260739},
+                "USDT": {"free": 20, "used": 20, "total": 40},
+            }
+        ),
     )
 
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
 
     assert len(freqtrade.wallets._wallets) == 3
-    assert freqtrade.wallets._wallets['BNT'].free == 1.0
-    assert freqtrade.wallets._wallets['BNT'].used == 2.0
-    assert freqtrade.wallets._wallets['BNT'].total == 3.0
-    assert freqtrade.wallets._wallets['GAS'].free == 0.260739
-    assert freqtrade.wallets._wallets['GAS'].used == 0.0
-    assert freqtrade.wallets._wallets['GAS'].total == 0.260739
-    assert freqtrade.wallets.get_free('BNT') == 1.0
-    assert 'USDT' in freqtrade.wallets._wallets
+    assert freqtrade.wallets._wallets["BNT"].free == 1.0
+    assert freqtrade.wallets._wallets["BNT"].used == 2.0
+    assert freqtrade.wallets._wallets["BNT"].total == 3.0
+    assert freqtrade.wallets._wallets["GAS"].free == 0.260739
+    assert freqtrade.wallets._wallets["GAS"].used == 0.0
+    assert freqtrade.wallets._wallets["GAS"].total == 0.260739
+    assert freqtrade.wallets.get_free("BNT") == 1.0
+    assert "USDT" in freqtrade.wallets._wallets
     assert freqtrade.wallets._last_wallet_refresh is not None
     mocker.patch.multiple(
         EXMS,
-        get_balances=MagicMock(return_value={
-            "BNT": {
-                "free": 1.2,
-                "used": 1.9,
-                "total": 3.5
-            },
-            "GAS": {
-                "free": 0.270739,
-                "used": 0.1,
-                "total": 0.260439
-            },
-        })
+        get_balances=MagicMock(
+            return_value={
+                "BNT": {"free": 1.2, "used": 1.9, "total": 3.5},
+                "GAS": {"free": 0.270739, "used": 0.1, "total": 0.260439},
+            }
+        ),
     )
 
     freqtrade.wallets.update()
 
     # USDT is missing from the 2nd result - so should not be in this either.
     assert len(freqtrade.wallets._wallets) == 2
-    assert freqtrade.wallets._wallets['BNT'].free == 1.2
-    assert freqtrade.wallets._wallets['BNT'].used == 1.9
-    assert freqtrade.wallets._wallets['BNT'].total == 3.5
-    assert freqtrade.wallets._wallets['GAS'].free == 0.270739
-    assert freqtrade.wallets._wallets['GAS'].used == 0.1
-    assert freqtrade.wallets._wallets['GAS'].total == 0.260439
-    assert freqtrade.wallets.get_free('GAS') == 0.270739
-    assert freqtrade.wallets.get_used('GAS') == 0.1
-    assert freqtrade.wallets.get_total('GAS') == 0.260439
-    update_mock = mocker.patch('freqtrade.wallets.Wallets._update_live')
+    assert freqtrade.wallets._wallets["BNT"].free == 1.2
+    assert freqtrade.wallets._wallets["BNT"].used == 1.9
+    assert freqtrade.wallets._wallets["BNT"].total == 3.5
+    assert freqtrade.wallets._wallets["GAS"].free == 0.270739
+    assert freqtrade.wallets._wallets["GAS"].used == 0.1
+    assert freqtrade.wallets._wallets["GAS"].total == 0.260439
+    assert freqtrade.wallets.get_free("GAS") == 0.270739
+    assert freqtrade.wallets.get_used("GAS") == 0.1
+    assert freqtrade.wallets.get_total("GAS") == 0.260439
+    update_mock = mocker.patch("freqtrade.wallets.Wallets._update_live")
     freqtrade.wallets.update(False)
     assert update_mock.call_count == 0
     freqtrade.wallets.update()
     assert update_mock.call_count == 1
 
-    assert freqtrade.wallets.get_free('NOCURRENCY') == 0
-    assert freqtrade.wallets.get_used('NOCURRENCY') == 0
-    assert freqtrade.wallets.get_total('NOCURRENCY') == 0
+    assert freqtrade.wallets.get_free("NOCURRENCY") == 0
+    assert freqtrade.wallets.get_used("NOCURRENCY") == 0
+    assert freqtrade.wallets.get_total("NOCURRENCY") == 0
 
 
 def test_sync_wallet_missing_data(mocker, default_conf):
-    default_conf['dry_run'] = False
+    default_conf["dry_run"] = False
     mocker.patch.multiple(
         EXMS,
-        get_balances=MagicMock(return_value={
-            "BNT": {
-                "free": 1.0,
-                "used": 2.0,
-                "total": 3.0
-            },
-            "GAS": {
-                "free": 0.260739,
-                "total": 0.260739
-            },
-        })
+        get_balances=MagicMock(
+            return_value={
+                "BNT": {"free": 1.0, "used": 2.0, "total": 3.0},
+                "GAS": {"free": 0.260739, "total": 0.260739},
+            }
+        ),
     )
 
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
 
     assert len(freqtrade.wallets._wallets) == 2
-    assert freqtrade.wallets._wallets['BNT'].free == 1.0
-    assert freqtrade.wallets._wallets['BNT'].used == 2.0
-    assert freqtrade.wallets._wallets['BNT'].total == 3.0
-    assert freqtrade.wallets._wallets['GAS'].free == 0.260739
-    assert freqtrade.wallets._wallets['GAS'].used is None
-    assert freqtrade.wallets._wallets['GAS'].total == 0.260739
-    assert freqtrade.wallets.get_free('GAS') == 0.260739
+    assert freqtrade.wallets._wallets["BNT"].free == 1.0
+    assert freqtrade.wallets._wallets["BNT"].used == 2.0
+    assert freqtrade.wallets._wallets["BNT"].total == 3.0
+    assert freqtrade.wallets._wallets["GAS"].free == 0.260739
+    assert freqtrade.wallets._wallets["GAS"].used is None
+    assert freqtrade.wallets._wallets["GAS"].total == 0.260739
+    assert freqtrade.wallets.get_free("GAS") == 0.260739
 
 
 def test_get_trade_stake_amount_no_stake_amount(default_conf, mocker) -> None:
-    patch_wallet(mocker, free=default_conf['stake_amount'] * 0.5)
+    patch_wallet(mocker, free=default_conf["stake_amount"] * 0.5)
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
 
-    with pytest.raises(DependencyException, match=r'.*stake amount.*'):
-        freqtrade.wallets.get_trade_stake_amount('ETH/BTC', 1)
+    with pytest.raises(DependencyException, match=r".*stake amount.*"):
+        freqtrade.wallets.get_trade_stake_amount("ETH/BTC", 1)
 
 
-@pytest.mark.parametrize("balance_ratio,capital,result1,result2", [
-                        (1,    None, 50, 66.66666),
-                        (0.99, None, 49.5, 66.0),
-                        (0.50, None, 25, 33.3333),
-    # Tests with capital ignore balance_ratio
-                        (1,    100, 50, 0.0),
-                        (0.99, 200, 50, 66.66666),
-                        (0.99, 150, 50, 50),
-                        (0.50, 50, 25, 0.0),
-                        (0.50, 10, 5, 0.0),
-])
-def test_get_trade_stake_amount_unlimited_amount(default_conf, ticker, balance_ratio, capital,
-                                                 result1, result2, limit_buy_order_open,
-                                                 fee, mocker) -> None:
+@pytest.mark.parametrize(
+    "balance_ratio,capital,result1,result2",
+    [
+        (1, None, 50, 66.66666),
+        (0.99, None, 49.5, 66.0),
+        (0.50, None, 25, 33.3333),
+        # Tests with capital ignore balance_ratio
+        (1, 100, 50, 0.0),
+        (0.99, 200, 50, 66.66666),
+        (0.99, 150, 50, 50),
+        (0.50, 50, 25, 0.0),
+        (0.50, 10, 5, 0.0),
+    ],
+)
+def test_get_trade_stake_amount_unlimited_amount(
+    default_conf,
+    ticker,
+    balance_ratio,
+    capital,
+    result1,
+    result2,
+    limit_buy_order_open,
+    fee,
+    mocker,
+) -> None:
     mocker.patch.multiple(
         EXMS,
         fetch_ticker=ticker,
         create_order=MagicMock(return_value=limit_buy_order_open),
-        get_fee=fee
+        get_fee=fee,
     )
 
     conf = deepcopy(default_conf)
-    conf['stake_amount'] = UNLIMITED_STAKE_AMOUNT
-    conf['dry_run_wallet'] = 100
-    conf['tradable_balance_ratio'] = balance_ratio
+    conf["stake_amount"] = UNLIMITED_STAKE_AMOUNT
+    conf["dry_run_wallet"] = 100
+    conf["tradable_balance_ratio"] = balance_ratio
     if capital is not None:
-        conf['available_capital'] = capital
+        conf["available_capital"] = capital
 
     freqtrade = get_patched_freqtradebot(mocker, conf)
 
     # no open trades, order amount should be 'balance / max_open_trades'
-    result = freqtrade.wallets.get_trade_stake_amount('ETH/USDT', 2)
+    result = freqtrade.wallets.get_trade_stake_amount("ETH/USDT", 2)
     assert result == result1
 
     # create one trade, order amount should be 'balance / (max_open_trades - num_open_trades)'
-    freqtrade.execute_entry('ETH/USDT', result)
+    freqtrade.execute_entry("ETH/USDT", result)
 
-    result = freqtrade.wallets.get_trade_stake_amount('LTC/USDT', 2)
+    result = freqtrade.wallets.get_trade_stake_amount("LTC/USDT", 2)
     assert result == result1
 
     # create 2 trades, order amount should be None
-    freqtrade.execute_entry('LTC/BTC', result)
+    freqtrade.execute_entry("LTC/BTC", result)
 
-    result = freqtrade.wallets.get_trade_stake_amount('XRP/USDT', 2)
+    result = freqtrade.wallets.get_trade_stake_amount("XRP/USDT", 2)
     assert result == 0
 
-    freqtrade.config['dry_run_wallet'] = 200
+    freqtrade.config["dry_run_wallet"] = 200
     freqtrade.wallets.start_cap = 200
-    result = freqtrade.wallets.get_trade_stake_amount('XRP/USDT', 3)
+    result = freqtrade.wallets.get_trade_stake_amount("XRP/USDT", 3)
     assert round(result, 4) == round(result2, 4)
 
     # set max_open_trades = None, so do not trade
-    result = freqtrade.wallets.get_trade_stake_amount('NEO/USDT', 0)
+    result = freqtrade.wallets.get_trade_stake_amount("NEO/USDT", 0)
     assert result == 0
 
 
-@pytest.mark.parametrize('stake_amount,min_stake,stake_available,max_stake,trade_amount,expected', [
-    (22, 11, 50, 10000, None, 22),
-    (100, 11, 500, 10000, None, 100),
-    (1000, 11, 500, 10000, None, 500),  # Above stake_available
-    (700, 11, 1000, 400, None, 400),  # Above max_stake, below stake available
-    (20, 15, 10, 10000, None, 0),  # Minimum stake > stake_available
-    (9, 11, 100, 10000, None, 11),  # Below min stake
-    (1, 15, 10, 10000, None, 0),  # Below min stake and min_stake > stake_available
-    (20, 50, 100, 10000, None, 0),  # Below min stake and stake * 1.3 > min_stake
-    (1000, None, 1000, 10000, None, 1000),  # No min-stake-amount could be determined
-    (2000, 15, 2000, 3000, 1500, 1500),  # Rebuy - resulting in too high stake amount. Adjusting.
-])
+@pytest.mark.parametrize(
+    "stake_amount,min_stake,stake_available,max_stake,trade_amount,expected",
+    [
+        (22, 11, 50, 10000, None, 22),
+        (100, 11, 500, 10000, None, 100),
+        (1000, 11, 500, 10000, None, 500),  # Above stake_available
+        (700, 11, 1000, 400, None, 400),  # Above max_stake, below stake available
+        (20, 15, 10, 10000, None, 0),  # Minimum stake > stake_available
+        (9, 11, 100, 10000, None, 11),  # Below min stake
+        (1, 15, 10, 10000, None, 0),  # Below min stake and min_stake > stake_available
+        (20, 50, 100, 10000, None, 0),  # Below min stake and stake * 1.3 > min_stake
+        (1000, None, 1000, 10000, None, 1000),  # No min-stake-amount could be determined
+        # Rebuy - resulting in too high stake amount. Adjusting.
+        (2000, 15, 2000, 3000, 1500, 1500),
+    ],
+)
 def test_validate_stake_amount(
     mocker,
     default_conf,
@@ -204,33 +203,41 @@ def test_validate_stake_amount(
 ):
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
 
-    mocker.patch("freqtrade.wallets.Wallets.get_available_stake_amount",
-                 return_value=stake_available)
+    mocker.patch(
+        "freqtrade.wallets.Wallets.get_available_stake_amount", return_value=stake_available
+    )
     res = freqtrade.wallets.validate_stake_amount(
-        'XRP/USDT', stake_amount, min_stake, max_stake, trade_amount)
+        "XRP/USDT", stake_amount, min_stake, max_stake, trade_amount
+    )
     assert res == expected
 
 
-@pytest.mark.parametrize('available_capital,closed_profit,open_stakes,free,expected', [
-    (None, 10, 100, 910, 1000),
-    (None, 0, 0, 2500, 2500),
-    (None, 500, 0, 2500, 2000),
-    (None, 500, 0, 2500, 2000),
-    (None, -70, 0, 1930, 2000),
-    # Only available balance matters when it's set.
-    (100, 0, 0, 0, 100),
-    (1000, 0, 2, 5, 1000),
-    (1235, 2250, 2, 5, 1235),
-    (1235, -2250, 2, 5, 1235),
-])
-def test_get_starting_balance(mocker, default_conf, available_capital, closed_profit,
-                              open_stakes, free, expected):
+@pytest.mark.parametrize(
+    "available_capital,closed_profit,open_stakes,free,expected",
+    [
+        (None, 10, 100, 910, 1000),
+        (None, 0, 0, 2500, 2500),
+        (None, 500, 0, 2500, 2000),
+        (None, 500, 0, 2500, 2000),
+        (None, -70, 0, 1930, 2000),
+        # Only available balance matters when it's set.
+        (100, 0, 0, 0, 100),
+        (1000, 0, 2, 5, 1000),
+        (1235, 2250, 2, 5, 1235),
+        (1235, -2250, 2, 5, 1235),
+    ],
+)
+def test_get_starting_balance(
+    mocker, default_conf, available_capital, closed_profit, open_stakes, free, expected
+):
     if available_capital:
-        default_conf['available_capital'] = available_capital
-    mocker.patch("freqtrade.persistence.models.Trade.get_total_closed_profit",
-                 return_value=closed_profit)
-    mocker.patch("freqtrade.persistence.models.Trade.total_open_trades_stakes",
-                 return_value=open_stakes)
+        default_conf["available_capital"] = available_capital
+    mocker.patch(
+        "freqtrade.persistence.models.Trade.get_total_closed_profit", return_value=closed_profit
+    )
+    mocker.patch(
+        "freqtrade.persistence.models.Trade.total_open_trades_stakes", return_value=open_stakes
+    )
     mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=free)
 
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
@@ -239,9 +246,9 @@ def test_get_starting_balance(mocker, default_conf, available_capital, closed_pr
 
 
 def test_sync_wallet_futures_live(mocker, default_conf):
-    default_conf['dry_run'] = False
-    default_conf['trading_mode'] = 'futures'
-    default_conf['margin_mode'] = 'isolated'
+    default_conf["dry_run"] = False
+    default_conf["trading_mode"] = "futures"
+    default_conf["margin_mode"] = "isolated"
     mock_result = [
         {
             "symbol": "ETH/USDT:USDT",
@@ -262,8 +269,8 @@ def test_sync_wallet_futures_live(mocker, default_conf):
             "markPrice": 2896.41,
             "collateral": 20,
             "marginType": "isolated",
-            "side": 'short',
-            "percentage": None
+            "side": "short",
+            "percentage": None,
         },
         {
             "symbol": "ADA/USDT:USDT",
@@ -284,8 +291,8 @@ def test_sync_wallet_futures_live(mocker, default_conf):
             "markPrice": 0.91,
             "collateral": 20,
             "marginType": "isolated",
-            "side": 'short',
-            "percentage": None
+            "side": "short",
+            "percentage": None,
         },
         {
             # Closed position
@@ -307,20 +314,18 @@ def test_sync_wallet_futures_live(mocker, default_conf):
             "markPrice": 15.41,
             "collateral": 0.0,
             "marginType": "isolated",
-            "side": 'short',
-            "percentage": None
-        }
+            "side": "short",
+            "percentage": None,
+        },
     ]
     mocker.patch.multiple(
         EXMS,
-        get_balances=MagicMock(return_value={
-            "USDT": {
-                "free": 900,
-                "used": 100,
-                "total": 1000
-            },
-        }),
-        fetch_positions=MagicMock(return_value=mock_result)
+        get_balances=MagicMock(
+            return_value={
+                "USDT": {"free": 900, "used": 100, "total": 1000},
+            }
+        ),
+        fetch_positions=MagicMock(return_value=mock_result),
     )
 
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
@@ -328,23 +333,23 @@ def test_sync_wallet_futures_live(mocker, default_conf):
     assert len(freqtrade.wallets._wallets) == 1
     assert len(freqtrade.wallets._positions) == 2
 
-    assert 'USDT' in freqtrade.wallets._wallets
-    assert 'ETH/USDT:USDT' in freqtrade.wallets._positions
+    assert "USDT" in freqtrade.wallets._wallets
+    assert "ETH/USDT:USDT" in freqtrade.wallets._positions
     assert freqtrade.wallets._last_wallet_refresh is not None
 
     # Remove ETH/USDT:USDT position
     del mock_result[0]
     freqtrade.wallets.update()
     assert len(freqtrade.wallets._positions) == 1
-    assert 'ETH/USDT:USDT' not in freqtrade.wallets._positions
+    assert "ETH/USDT:USDT" not in freqtrade.wallets._positions
 
 
 def test_sync_wallet_dry(mocker, default_conf_usdt, fee):
-    default_conf_usdt['dry_run'] = True
+    default_conf_usdt["dry_run"] = True
     freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
     assert len(freqtrade.wallets._wallets) == 1
     assert len(freqtrade.wallets._positions) == 0
-    assert freqtrade.wallets.get_total('USDT') == 1000
+    assert freqtrade.wallets.get_total("USDT") == 1000
 
     create_mock_trades_usdt(fee, is_short=None)
 
@@ -353,23 +358,23 @@ def test_sync_wallet_dry(mocker, default_conf_usdt, fee):
     assert len(freqtrade.wallets._wallets) == 5
     assert len(freqtrade.wallets._positions) == 0
     bal = freqtrade.wallets.get_all_balances()
-    assert bal['NEO'].total == 10
-    assert bal['XRP'].total == 10
-    assert bal['LTC'].total == 2
-    assert bal['USDT'].total == 922.74
+    assert bal["NEO"].total == 10
+    assert bal["XRP"].total == 10
+    assert bal["LTC"].total == 2
+    assert bal["USDT"].total == 922.74
 
-    assert freqtrade.wallets.get_starting_balance() == default_conf_usdt['dry_run_wallet']
-    total = freqtrade.wallets.get_total('LTC')
-    free = freqtrade.wallets.get_free('LTC')
-    used = freqtrade.wallets.get_used('LTC')
+    assert freqtrade.wallets.get_starting_balance() == default_conf_usdt["dry_run_wallet"]
+    total = freqtrade.wallets.get_total("LTC")
+    free = freqtrade.wallets.get_free("LTC")
+    used = freqtrade.wallets.get_used("LTC")
     assert free != 0
     assert free + used == total
 
 
 def test_sync_wallet_futures_dry(mocker, default_conf, fee):
-    default_conf['dry_run'] = True
-    default_conf['trading_mode'] = 'futures'
-    default_conf['margin_mode'] = 'isolated'
+    default_conf["dry_run"] = True
+    default_conf["trading_mode"] = "futures"
+    default_conf["margin_mode"] = "isolated"
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
     assert len(freqtrade.wallets._wallets) == 1
     assert len(freqtrade.wallets._positions) == 0
@@ -381,15 +386,15 @@ def test_sync_wallet_futures_dry(mocker, default_conf, fee):
     assert len(freqtrade.wallets._wallets) == 1
     assert len(freqtrade.wallets._positions) == 4
     positions = freqtrade.wallets.get_all_positions()
-    assert positions['ETH/BTC'].side == 'short'
-    assert positions['ETC/BTC'].side == 'long'
-    assert positions['XRP/BTC'].side == 'long'
-    assert positions['LTC/BTC'].side == 'short'
+    assert positions["ETH/BTC"].side == "short"
+    assert positions["ETC/BTC"].side == "long"
+    assert positions["XRP/BTC"].side == "long"
+    assert positions["LTC/BTC"].side == "short"
 
-    assert freqtrade.wallets.get_starting_balance() == default_conf['dry_run_wallet']
-    total = freqtrade.wallets.get_total('BTC')
-    free = freqtrade.wallets.get_free('BTC')
-    used = freqtrade.wallets.get_used('BTC')
+    assert freqtrade.wallets.get_starting_balance() == default_conf["dry_run_wallet"]
+    total = freqtrade.wallets.get_total("BTC")
+    free = freqtrade.wallets.get_free("BTC")
+    used = freqtrade.wallets.get_used("BTC")
     assert free + used == total
 
 
@@ -416,14 +421,14 @@ def test_check_exit_amount(mocker, default_conf, fee):
 
 
 def test_check_exit_amount_futures(mocker, default_conf, fee):
-    default_conf['trading_mode'] = 'futures'
-    default_conf['margin_mode'] = 'isolated'
+    default_conf["trading_mode"] = "futures"
+    default_conf["margin_mode"] = "isolated"
     freqtrade = get_patched_freqtradebot(mocker, default_conf)
     total_mock = mocker.patch("freqtrade.wallets.Wallets.get_total", return_value=123)
 
     create_mock_trades(fee, is_short=None)
     trade = Trade.session.scalars(select(Trade)).first()
-    trade.trading_mode = 'futures'
+    trade.trading_mode = "futures"
     assert trade.amount == 123
 
     assert freqtrade.wallets.check_exit_amount(trade) is True
diff --git a/tests/utils/test_binance_mig.py b/tests/utils/test_binance_mig.py
index b509b7320..f700ff73a 100644
--- a/tests/utils/test_binance_mig.py
+++ b/tests/utils/test_binance_mig.py
@@ -3,39 +3,38 @@ import shutil
 import pytest
 
 from freqtrade.persistence import Trade
-from freqtrade.util.migrations import (migrate_binance_futures_data, migrate_binance_futures_names,
-                                       migrate_data)
+from freqtrade.util.migrations import migrate_binance_futures_data, migrate_data
+from freqtrade.util.migrations.binance_mig import migrate_binance_futures_names
 from tests.conftest import create_mock_trades_usdt, log_has
 
 
 def test_binance_mig_data_conversion(default_conf_usdt, tmp_path, testdatadir):
-
     # call doing nothing (spot mode)
     migrate_binance_futures_data(default_conf_usdt)
-    default_conf_usdt['trading_mode'] = 'futures'
-    pair_old = 'XRP_USDT'
-    pair_unified = 'XRP_USDT_USDT'
-    futures_src = testdatadir / 'futures'
-    futures_dst = tmp_path / 'futures'
+    default_conf_usdt["trading_mode"] = "futures"
+    pair_old = "XRP_USDT"
+    pair_unified = "XRP_USDT_USDT"
+    futures_src = testdatadir / "futures"
+    futures_dst = tmp_path / "futures"
     futures_dst.mkdir()
     files = [
-        '-1h-mark.feather',
-        '-1h-futures.feather',
-        '-8h-funding_rate.feather',
-        '-8h-mark.feather',
+        "-1h-mark.feather",
+        "-1h-futures.feather",
+        "-8h-funding_rate.feather",
+        "-8h-mark.feather",
     ]
 
     # Copy files to tmpdir and rename to old naming
     for file in files:
-        fn_after = futures_dst / f'{pair_old}{file}'
-        shutil.copy(futures_src / f'{pair_unified}{file}', fn_after)
+        fn_after = futures_dst / f"{pair_old}{file}"
+        shutil.copy(futures_src / f"{pair_unified}{file}", fn_after)
 
-    default_conf_usdt['datadir'] = tmp_path
+    default_conf_usdt["datadir"] = tmp_path
     # Migrate files to unified namings
     migrate_binance_futures_data(default_conf_usdt)
 
     for file in files:
-        fn_after = futures_dst / f'{pair_unified}{file}'
+        fn_after = futures_dst / f"{pair_unified}{file}"
         assert fn_after.exists()
 
 
@@ -47,19 +46,19 @@ def test_binance_mig_db_conversion(default_conf_usdt, fee, caplog):
     create_mock_trades_usdt(fee, None)
 
     for t in Trade.get_trades():
-        t.trading_mode = 'FUTURES'
-        t.exchange = 'binance'
+        t.trading_mode = "FUTURES"
+        t.exchange = "binance"
     Trade.commit()
 
-    default_conf_usdt['trading_mode'] = 'futures'
+    default_conf_usdt["trading_mode"] = "futures"
     migrate_binance_futures_names(default_conf_usdt)
-    assert log_has('Migrating binance futures pairs in database.', caplog)
+    assert log_has("Migrating binance futures pairs in database.", caplog)
 
 
 def test_migration_wrapper(default_conf_usdt, mocker):
-    default_conf_usdt['trading_mode'] = 'futures'
-    binmock = mocker.patch('freqtrade.util.migrations.migrate_binance_futures_data')
-    funding_mock = mocker.patch('freqtrade.util.migrations.migrate_funding_fee_timeframe')
+    default_conf_usdt["trading_mode"] = "futures"
+    binmock = mocker.patch("freqtrade.util.migrations.migrate_binance_futures_data")
+    funding_mock = mocker.patch("freqtrade.util.migrations.migrate_funding_fee_timeframe")
     migrate_data(default_conf_usdt)
 
     assert binmock.call_count == 1
diff --git a/tests/utils/test_ccxt_precise.py b/tests/utils/test_ccxt_precise.py
index 5542ac8d2..536a66182 100644
--- a/tests/utils/test_ccxt_precise.py
+++ b/tests/utils/test_ccxt_precise.py
@@ -1,82 +1,82 @@
 from freqtrade.util import FtPrecise
 
 
-ws = FtPrecise('-1.123e-6')
-ws = FtPrecise('-1.123e-6')
-xs = FtPrecise('0.00000002')
-ys = FtPrecise('69696900000')
-zs = FtPrecise('0')
+ws = FtPrecise("-1.123e-6")
+ws = FtPrecise("-1.123e-6")
+xs = FtPrecise("0.00000002")
+ys = FtPrecise("69696900000")
+zs = FtPrecise("0")
 
 
 def test_FtPrecise():
-    assert ys * xs == '1393.938'
-    assert xs * ys == '1393.938'
+    assert ys * xs == "1393.938"
+    assert xs * ys == "1393.938"
 
-    assert ys + xs == '69696900000.00000002'
-    assert xs + ys == '69696900000.00000002'
-    assert xs - ys == '-69696899999.99999998'
-    assert ys - xs == '69696899999.99999998'
-    assert xs / ys == '0'
-    assert ys / xs == '3484845000000000000'
+    assert ys + xs == "69696900000.00000002"
+    assert xs + ys == "69696900000.00000002"
+    assert xs - ys == "-69696899999.99999998"
+    assert ys - xs == "69696899999.99999998"
+    assert xs / ys == "0"
+    assert ys / xs == "3484845000000000000"
 
-    assert ws * xs == '-0.00000000000002246'
-    assert xs * ws == '-0.00000000000002246'
+    assert ws * xs == "-0.00000000000002246"
+    assert xs * ws == "-0.00000000000002246"
 
-    assert ws + xs == '-0.000001103'
-    assert xs + ws == '-0.000001103'
+    assert ws + xs == "-0.000001103"
+    assert xs + ws == "-0.000001103"
 
-    assert xs - ws == '0.000001143'
-    assert ws - xs == '-0.000001143'
+    assert xs - ws == "0.000001143"
+    assert ws - xs == "-0.000001143"
 
-    assert xs / ws == '-0.017809439002671415'
-    assert ws / xs == '-56.15'
+    assert xs / ws == "-0.017809439002671415"
+    assert ws / xs == "-56.15"
 
-    assert zs * ws == '0'
-    assert zs * xs == '0'
-    assert zs * ys == '0'
-    assert ws * zs == '0'
-    assert xs * zs == '0'
-    assert ys * zs == '0'
+    assert zs * ws == "0"
+    assert zs * xs == "0"
+    assert zs * ys == "0"
+    assert ws * zs == "0"
+    assert xs * zs == "0"
+    assert ys * zs == "0"
 
-    assert zs + ws == '-0.000001123'
-    assert zs + xs == '0.00000002'
-    assert zs + ys == '69696900000'
-    assert ws + zs == '-0.000001123'
-    assert xs + zs == '0.00000002'
-    assert ys + zs == '69696900000'
+    assert zs + ws == "-0.000001123"
+    assert zs + xs == "0.00000002"
+    assert zs + ys == "69696900000"
+    assert ws + zs == "-0.000001123"
+    assert xs + zs == "0.00000002"
+    assert ys + zs == "69696900000"
 
-    assert abs(FtPrecise('-500.1')) == '500.1'
-    assert abs(FtPrecise('213')) == '213'
+    assert abs(FtPrecise("-500.1")) == "500.1"
+    assert abs(FtPrecise("213")) == "213"
 
-    assert abs(FtPrecise('-500.1')) == '500.1'
-    assert -FtPrecise('213') == '-213'
+    assert abs(FtPrecise("-500.1")) == "500.1"
+    assert -FtPrecise("213") == "-213"
 
-    assert FtPrecise('10.1') % FtPrecise('0.5') == '0.1'
-    assert FtPrecise('5550') % FtPrecise('120') == '30'
+    assert FtPrecise("10.1") % FtPrecise("0.5") == "0.1"
+    assert FtPrecise("5550") % FtPrecise("120") == "30"
 
-    assert FtPrecise('-0.0') == FtPrecise('0')
-    assert FtPrecise('5.534000') == FtPrecise('5.5340')
+    assert FtPrecise("-0.0") == FtPrecise("0")
+    assert FtPrecise("5.534000") == FtPrecise("5.5340")
 
-    assert min(FtPrecise('-3.1415'), FtPrecise('-2')) == '-3.1415'
+    assert min(FtPrecise("-3.1415"), FtPrecise("-2")) == "-3.1415"
 
-    assert max(FtPrecise('3.1415'), FtPrecise('-2')) == '3.1415'
+    assert max(FtPrecise("3.1415"), FtPrecise("-2")) == "3.1415"
 
-    assert FtPrecise('2') > FtPrecise('1.2345')
-    assert not FtPrecise('-3.1415') > FtPrecise('-2')
-    assert not FtPrecise('3.1415') > FtPrecise('3.1415')
-    assert FtPrecise.string_gt('3.14150000000000000000001', '3.1415')
+    assert FtPrecise("2") > FtPrecise("1.2345")
+    assert not FtPrecise("-3.1415") > FtPrecise("-2")
+    assert not FtPrecise("3.1415") > FtPrecise("3.1415")
+    assert FtPrecise.string_gt("3.14150000000000000000001", "3.1415")
 
-    assert FtPrecise('3.1415') >= FtPrecise('3.1415')
-    assert FtPrecise('3.14150000000000000000001') >= FtPrecise('3.1415')
+    assert FtPrecise("3.1415") >= FtPrecise("3.1415")
+    assert FtPrecise("3.14150000000000000000001") >= FtPrecise("3.1415")
 
-    assert not FtPrecise('3.1415') < FtPrecise('3.1415')
+    assert not FtPrecise("3.1415") < FtPrecise("3.1415")
 
-    assert FtPrecise('3.1415') <= FtPrecise('3.1415')
-    assert FtPrecise('3.1415') <= FtPrecise('3.14150000000000000000001')
+    assert FtPrecise("3.1415") <= FtPrecise("3.1415")
+    assert FtPrecise("3.1415") <= FtPrecise("3.14150000000000000000001")
 
-    assert FtPrecise(213) == '213'
-    assert FtPrecise(-213) == '-213'
-    assert str(FtPrecise(-213)) == '-213'
-    assert FtPrecise(213.2) == '213.2'
+    assert FtPrecise(213) == "213"
+    assert FtPrecise(-213) == "-213"
+    assert str(FtPrecise(-213)) == "-213"
+    assert FtPrecise(213.2) == "213.2"
     assert float(FtPrecise(213.2)) == 213.2
     assert float(FtPrecise(-213.2)) == -213.2
diff --git a/tests/utils/test_datetime_helpers.py b/tests/utils/test_datetime_helpers.py
index 20e6fc0f5..d17d2ec5a 100644
--- a/tests/utils/test_datetime_helpers.py
+++ b/tests/utils/test_datetime_helpers.py
@@ -3,8 +3,18 @@ from datetime import datetime, timedelta, timezone
 import pytest
 import time_machine
 
-from freqtrade.util import (dt_floor_day, dt_from_ts, dt_now, dt_ts, dt_ts_def, dt_ts_none, dt_utc,
-                            format_date, format_ms_time, shorten_date)
+from freqtrade.util import (
+    dt_floor_day,
+    dt_from_ts,
+    dt_now,
+    dt_ts,
+    dt_ts_def,
+    dt_ts_none,
+    dt_utc,
+    format_date,
+    format_ms_time,
+    shorten_date,
+)
 from freqtrade.util.datetime_helpers import dt_humanize_delta
 
 
@@ -39,16 +49,18 @@ def test_dt_ts_none():
 
 def test_dt_utc():
     assert dt_utc(2023, 5, 5) == datetime(2023, 5, 5, tzinfo=timezone.utc)
-    assert dt_utc(2023, 5, 5, 0, 0, 0, 555500) == datetime(2023, 5, 5, 0, 0, 0, 555500,
-                                                           tzinfo=timezone.utc)
+    assert dt_utc(2023, 5, 5, 0, 0, 0, 555500) == datetime(
+        2023, 5, 5, 0, 0, 0, 555500, tzinfo=timezone.utc
+    )
 
 
-@pytest.mark.parametrize('as_ms', [True, False])
+@pytest.mark.parametrize("as_ms", [True, False])
 def test_dt_from_ts(as_ms):
     multi = 1000 if as_ms else 1
     assert dt_from_ts(1683244800.0 * multi) == datetime(2023, 5, 5, tzinfo=timezone.utc)
-    assert dt_from_ts(1683244800.5555 * multi) == datetime(2023, 5, 5, 0, 0, 0, 555500,
-                                                           tzinfo=timezone.utc)
+    assert dt_from_ts(1683244800.5555 * multi) == datetime(
+        2023, 5, 5, 0, 0, 0, 555500, tzinfo=timezone.utc
+    )
     # As int
     assert dt_from_ts(1683244800 * multi) == datetime(2023, 5, 5, tzinfo=timezone.utc)
     # As milliseconds
@@ -63,18 +75,18 @@ def test_dt_floor_day():
 
 
 def test_shorten_date() -> None:
-    str_data = '1 day, 2 hours, 3 minutes, 4 seconds ago'
-    str_shorten_data = '1 d, 2 h, 3 min, 4 sec ago'
+    str_data = "1 day, 2 hours, 3 minutes, 4 seconds ago"
+    str_shorten_data = "1 d, 2 h, 3 min, 4 sec ago"
     assert shorten_date(str_data) == str_shorten_data
 
 
 def test_dt_humanize() -> None:
-    assert dt_humanize_delta(dt_now()) == 'now'
-    assert dt_humanize_delta(dt_now() - timedelta(minutes=50)) == '50 minutes ago'
-    assert dt_humanize_delta(dt_now() - timedelta(hours=16)) == '16 hours ago'
-    assert dt_humanize_delta(dt_now() - timedelta(hours=16, minutes=30)) == '16 hours ago'
-    assert dt_humanize_delta(dt_now() - timedelta(days=16, hours=10, minutes=25)) == '16 days ago'
-    assert dt_humanize_delta(dt_now() - timedelta(minutes=50)) == '50 minutes ago'
+    assert dt_humanize_delta(dt_now()) == "now"
+    assert dt_humanize_delta(dt_now() - timedelta(minutes=50)) == "50 minutes ago"
+    assert dt_humanize_delta(dt_now() - timedelta(hours=16)) == "16 hours ago"
+    assert dt_humanize_delta(dt_now() - timedelta(hours=16, minutes=30)) == "16 hours ago"
+    assert dt_humanize_delta(dt_now() - timedelta(days=16, hours=10, minutes=25)) == "16 days ago"
+    assert dt_humanize_delta(dt_now() - timedelta(minutes=50)) == "50 minutes ago"
 
 
 def test_format_ms_time() -> None:
@@ -83,20 +95,19 @@ def test_format_ms_time() -> None:
     date = format_ms_time(date_in_epoch_ms)
     assert isinstance(date, str)
     res = datetime(2018, 4, 10, 18, 2, 1, tzinfo=timezone.utc)
-    assert date == res.strftime('%Y-%m-%dT%H:%M:%S')
-    assert date == '2018-04-10T18:02:01'
+    assert date == res.strftime("%Y-%m-%dT%H:%M:%S")
+    assert date == "2018-04-10T18:02:01"
     res = datetime(2017, 12, 13, 8, 2, 1, tzinfo=timezone.utc)
     # Date 2017-12-13 08:02:01
     date_in_epoch_ms = 1513152121000
-    assert format_ms_time(date_in_epoch_ms) == res.strftime('%Y-%m-%dT%H:%M:%S')
+    assert format_ms_time(date_in_epoch_ms) == res.strftime("%Y-%m-%dT%H:%M:%S")
 
 
 def test_format_date() -> None:
-
     date = datetime(2023, 9, 1, 5, 2, 3, 455555, tzinfo=timezone.utc)
-    assert format_date(date) == '2023-09-01 05:02:03'
-    assert format_date(None) == ''
+    assert format_date(date) == "2023-09-01 05:02:03"
+    assert format_date(None) == ""
 
     date = datetime(2021, 9, 30, 22, 59, 3, 455555, tzinfo=timezone.utc)
-    assert format_date(date) == '2021-09-30 22:59:03'
-    assert format_date(None) == ''
+    assert format_date(date) == "2021-09-30 22:59:03"
+    assert format_date(None) == ""
diff --git a/tests/utils/test_formatters.py b/tests/utils/test_formatters.py
index 2a989ce81..ab86e224c 100644
--- a/tests/utils/test_formatters.py
+++ b/tests/utils/test_formatters.py
@@ -2,36 +2,35 @@ from freqtrade.util import decimals_per_coin, fmt_coin, round_value
 
 
 def test_decimals_per_coin():
-    assert decimals_per_coin('USDT') == 3
-    assert decimals_per_coin('EUR') == 3
-    assert decimals_per_coin('BTC') == 8
-    assert decimals_per_coin('ETH') == 5
+    assert decimals_per_coin("USDT") == 3
+    assert decimals_per_coin("EUR") == 3
+    assert decimals_per_coin("BTC") == 8
+    assert decimals_per_coin("ETH") == 5
 
 
 def test_fmt_coin():
-    assert fmt_coin(222.222222, 'USDT') == '222.222 USDT'
-    assert fmt_coin(222.2, 'USDT', keep_trailing_zeros=True) == '222.200 USDT'
-    assert fmt_coin(222.2, 'USDT') == '222.2 USDT'
-    assert fmt_coin(222.12745, 'EUR') == '222.127 EUR'
-    assert fmt_coin(0.1274512123, 'BTC') == '0.12745121 BTC'
-    assert fmt_coin(0.1274512123, 'ETH') == '0.12745 ETH'
+    assert fmt_coin(222.222222, "USDT") == "222.222 USDT"
+    assert fmt_coin(222.2, "USDT", keep_trailing_zeros=True) == "222.200 USDT"
+    assert fmt_coin(222.2, "USDT") == "222.2 USDT"
+    assert fmt_coin(222.12745, "EUR") == "222.127 EUR"
+    assert fmt_coin(0.1274512123, "BTC") == "0.12745121 BTC"
+    assert fmt_coin(0.1274512123, "ETH") == "0.12745 ETH"
 
-    assert fmt_coin(222.222222, 'USDT', False) == '222.222'
-    assert fmt_coin(222.2, 'USDT', False) == '222.2'
-    assert fmt_coin(222.00, 'USDT', False) == '222'
-    assert fmt_coin(222.12745, 'EUR', False) == '222.127'
-    assert fmt_coin(0.1274512123, 'BTC', False) == '0.12745121'
-    assert fmt_coin(0.1274512123, 'ETH', False) == '0.12745'
-    assert fmt_coin(222.2, 'USDT', False, True) == '222.200'
+    assert fmt_coin(222.222222, "USDT", False) == "222.222"
+    assert fmt_coin(222.2, "USDT", False) == "222.2"
+    assert fmt_coin(222.00, "USDT", False) == "222"
+    assert fmt_coin(222.12745, "EUR", False) == "222.127"
+    assert fmt_coin(0.1274512123, "BTC", False) == "0.12745121"
+    assert fmt_coin(0.1274512123, "ETH", False) == "0.12745"
+    assert fmt_coin(222.2, "USDT", False, True) == "222.200"
 
 
 def test_round_value():
-
-    assert round_value(222.222222, 3) == '222.222'
-    assert round_value(222.2, 3) == '222.2'
-    assert round_value(222.00, 3) == '222'
-    assert round_value(222.12745, 3) == '222.127'
-    assert round_value(0.1274512123, 8) == '0.12745121'
-    assert round_value(0.1274512123, 5) == '0.12745'
-    assert round_value(222.2, 3, True) == '222.200'
-    assert round_value(222.2, 0, True) == '222'
+    assert round_value(222.222222, 3) == "222.222"
+    assert round_value(222.2, 3) == "222.2"
+    assert round_value(222.00, 3) == "222"
+    assert round_value(222.12745, 3) == "222.127"
+    assert round_value(0.1274512123, 8) == "0.12745121"
+    assert round_value(0.1274512123, 5) == "0.12745"
+    assert round_value(222.2, 3, True) == "222.200"
+    assert round_value(222.2, 0, True) == "222"
diff --git a/tests/utils/test_funding_rate_migration.py b/tests/utils/test_funding_rate_migration.py
index ccb8435cf..094ee1562 100644
--- a/tests/utils/test_funding_rate_migration.py
+++ b/tests/utils/test_funding_rate_migration.py
@@ -4,22 +4,21 @@ from freqtrade.util.migrations import migrate_funding_fee_timeframe
 
 
 def test_migrate_funding_rate_timeframe(default_conf_usdt, tmp_path, testdatadir):
-
-    copytree(testdatadir / 'futures', tmp_path / 'futures')
-    file_4h = tmp_path / 'futures' / 'XRP_USDT_USDT-4h-funding_rate.feather'
-    file_8h = tmp_path / 'futures' / 'XRP_USDT_USDT-8h-funding_rate.feather'
-    file_1h = tmp_path / 'futures' / 'XRP_USDT_USDT-1h-futures.feather'
+    copytree(testdatadir / "futures", tmp_path / "futures")
+    file_4h = tmp_path / "futures" / "XRP_USDT_USDT-4h-funding_rate.feather"
+    file_8h = tmp_path / "futures" / "XRP_USDT_USDT-8h-funding_rate.feather"
+    file_1h = tmp_path / "futures" / "XRP_USDT_USDT-1h-futures.feather"
     file_8h.rename(file_4h)
     assert file_1h.exists()
     assert file_4h.exists()
     assert not file_8h.exists()
 
-    default_conf_usdt['datadir'] = tmp_path
+    default_conf_usdt["datadir"] = tmp_path
 
     # Inactive on spot trading ...
     migrate_funding_fee_timeframe(default_conf_usdt, None)
 
-    default_conf_usdt['trading_mode'] = 'futures'
+    default_conf_usdt["trading_mode"] = "futures"
 
     migrate_funding_fee_timeframe(default_conf_usdt, None)
 
diff --git a/tests/utils/test_measure_time.py b/tests/utils/test_measure_time.py
index dac509907..2d44a3254 100644
--- a/tests/utils/test_measure_time.py
+++ b/tests/utils/test_measure_time.py
@@ -6,10 +6,8 @@ from freqtrade.util import MeasureTime
 
 
 def test_measure_time():
-
     callback = MagicMock()
     with time_machine.travel("2021-09-01 05:00:00 +00:00", tick=False) as t:
-
         measure = MeasureTime(callback, 5, ttl=60)
         with measure:
             pass
diff --git a/tests/utils/test_periodiccache.py b/tests/utils/test_periodiccache.py
index a8931d6a2..684e7755d 100644
--- a/tests/utils/test_periodiccache.py
+++ b/tests/utils/test_periodiccache.py
@@ -4,31 +4,29 @@ from freqtrade.util import PeriodicCache
 
 
 def test_ttl_cache():
-
     with time_machine.travel("2021-09-01 05:00:00 +00:00", tick=False) as t:
-
         cache = PeriodicCache(5, ttl=60)
         cache1h = PeriodicCache(5, ttl=3600)
 
         assert cache.timer() == 1630472400.0
-        cache['a'] = 1235
-        cache1h['a'] = 555123
-        assert 'a' in cache
-        assert 'a' in cache1h
+        cache["a"] = 1235
+        cache1h["a"] = 555123
+        assert "a" in cache
+        assert "a" in cache1h
 
         t.move_to("2021-09-01 05:00:59 +00:00")
-        assert 'a' in cache
-        assert 'a' in cache1h
+        assert "a" in cache
+        assert "a" in cache1h
 
         # Cache expired
         t.move_to("2021-09-01 05:01:00 +00:00")
-        assert 'a' not in cache
-        assert 'a' in cache1h
+        assert "a" not in cache
+        assert "a" in cache1h
 
         t.move_to("2021-09-01 05:59:59 +00:00")
-        assert 'a' not in cache
-        assert 'a' in cache1h
+        assert "a" not in cache
+        assert "a" in cache1h
 
         t.move_to("2021-09-01 06:00:00 +00:00")
-        assert 'a' not in cache
-        assert 'a' not in cache1h
+        assert "a" not in cache
+        assert "a" not in cache1h
diff --git a/tests/utils/test_rendering_utils.py b/tests/utils/test_rendering_utils.py
index e03307ff1..f42c77dea 100644
--- a/tests/utils/test_rendering_utils.py
+++ b/tests/utils/test_rendering_utils.py
@@ -5,15 +5,16 @@ from freqtrade.util import render_template, render_template_with_fallback
 
 def test_render_template_fallback():
     from jinja2.exceptions import TemplateNotFound
+
     with pytest.raises(TemplateNotFound):
         val = render_template(
-            templatefile='subtemplates/indicators_does-not-exist.j2',
+            templatefile="subtemplates/indicators_does-not-exist.j2",
             arguments={},
         )
 
     val = render_template_with_fallback(
-        templatefile='strategy_subtemplates/indicators_does-not-exist.j2',
-        templatefallbackfile='strategy_subtemplates/indicators_minimal.j2',
+        templatefile="strategy_subtemplates/indicators_does-not-exist.j2",
+        templatefallbackfile="strategy_subtemplates/indicators_minimal.j2",
     )
     assert isinstance(val, str)
-    assert 'if self.dp' in val
+    assert "if self.dp" in val