Merge branch 'freqtrade:develop' into develop

Simon Waiblinger 2024-01-25 22:28:05 +01:00 committed by GitHub
commit 060198c04c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
83 changed files with 847 additions and 689 deletions

View File

@@ -25,7 +25,7 @@ jobs:
     strategy:
       matrix:
         os: [ ubuntu-20.04, ubuntu-22.04 ]
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
     steps:
     - uses: actions/checkout@v4
@@ -36,14 +36,14 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Cache_dependencies
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       id: cache
       with:
        path: ~/dependencies/
        key: ${{ runner.os }}-dependencies
     - name: pip cache (linux)
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
        path: ~/.cache/pip
        key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -125,7 +125,7 @@ jobs:
     strategy:
       matrix:
         os: [ "macos-latest", "macos-13" ]
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
     steps:
     - uses: actions/checkout@v4
@@ -137,14 +137,14 @@ jobs:
         check-latest: true
     - name: Cache_dependencies
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       id: cache
       with:
        path: ~/dependencies/
        key: ${{ matrix.os }}-dependencies
     - name: pip cache (macOS)
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
        path: ~/Library/Caches/pip
        key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -238,7 +238,7 @@ jobs:
     strategy:
       matrix:
         os: [ windows-latest ]
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
     steps:
     - uses: actions/checkout@v4
@@ -249,7 +249,7 @@ jobs:
         python-version: ${{ matrix.python-version }}
     - name: Pip cache (Windows)
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
        path: ~\AppData\Local\pip\Cache
        key: ${{ matrix.os }}-${{ matrix.python-version }}-pip
@@ -368,14 +368,14 @@ jobs:
         python-version: "3.11"
     - name: Cache_dependencies
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       id: cache
       with:
        path: ~/dependencies/
        key: ${{ runner.os }}-dependencies
     - name: pip cache (linux)
-      uses: actions/cache@v3
+      uses: actions/cache@v4
       with:
        path: ~/.cache/pip
        key: test-${{ matrix.os }}-${{ matrix.python-version }}-pip

View File

@@ -2,7 +2,7 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/pycqa/flake8
-    rev: "6.1.0"
+    rev: "7.0.0"
    hooks:
      - id: flake8
        additional_dependencies: [Flake8-pyproject]
@@ -19,7 +19,7 @@ repos:
          - types-requests==2.31.0.20240106
          - types-tabulate==0.9.0.20240106
          - types-python-dateutil==2.8.19.20240106
-          - SQLAlchemy==2.0.23
+          - SQLAlchemy==2.0.25
        # stages: [push]
  - repo: https://github.com/pycqa/isort
@@ -31,7 +31,7 @@ repos:
  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # Ruff version.
-    rev: 'v0.1.9'
+    rev: 'v0.1.14'
    hooks:
      - id: ruff

View File

@@ -1,4 +1,4 @@
-FROM python:3.11.6-slim-bookworm as base
+FROM python:3.11.7-slim-bookworm as base
 # Setup env
 ENV LANG C.UTF-8

View File

@@ -30,7 +30,7 @@ Please read the [exchange specific notes](docs/exchanges.md) to learn about even
 - [X] [Binance](https://www.binance.com/)
 - [X] [Bitmart](https://bitmart.com/)
 - [X] [Gate.io](https://www.gate.io/ref/6266643)
-- [X] [Huobi](http://huobi.com/)
+- [X] [HTX](https://www.htx.com/) (Former Huobi)
 - [X] [Kraken](https://kraken.com/)
 - [X] [OKX](https://okx.com/) (Former OKEX)
 - [ ] [potentially many others](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_

View File

@@ -1,4 +1,4 @@
-FROM python:3.11.6-slim-bookworm as base
+FROM python:3.11.7-slim-bookworm as base
 # Setup env
 ENV LANG C.UTF-8

View File

@@ -1,8 +1,8 @@
 FROM freqtradeorg/freqtrade:develop_plot
-# Pin jupyter-client to avoid tornado version conflict
-RUN pip install jupyterlab jupyter-client==7.3.4 --user --no-cache-dir
+# Pin prompt-toolkit to avoid questionary version conflict
+RUN pip install jupyterlab "prompt-toolkit<=3.0.36" jupyter-client --user --no-cache-dir
 # Empty the ENTRYPOINT to allow all commands
 ENTRYPOINT []

View File

@@ -6,7 +6,7 @@ services:
       context: ..
       dockerfile: docker/Dockerfile.jupyter
     restart: unless-stopped
-    container_name: freqtrade
+    # container_name: freqtrade
     ports:
       - "127.0.0.1:8888:8888"
     volumes:

View File

@@ -208,10 +208,10 @@ Kucoin supports [time_in_force](configuration.md#understand-order_time_in_force)
 For Kucoin, it is suggested to add `"KCS/<STAKE>"` to your blacklist to avoid issues, unless you are willing to maintain enough extra `KCS` on the account or unless you're willing to disable using `KCS` for fees.
 Kucoin accounts may use `KCS` for fees, and if a trade happens to be on `KCS`, further trades may consume this position and make the initial `KCS` trade unsellable as the expected amount is not there anymore.
-## Huobi
+## HTX (formerly Huobi)
 !!! Tip "Stoploss on Exchange"
-    Huobi supports `stoploss_on_exchange` and uses `stop-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
+    HTX supports `stoploss_on_exchange` and uses `stop-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
 ## OKX (former OKEX)

View File

@@ -162,7 +162,8 @@ Below are the values you can expect to include/use inside a typical strategy dat
 | `df['&*_std/mean']` | Standard deviation and mean values of the defined labels during training (or live tracking with `fit_live_predictions_candles`). Commonly used to understand the rarity of a prediction (use the z-score as shown in `templates/FreqaiExampleStrategy.py` and explained [here](#creating-a-dynamic-target-threshold) to evaluate how often a particular prediction was observed during training or historically with `fit_live_predictions_candles`). <br> **Datatype:** Float.
 | `df['do_predict']` | Indication of an outlier data point. The return value is integer between -2 and 2, which lets you know if the prediction is trustworthy or not. `do_predict==1` means that the prediction is trustworthy. If the Dissimilarity Index (DI, see details [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di)) of the input data point is above the threshold defined in the config, FreqAI will subtract 1 from `do_predict`, resulting in `do_predict==0`. If `use_SVM_to_remove_outliers` is active, the Support Vector Machine (SVM, see details [here](freqai-feature-engineering.md#identifying-outliers-using-a-support-vector-machine-svm)) may also detect outliers in training and prediction data. In this case, the SVM will also subtract 1 from `do_predict`. If the input data point was considered an outlier by the SVM but not by the DI, or vice versa, the result will be `do_predict==0`. If both the DI and the SVM considers the input data point to be an outlier, the result will be `do_predict==-1`. As with the SVM, if `use_DBSCAN_to_remove_outliers` is active, DBSCAN (see details [here](freqai-feature-engineering.md#identifying-outliers-with-dbscan)) may also detect outliers and subtract 1 from `do_predict`. Hence, if both the SVM and DBSCAN are active and identify a datapoint that was above the DI threshold as an outlier, the result will be `do_predict==-2`. A particular case is when `do_predict == 2`, which means that the model has expired due to exceeding `expired_hours`. <br> **Datatype:** Integer between -2 and 2.
 | `df['DI_values']` | Dissimilarity Index (DI) values are proxies for the level of confidence FreqAI has in the prediction. A lower DI means the prediction is close to the training data, i.e., higher prediction confidence. See details about the DI [here](freqai-feature-engineering.md#identifying-outliers-with-the-dissimilarity-index-di). <br> **Datatype:** Float.
-| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplictative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%`. <br> **Datatype:** Depends on the output of the model.
+| `df['%*']` | Any dataframe column prepended with `%` in `feature_engineering_*()` is treated as a training feature. For example, you can include the RSI in the training feature set (similar to in `templates/FreqaiExampleStrategy.py`) by setting `df['%-rsi']`. See more details on how this is done [here](freqai-feature-engineering.md). <br> **Note:** Since the number of features prepended with `%` can multiply very quickly (10s of thousands of features are easily engineered using the multiplictative functionality of, e.g., `include_shifted_candles` and `include_timeframes` as described in the [parameter table](freqai-parameter-table.md)), these features are removed from the dataframe that is returned from FreqAI to the strategy. To keep a particular type of feature for plotting purposes, you would prepend it with `%%` (see details below). <br> **Datatype:** Depends on the feature created by the user.
+| `df['%%*']` | Any dataframe column prepended with `%%` in `feature_engineering_*()` is treated as a training feature, just the same as the above `%` prepend. However, in this case, the features are returned back to the strategy for FreqUI/plot-dataframe plotting and monitoring in Dry/Live/Backtesting <br> **Datatype:** Depends on the feature created by the user. Please note that features created in `feature_engineering_expand()` will have automatic FreqAI naming schemas depending on the expansions that you configured (i.e. `include_timeframes`, `include_corr_pairlist`, `indicators_periods_candles`, `include_shifted_candles`). So if you want to plot `%%-rsi` from `feature_engineering_expand_all()`, the final naming scheme for your plotting config would be: `%%-rsi-period_10_ETH/USDT:USDT_1h` for the `rsi` feature with `period=10`, `timeframe=1h`, and `pair=ETH/USDT:USDT` (the `:USDT` is added if you are using futures pairs). It is useful to simply add `print(dataframe.columns)` in your `populate_indicators()` after `self.freqai.start()` to see the full list of available features that are returned to the strategy for plotting purposes.
 ## Setting the `startup_candle_count`
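For illustration, a minimal sketch of how the `%` and `%%` prefixes above are used and discovered. The class name and indicator choices are assumptions, loosely modeled on `templates/FreqaiExampleStrategy.py`; it is a fragment, not a complete, profitable strategy.

import talib.abstract as ta
from pandas import DataFrame
from freqtrade.strategy import IStrategy


class FreqaiPlotFeatureExample(IStrategy):  # hypothetical example class
    timeframe = '1h'
    minimal_roi = {"0": 0.1}
    stoploss = -0.1

    def feature_engineering_expand_all(self, dataframe: DataFrame, period: int,
                                       metadata: dict, **kwargs) -> DataFrame:
        # '%'-prefixed: training feature, stripped from the dataframe that
        # FreqAI returns to the strategy.
        dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
        # '%%'-prefixed: also a training feature, but returned to the strategy
        # for FreqUI / plot-dataframe plotting. After expansion it becomes e.g.
        # '%%-rsi-period_10_ETH/USDT:USDT_1h'.
        dataframe["%%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
        return dataframe

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe = self.freqai.start(dataframe, metadata, self)
        # Handy way to see the exact expanded '%%' column names for plotting:
        print(dataframe.columns)
        return dataframe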

View File

@@ -439,7 +439,7 @@ While this strategy is most likely too simple to provide consistent profit, it s
 ??? Hint "Performance tip"
     During normal hyperopting, indicators are calculated once and supplied to each epoch, linearly increasing RAM usage as a factor of increasing cores. As this also has performance implications, there are two alternatives to reduce RAM usage
-    * Move `ema_short` and `ema_long` calculations from `populate_indicators()` to `populate_entry_trend()`. Since `populate_entry_trend()` gonna be calculated every epochs, you don't need to use `.range` functionality.
+    * Move `ema_short` and `ema_long` calculations from `populate_indicators()` to `populate_entry_trend()`. Since `populate_entry_trend()` will be calculated every epoch, you don't need to use `.range` functionality.
     * hyperopt provides `--analyze-per-epoch` which will move the execution of `populate_indicators()` to the epoch process, calculating a single value per parameter per epoch instead of using the `.range` functionality. In this case, `.range` functionality will only return the actually used value.
     These alternatives will reduce RAM usage, but increase CPU usage. However, your hyperopting run will be less likely to fail due to Out Of Memory (OOM) issues.
@@ -926,6 +926,12 @@ Once the optimized strategy has been implemented into your strategy, you should
 To achieve same the results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) used for hyperopt `--dmmp`/`--disable-max-market-positions` and `--eps`/`--enable-position-stacking` for Backtesting.
-Should results not match, please double-check to make sure you transferred all conditions correctly.
-Pay special care to the stoploss, max_open_trades and trailing stoploss parameters, as these are often set in configuration files, which override changes to the strategy.
-You should also carefully review the log of your backtest to ensure that there were no parameters inadvertently set by the configuration (like `stoploss`, `max_open_trades` or `trailing_stop`).
+### Why do my backtest results not match my hyperopt results?
+Should results not match, check the following factors:
+* You may have added parameters to hyperopt in `populate_indicators()` where they will be calculated only once **for all epochs**. If you are, for example, trying to optimise multiple SMA timeperiod values, the hyperoptable timeperiod parameter should be placed in `populate_entry_trend()` which is calculated every epoch. See [Optimizing an indicator parameter](https://www.freqtrade.io/en/stable/hyperopt/#optimizing-an-indicator-parameter).
+* If you have disabled the auto-export of hyperopt parameters into the JSON parameters file, double-check to make sure you transferred all hyperopted values into your strategy correctly.
+* Check the logs to verify what parameters are being set and what values are being used.
+* Pay special care to the stoploss, max_open_trades and trailing stoploss parameters, as these are often set in configuration files, which override changes to the strategy. Check the logs of your backtest to ensure that there were no parameters inadvertently set by the configuration (like `stoploss`, `max_open_trades` or `trailing_stop`).
+* Verify that you do not have an unexpected parameters JSON file overriding the parameters or the default hyperopt settings in your strategy.
+* Verify that any protections that are enabled in backtesting are also enabled when hyperopting, and vice versa. When using `--space protection`, protections are auto-enabled for hyperopting.
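The first bullet above is the one that most often bites. A minimal sketch of the fix, assuming a freqtrade strategy using `IntParameter` (the class name and parameter ranges are hypothetical): the hyperoptable timeperiod is consumed in `populate_entry_trend()`, which is re-evaluated every epoch, not in `populate_indicators()`, which runs only once for all epochs.

import talib.abstract as ta
from pandas import DataFrame
from freqtrade.strategy import IStrategy, IntParameter


class SmaPeriodExample(IStrategy):  # hypothetical example class
    timeframe = '1h'
    minimal_roi = {"0": 0.10}
    stoploss = -0.10
    # Hyperoptable SMA timeperiod - used below in populate_entry_trend().
    buy_sma_period = IntParameter(10, 50, default=20, space="buy")

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Deliberately empty: an indicator computed here with the parameter's
        # default would be frozen for all epochs.
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        sma = ta.SMA(dataframe, timeperiod=self.buy_sma_period.value)
        dataframe.loc[dataframe['close'] > sma, 'enter_long'] = 1
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe.loc[:, 'exit_long'] = 0  # exits via ROI/stoploss in this sketch
        return dataframe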

View File

@@ -5,7 +5,7 @@ This section will highlight a few projects from members of the community.
 - [Example freqtrade strategies](https://github.com/freqtrade/freqtrade-strategies/)
 - [FrequentHippo - Grafana dashboard with dry/live runs and backtests](http://frequenthippo.ddns.net:3000/) (by hippocritical).
 - [Online pairlist generator](https://remotepairlist.com/) (by Blood4rc).
-- [Freqtrade Backtesting Project](https://bt.robot.co.network/) (by Blood4rc).
+- [Freqtrade Backtesting Project](https://strat.ninja/) (by Blood4rc).
 - [Freqtrade analysis notebook](https://github.com/froggleston/freqtrade_analysis_notebook) (by Froggleston).
 - [TUI for freqtrade](https://github.com/froggleston/freqtrade-frogtrade9000) (by Froggleston).
 - [Bot Academy](https://botacademy.ddns.net/) (by stash86) - Blog about crypto bot projects.

View File

@@ -42,7 +42,7 @@ Please read the [exchange specific notes](exchanges.md) to learn about eventual,
 - [X] [Binance](https://www.binance.com/)
 - [X] [Bitmart](https://bitmart.com/)
 - [X] [Gate.io](https://www.gate.io/ref/6266643)
-- [X] [Huobi](http://huobi.com/)
+- [X] [HTX](https://www.htx.com/) (Former Huobi)
 - [X] [Kraken](https://kraken.com/)
 - [X] [OKX](https://okx.com/) (Former OKEX)
 - [ ] [potentially many others through <img alt="ccxt" width="30px" src="assets/ccxt-logo.svg" />](https://github.com/ccxt/ccxt/). _(We cannot guarantee they will work)_

View File

@@ -1,6 +1,6 @@
-markdown==3.5.1
+markdown==3.5.2
 mkdocs==1.5.3
-mkdocs-material==9.5.3
+mkdocs-material==9.5.4
 mdx_truly_sane_lists==1.3
 pymdown-extensions==10.7
-jinja2==3.1.2
+jinja2==3.1.3

View File

@@ -30,7 +30,7 @@ The Order-type will be ignored if only one mode is available.
 |----------|-------------|
 | Binance | limit |
 | Binance Futures | market, limit |
-| Huobi | limit |
+| HTX (former Huobi) | limit |
 | kraken | market, limit |
 | Gate | limit |
 | Okx | limit |

View File

@@ -156,9 +156,9 @@ def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame
 Out of the box, freqtrade installs the following technical libraries:
-* [ta-lib](http://mrjbq7.github.io/ta-lib/)
-* [pandas-ta](https://twopirllc.github.io/pandas-ta/)
-* [technical](https://github.com/freqtrade/technical/)
+- [ta-lib](https://ta-lib.github.io/ta-lib-python/)
+- [pandas-ta](https://twopirllc.github.io/pandas-ta/)
+- [technical](https://github.com/freqtrade/technical/)
 Additional technical libraries can be installed as necessary, or custom indicators may be written / invented by the strategy author.
@@ -1009,8 +1009,8 @@ This is a common pain-point, which can cause huge differences between backtestin
 The following lists some common patterns which should be avoided to prevent frustration:
-- don't use `shift(-1)`. This uses data from the future, which is not available.
-- don't use `.iloc[-1]` or any other absolute position in the dataframe, this will be different between dry-run and backtesting.
+- don't use `shift(-1)` or other negative values. This uses data from the future in backtesting, which is not available in dry or live modes.
+- don't use `.iloc[-1]` or any other absolute position in the dataframe within `populate_` functions, as this will be different between dry-run and backtesting. Absolute `iloc` indexing is safe to use in callbacks however - see [Strategy Callbacks](strategy-callbacks.md).
 - don't use `dataframe['volume'].mean()`. This uses the full DataFrame for backtesting, including data from the future. Use `dataframe['volume'].rolling(<window>).mean()` instead
 - don't use `.resample('1h')`. This uses the left border of the interval, so moves data from an hour to the start of the hour. Use `.resample('1h', label='right')` instead.
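A small self-contained pandas illustration of the first pitfalls above (pure pandas, no freqtrade imports; the column names match freqtrade's OHLCV dataframes, the data is synthetic):

import numpy as np
import pandas as pd

df = pd.DataFrame({'close': np.random.random(100),
                   'volume': np.random.random(100) * 1000})

# BAD: shift(-1) pulls the *next* candle's close into the current row -
# backtesting would see the future, dry/live cannot.
df['next_close'] = df['close'].shift(-1)

# BAD: .mean() over the whole column includes candles that have not
# happened yet at any given point of the backtest.
volume_mean_bad = df['volume'].mean()

# GOOD: a rolling mean only looks backwards from each candle.
df['volume_mean'] = df['volume'].rolling(20).mean()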

View File

@@ -22,7 +22,7 @@ git clone https://github.com/freqtrade/freqtrade.git
 ### 2. Install ta-lib
-Install ta-lib according to the [ta-lib documentation](https://github.com/mrjbq7/ta-lib#windows).
+Install ta-lib according to the [ta-lib documentation](https://github.com/TA-Lib/ta-lib-python#windows).
 As compiling from source on windows has heavy dependencies (requires a partial visual studio installation), Freqtrade provides these dependencies (in the binary wheel format) for the latest 3 Python versions (3.9, 3.10 and 3.11) and for 64bit Windows.
 These Wheels are also used by CI running on windows, and are therefore tested together with freqtrade.

View File

@@ -219,27 +219,35 @@ class Arguments:
         )
         # Add trade subcommand
-        trade_cmd = subparsers.add_parser('trade', help='Trade module.',
-                                          parents=[_common_parser, _strategy_parser])
+        trade_cmd = subparsers.add_parser(
+            'trade',
+            help='Trade module.',
+            parents=[_common_parser, _strategy_parser]
+        )
         trade_cmd.set_defaults(func=start_trading)
         self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd)
         # add create-userdir subcommand
-        create_userdir_cmd = subparsers.add_parser('create-userdir',
-                                                   help="Create user-data directory.",
-                                                   )
+        create_userdir_cmd = subparsers.add_parser(
+            'create-userdir',
+            help="Create user-data directory.",
+        )
         create_userdir_cmd.set_defaults(func=start_create_userdir)
         self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd)
         # add new-config subcommand
-        build_config_cmd = subparsers.add_parser('new-config',
-                                                 help="Create new config")
+        build_config_cmd = subparsers.add_parser(
+            'new-config',
+            help="Create new config",
+        )
         build_config_cmd.set_defaults(func=start_new_config)
         self._build_args(optionlist=ARGS_BUILD_CONFIG, parser=build_config_cmd)
         # add new-strategy subcommand
-        build_strategy_cmd = subparsers.add_parser('new-strategy',
-                                                   help="Create new strategy")
+        build_strategy_cmd = subparsers.add_parser(
+            'new-strategy',
+            help="Create new strategy",
+        )
         build_strategy_cmd.set_defaults(func=start_new_strategy)
         self._build_args(optionlist=ARGS_BUILD_STRATEGY, parser=build_strategy_cmd)
@@ -289,8 +297,11 @@ class Arguments:
         self._build_args(optionlist=ARGS_LIST_DATA, parser=list_data_cmd)
         # Add backtesting subcommand
-        backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.',
-                                                parents=[_common_parser, _strategy_parser])
+        backtesting_cmd = subparsers.add_parser(
+            'backtesting',
+            help='Backtesting module.',
+            parents=[_common_parser, _strategy_parser]
+        )
         backtesting_cmd.set_defaults(func=start_backtesting)
         self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)
@@ -304,20 +315,27 @@ class Arguments:
         self._build_args(optionlist=ARGS_BACKTEST_SHOW, parser=backtesting_show_cmd)
         # Add backtesting analysis subcommand
-        analysis_cmd = subparsers.add_parser('backtesting-analysis',
-                                             help='Backtest Analysis module.',
-                                             parents=[_common_parser])
+        analysis_cmd = subparsers.add_parser(
+            'backtesting-analysis',
+            help='Backtest Analysis module.',
+            parents=[_common_parser]
+        )
         analysis_cmd.set_defaults(func=start_analysis_entries_exits)
         self._build_args(optionlist=ARGS_ANALYZE_ENTRIES_EXITS, parser=analysis_cmd)
         # Add edge subcommand
-        edge_cmd = subparsers.add_parser('edge', help='Edge module.',
-                                         parents=[_common_parser, _strategy_parser])
+        edge_cmd = subparsers.add_parser(
+            'edge',
+            help='Edge module.',
+            parents=[_common_parser, _strategy_parser]
+        )
         edge_cmd.set_defaults(func=start_edge)
         self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)
         # Add hyperopt subcommand
-        hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.',
+        hyperopt_cmd = subparsers.add_parser(
+            'hyperopt',
+            help='Hyperopt module.',
             parents=[_common_parser, _strategy_parser],
         )
         hyperopt_cmd.set_defaults(func=start_hyperopt)
@@ -447,16 +465,20 @@ class Arguments:
         self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)
         # Add webserver subcommand
-        webserver_cmd = subparsers.add_parser('webserver', help='Webserver module.',
-                                              parents=[_common_parser])
+        webserver_cmd = subparsers.add_parser(
+            'webserver',
+            help='Webserver module.',
+            parents=[_common_parser]
+        )
         webserver_cmd.set_defaults(func=start_webserver)
         self._build_args(optionlist=ARGS_WEBSERVER, parser=webserver_cmd)
         # Add strategy_updater subcommand
-        strategy_updater_cmd = subparsers.add_parser('strategy-updater',
-                                                     help='updates outdated strategy'
-                                                     'files to the current version',
-                                                     parents=[_common_parser])
+        strategy_updater_cmd = subparsers.add_parser(
+            'strategy-updater',
+            help='updates outdated strategy files to the current version',
+            parents=[_common_parser]
+        )
         strategy_updater_cmd.set_defaults(func=start_strategy_update)
         self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd)
@@ -464,8 +486,8 @@ class Arguments:
         lookahead_analayis_cmd = subparsers.add_parser(
             'lookahead-analysis',
             help="Check for potential look ahead bias.",
-            parents=[_common_parser, _strategy_parser])
+            parents=[_common_parser, _strategy_parser]
+        )
         lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis)
         self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS,
@@ -475,8 +497,8 @@ class Arguments:
         recursive_analayis_cmd = subparsers.add_parser(
             'recursive-analysis',
             help="Check for potential recursive formula issue.",
-            parents=[_common_parser, _strategy_parser])
+            parents=[_common_parser, _strategy_parser]
+        )
         recursive_analayis_cmd.set_defaults(func=start_recursive_analysis)
         self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS,

View File

@@ -109,7 +109,7 @@ def ask_user_config() -> Dict[str, Any]:
             "binance",
             "binanceus",
             "gate",
-            "huobi",
+            "htx",
             "kraken",
             "kucoin",
             "okx",

View File

@@ -134,10 +134,10 @@ def start_list_data(args: Dict[str, Any]) -> None:
         print(tabulate([
             (pair, timeframe, candle_type,
              start.strftime(DATETIME_PRINT_FORMAT),
-             end.strftime(DATETIME_PRINT_FORMAT))
-            for pair, timeframe, candle_type, start, end in sorted(
+             end.strftime(DATETIME_PRINT_FORMAT), length)
+            for pair, timeframe, candle_type, start, end, length in sorted(
                 paircombs1,
                 key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]))
         ],
-            headers=("Pair", "Timeframe", "Type", 'From', 'To'),
+            headers=("Pair", "Timeframe", "Type", 'From', 'To', 'Candles'),
             tablefmt='psql', stralign='right'))

View File

@@ -15,6 +15,7 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
     """
     Test Pairlist configuration
     """
+    from freqtrade.persistence import FtNoDBContext
     from freqtrade.plugins.pairlistmanager import PairListManager
     config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
@@ -24,6 +25,7 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
     if not quote_currencies:
         quote_currencies = [config.get('stake_currency')]
     results = {}
+    with FtNoDBContext():
         for curr in quote_currencies:
             config['stake_currency'] = curr
             pairlists = PairListManager(exchange, config)

View File

@@ -5,7 +5,7 @@ import logging
 import warnings
 from copy import deepcopy
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any, Callable, Dict, List, Optional, Tuple
 from freqtrade import constants
 from freqtrade.configuration.deprecated_settings import process_temporary_deprecated_settings
@@ -68,6 +68,8 @@ class Configuration:
         config: Config = load_from_files(self.args.get("config", []))
         # Load environment variables
+        from freqtrade.commands.arguments import NO_CONF_ALLOWED
+        if self.args.get('command') not in NO_CONF_ALLOWED:
             env_data = enironment_vars_to_dict()
             config = deep_merge_dicts(env_data, config)
@@ -233,54 +235,37 @@ class Configuration:
         except ValueError:
             pass
-        self._args_to_config(config, argname='timeframe_detail',
-                             logstring='Parameter --timeframe-detail detected, '
-                             'using {} for intra-candle backtesting ...')
-        self._args_to_config(config, argname='backtest_show_pair_list',
-                             logstring='Parameter --show-pair-list detected.')
-        self._args_to_config(config, argname='stake_amount',
-                             logstring='Parameter --stake-amount detected, '
-                             'overriding stake_amount to: {} ...')
-        self._args_to_config(config, argname='dry_run_wallet',
-                             logstring='Parameter --dry-run-wallet detected, '
-                             'overriding dry_run_wallet to: {} ...')
-        self._args_to_config(config, argname='fee',
-                             logstring='Parameter --fee detected, '
-                             'setting fee to: {} ...')
-        self._args_to_config(config, argname='timerange',
-                             logstring='Parameter --timerange detected: {} ...')
+        configurations = [
+            ('timeframe_detail',
+             'Parameter --timeframe-detail detected, using {} for intra-candle backtesting ...'),
+            ('backtest_show_pair_list', 'Parameter --show-pair-list detected.'),
+            ('stake_amount',
+             'Parameter --stake-amount detected, overriding stake_amount to: {} ...'),
+            ('dry_run_wallet',
+             'Parameter --dry-run-wallet detected, overriding dry_run_wallet to: {} ...'),
+            ('fee', 'Parameter --fee detected, setting fee to: {} ...'),
+            ('timerange', 'Parameter --timerange detected: {} ...'),
+        ]
+        self._args_to_config_loop(config, configurations)
         self._process_datadir_options(config)
         self._args_to_config(config, argname='strategy_list',
                              logstring='Using strategy list of {} strategies', logfun=len)
-        self._args_to_config(
-            config,
-            argname='recursive_strategy_search',
-            logstring='Recursively searching for a strategy in the strategies folder.',
-        )
-        self._args_to_config(config, argname='timeframe',
-                             logstring='Overriding timeframe with Command line argument')
-        self._args_to_config(config, argname='export',
-                             logstring='Parameter --export detected: {} ...')
-        self._args_to_config(config, argname='backtest_breakdown',
-                             logstring='Parameter --breakdown detected ...')
-        self._args_to_config(config, argname='backtest_cache',
-                             logstring='Parameter --cache={} detected ...')
-        self._args_to_config(config, argname='disableparamexport',
-                             logstring='Parameter --disableparamexport detected: {} ...')
-        self._args_to_config(config, argname='freqai_backtest_live_models',
-                             logstring='Parameter --freqai-backtest-live-models detected ...')
+        configurations = [
+            ('recursive_strategy_search',
+             'Recursively searching for a strategy in the strategies folder.'),
+            ('timeframe', 'Overriding timeframe with Command line argument'),
+            ('export', 'Parameter --export detected: {} ...'),
+            ('backtest_breakdown', 'Parameter --breakdown detected ...'),
+            ('backtest_cache', 'Parameter --cache={} detected ...'),
+            ('disableparamexport', 'Parameter --disableparamexport detected: {} ...'),
+            ('freqai_backtest_live_models',
+             'Parameter --freqai-backtest-live-models detected ...'),
+        ]
+        self._args_to_config_loop(config, configurations)
         # Edge section:
         if 'stoploss_range' in self.args and self.args["stoploss_range"]:
@@ -291,31 +276,18 @@ class Configuration:
             logger.info('Parameter --stoplosses detected: %s ...', self.args["stoploss_range"])
         # Hyperopt section
-        self._args_to_config(config, argname='hyperopt',
-                             logstring='Using Hyperopt class name: {}')
-        self._args_to_config(config, argname='hyperopt_path',
-                             logstring='Using additional Hyperopt lookup path: {}')
-        self._args_to_config(config, argname='hyperoptexportfilename',
-                             logstring='Using hyperopt file: {}')
-        self._args_to_config(config, argname='lookahead_analysis_exportfilename',
-                             logstring='Saving lookahead analysis results into {} ...')
-        self._args_to_config(config, argname='epochs',
-                             logstring='Parameter --epochs detected ... '
-                             'Will run Hyperopt with for {} epochs ...'
-                             )
-        self._args_to_config(config, argname='spaces',
-                             logstring='Parameter -s/--spaces detected: {}')
-        self._args_to_config(config, argname='analyze_per_epoch',
-                             logstring='Parameter --analyze-per-epoch detected.')
-        self._args_to_config(config, argname='print_all',
-                             logstring='Parameter --print-all detected ...')
+        configurations = [
+            ('hyperopt', 'Using Hyperopt class name: {}'),
+            ('hyperopt_path', 'Using additional Hyperopt lookup path: {}'),
+            ('hyperoptexportfilename', 'Using hyperopt file: {}'),
+            ('lookahead_analysis_exportfilename', 'Saving lookahead analysis results into {} ...'),
+            ('epochs', 'Parameter --epochs detected ... Will run Hyperopt with for {} epochs ...'),
+            ('spaces', 'Parameter -s/--spaces detected: {}'),
+            ('analyze_per_epoch', 'Parameter --analyze-per-epoch detected.'),
+            ('print_all', 'Parameter --print-all detected ...'),
+        ]
+        self._args_to_config_loop(config, configurations)
         if 'print_colorized' in self.args and not self.args["print_colorized"]:
             logger.info('Parameter --no-color detected ...')
@@ -323,123 +295,55 @@ class Configuration:
         else:
             config.update({'print_colorized': True})
-        self._args_to_config(config, argname='print_json',
-                             logstring='Parameter --print-json detected ...')
-        self._args_to_config(config, argname='export_csv',
-                             logstring='Parameter --export-csv detected: {}')
-        self._args_to_config(config, argname='hyperopt_jobs',
-                             logstring='Parameter -j/--job-workers detected: {}')
-        self._args_to_config(config, argname='hyperopt_random_state',
-                             logstring='Parameter --random-state detected: {}')
-        self._args_to_config(config, argname='hyperopt_min_trades',
-                             logstring='Parameter --min-trades detected: {}')
-        self._args_to_config(config, argname='hyperopt_loss',
-                             logstring='Using Hyperopt loss class name: {}')
-        self._args_to_config(config, argname='hyperopt_show_index',
-                             logstring='Parameter -n/--index detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_best',
-                             logstring='Parameter --best detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_profitable',
-                             logstring='Parameter --profitable detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_min_trades',
-                             logstring='Parameter --min-trades detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_max_trades',
-                             logstring='Parameter --max-trades detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_min_avg_time',
-                             logstring='Parameter --min-avg-time detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_max_avg_time',
-                             logstring='Parameter --max-avg-time detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_min_avg_profit',
-                             logstring='Parameter --min-avg-profit detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_max_avg_profit',
-                             logstring='Parameter --max-avg-profit detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_min_total_profit',
-                             logstring='Parameter --min-total-profit detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_max_total_profit',
-                             logstring='Parameter --max-total-profit detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_min_objective',
-                             logstring='Parameter --min-objective detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_max_objective',
-                             logstring='Parameter --max-objective detected: {}')
-        self._args_to_config(config, argname='hyperopt_list_no_details',
-                             logstring='Parameter --no-details detected: {}')
-        self._args_to_config(config, argname='hyperopt_show_no_header',
-                             logstring='Parameter --no-header detected: {}')
-        self._args_to_config(config, argname="hyperopt_ignore_missing_space",
-                             logstring="Paramter --ignore-missing-space detected: {}")
+        configurations = [
+            ('print_json', 'Parameter --print-json detected ...'),
+            ('export_csv', 'Parameter --export-csv detected: {}'),
+            ('hyperopt_jobs', 'Parameter -j/--job-workers detected: {}'),
+            ('hyperopt_random_state', 'Parameter --random-state detected: {}'),
+            ('hyperopt_min_trades', 'Parameter --min-trades detected: {}'),
+            ('hyperopt_loss', 'Using Hyperopt loss class name: {}'),
+            ('hyperopt_show_index', 'Parameter -n/--index detected: {}'),
+            ('hyperopt_list_best', 'Parameter --best detected: {}'),
+            ('hyperopt_list_profitable', 'Parameter --profitable detected: {}'),
+            ('hyperopt_list_min_trades', 'Parameter --min-trades detected: {}'),
+            ('hyperopt_list_max_trades', 'Parameter --max-trades detected: {}'),
+            ('hyperopt_list_min_avg_time', 'Parameter --min-avg-time detected: {}'),
+            ('hyperopt_list_max_avg_time', 'Parameter --max-avg-time detected: {}'),
+            ('hyperopt_list_min_avg_profit', 'Parameter --min-avg-profit detected: {}'),
+            ('hyperopt_list_max_avg_profit', 'Parameter --max-avg-profit detected: {}'),
+            ('hyperopt_list_min_total_profit', 'Parameter --min-total-profit detected: {}'),
+            ('hyperopt_list_max_total_profit', 'Parameter --max-total-profit detected: {}'),
+            ('hyperopt_list_min_objective', 'Parameter --min-objective detected: {}'),
+            ('hyperopt_list_max_objective', 'Parameter --max-objective detected: {}'),
+            ('hyperopt_list_no_details', 'Parameter --no-details detected: {}'),
+            ('hyperopt_show_no_header', 'Parameter --no-header detected: {}'),
+            ('hyperopt_ignore_missing_space', 'Paramter --ignore-missing-space detected: {}'),
+        ]
+        self._args_to_config_loop(config, configurations)
     def _process_plot_options(self, config: Config) -> None:
-        self._args_to_config(config, argname='pairs',
-                             logstring='Using pairs {}')
-        self._args_to_config(config, argname='indicators1',
-                             logstring='Using indicators1: {}')
-        self._args_to_config(config, argname='indicators2',
-                             logstring='Using indicators2: {}')
-        self._args_to_config(config, argname='trade_ids',
-                             logstring='Filtering on trade_ids: {}')
-        self._args_to_config(config, argname='plot_limit',
-                             logstring='Limiting plot to: {}')
-        self._args_to_config(config, argname='plot_auto_open',
-                             logstring='Parameter --auto-open detected.')
-        self._args_to_config(config, argname='trade_source',
-                             logstring='Using trades from: {}')
-        self._args_to_config(config, argname='prepend_data',
-                             logstring='Prepend detected. Allowing data prepending.')
-        self._args_to_config(config, argname='erase',
-                             logstring='Erase detected. Deleting existing data.')
-        self._args_to_config(config, argname='no_trades',
-                             logstring='Parameter --no-trades detected.')
-        self._args_to_config(config, argname='timeframes',
-                             logstring='timeframes --timeframes: {}')
-        self._args_to_config(config, argname='days',
-                             logstring='Detected --days: {}')
-        self._args_to_config(config, argname='include_inactive',
-                             logstring='Detected --include-inactive-pairs: {}')
-        self._args_to_config(config, argname='download_trades',
-                             logstring='Detected --dl-trades: {}')
-        self._args_to_config(config, argname='dataformat_ohlcv',
-                             logstring='Using "{}" to store OHLCV data.')
-        self._args_to_config(config, argname='dataformat_trades',
-                             logstring='Using "{}" to store trades data.')
-        self._args_to_config(config, argname='show_timerange',
-                             logstring='Detected --show-timerange')
+        configurations = [
+            ('pairs', 'Using pairs {}'),
+            ('indicators1', 'Using indicators1: {}'),
+            ('indicators2', 'Using indicators2: {}'),
+            ('trade_ids', 'Filtering on trade_ids: {}'),
+            ('plot_limit', 'Limiting plot to: {}'),
+            ('plot_auto_open', 'Parameter --auto-open detected.'),
+            ('trade_source', 'Using trades from: {}'),
+            ('prepend_data', 'Prepend detected. Allowing data prepending.'),
+            ('erase', 'Erase detected. Deleting existing data.'),
+            ('no_trades', 'Parameter --no-trades detected.'),
+            ('timeframes', 'timeframes --timeframes: {}'),
+            ('days', 'Detected --days: {}'),
+            ('include_inactive', 'Detected --include-inactive-pairs: {}'),
+            ('download_trades', 'Detected --dl-trades: {}'),
+            ('dataformat_ohlcv', 'Using "{}" to store OHLCV data.'),
+            ('dataformat_trades', 'Using "{}" to store trades data.'),
+            ('show_timerange', 'Detected --show-timerange'),
+        ]
+        self._args_to_config_loop(config, configurations)
     def _process_data_options(self, config: Config) -> None:
         self._args_to_config(config, argname='new_pairs_days',
@@ -453,45 +357,27 @@ class Configuration:
                              logstring='Detected --candle-types: {}')
     def _process_analyze_options(self, config: Config) -> None:
-        self._args_to_config(config, argname='analysis_groups',
-                             logstring='Analysis reason groups: {}')
-        self._args_to_config(config, argname='enter_reason_list',
-                             logstring='Analysis enter tag list: {}')
-        self._args_to_config(config, argname='exit_reason_list',
-                             logstring='Analysis exit tag list: {}')
-        self._args_to_config(config, argname='indicator_list',
-                             logstring='Analysis indicator list: {}')
-        self._args_to_config(config, argname='timerange',
-                             logstring='Filter trades by timerange: {}')
-        self._args_to_config(config, argname='analysis_rejected',
-                             logstring='Analyse rejected signals: {}')
-        self._args_to_config(config, argname='analysis_to_csv',
-                             logstring='Store analysis tables to CSV: {}')
-        self._args_to_config(config, argname='analysis_csv_path',
-                             logstring='Path to store analysis CSVs: {}')
-        self._args_to_config(config, argname='analysis_csv_path',
-                             logstring='Path to store analysis CSVs: {}')
-        # Lookahead analysis results
-        self._args_to_config(config, argname='targeted_trade_amount',
-                             logstring='Targeted Trade amount: {}')
-        self._args_to_config(config, argname='minimum_trade_amount',
-                             logstring='Minimum Trade amount: {}')
-        self._args_to_config(config, argname='lookahead_analysis_exportfilename',
-                             logstring='Path to store lookahead-analysis-results: {}')
-        self._args_to_config(config, argname='startup_candle',
-                             logstring='Startup candle to be used on recursive analysis: {}')
+        configurations = [
+            ('analysis_groups', 'Analysis reason groups: {}'),
+            ('enter_reason_list', 'Analysis enter tag list: {}'),
+            ('exit_reason_list', 'Analysis exit tag list: {}'),
+            ('indicator_list', 'Analysis indicator list: {}'),
+            ('timerange', 'Filter trades by timerange: {}'),
+            ('analysis_rejected', 'Analyse rejected signals: {}'),
+            ('analysis_to_csv', 'Store analysis tables to CSV: {}'),
+            ('analysis_csv_path', 'Path to store analysis CSVs: {}'),
+            # Lookahead analysis results
+            ('targeted_trade_amount', 'Targeted Trade amount: {}'),
+            ('minimum_trade_amount', 'Minimum Trade amount: {}'),
+            ('lookahead_analysis_exportfilename', 'Path to store lookahead-analysis-results: {}'),
+            ('startup_candle', 'Startup candle to be used on recursive analysis: {}'),
+        ]
+        self._args_to_config_loop(config, configurations)
+    def _args_to_config_loop(self, config, configurations: List[Tuple[str, str]]) -> None:
+        for argname, logstring in configurations:
+            self._args_to_config(config, argname=argname, logstring=logstring)
     def _process_runmode(self, config: Config) -> None:
def _process_runmode(self, config: Config) -> None: def _process_runmode(self, config: Config) -> None:

View File

@@ -9,7 +9,7 @@ from freqtrade.misc import deep_merge_dicts
 logger = logging.getLogger(__name__)
-def get_var_typed(val):
+def _get_var_typed(val):
     try:
         return int(val)
     except ValueError:
@@ -24,7 +24,7 @@ def get_var_typed(val):
     return val
-def flat_vars_to_nested_dict(env_dict: Dict[str, Any], prefix: str) -> Dict[str, Any]:
+def _flat_vars_to_nested_dict(env_dict: Dict[str, Any], prefix: str) -> Dict[str, Any]:
     """
     Environment variables must be prefixed with FREQTRADE.
     FREQTRADE__{section}__{key}
@@ -40,7 +40,7 @@ def flat_vars_to_nested_dict(env_dict: Dict[str, Any], prefix: str) -> Dict[str,
             logger.info(f"Loading variable '{env_var}'")
             key = env_var.replace(prefix, '')
             for k in reversed(key.split('__')):
-                val = {k.lower(): get_var_typed(val)
+                val = {k.lower(): _get_var_typed(val)
                        if not isinstance(val, dict) and k not in no_convert else val}
             relevant_vars = deep_merge_dicts(val, relevant_vars)
     return relevant_vars
@@ -52,4 +52,4 @@
     Relevant variables must follow the FREQTRADE__{section}__{key} pattern
     :return: Nested dict based on available and relevant variables.
     """
-    return flat_vars_to_nested_dict(os.environ.copy(), ENV_VAR_PREFIX)
+    return _flat_vars_to_nested_dict(os.environ.copy(), ENV_VAR_PREFIX)
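A quick usage sketch of the `FREQTRADE__{section}__{key}` convention described in the docstrings above. The module path is an assumption based on the file being edited, and `enironment_vars_to_dict` is spelled exactly as in the source:

import os

# Double underscores nest one config level per segment:
os.environ['FREQTRADE__STAKE_AMOUNT'] = '200'        # -> {'stake_amount': 200}
os.environ['FREQTRADE__EXCHANGE__NAME'] = 'binance'  # -> {'exchange': {'name': 'binance'}}

from freqtrade.configuration.environment_vars import enironment_vars_to_dict

print(enironment_vars_to_dict())
# Expected shape (values are type-coerced by _get_var_typed):
# {'stake_amount': 200, 'exchange': {'name': 'binance'}}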

View File

@@ -357,10 +357,10 @@ def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataF
     :param timeframe: Timeframe used for backtest
     :return: dataframe with open-counts per time-period in timeframe
     """
-    from freqtrade.exchange import timeframe_to_minutes
-    timeframe_min = timeframe_to_minutes(timeframe)
+    from freqtrade.exchange import timeframe_to_resample_freq
+    timeframe_freq = timeframe_to_resample_freq(timeframe)
     dates = [pd.Series(pd.date_range(row[1]['open_date'], row[1]['close_date'],
-                                     freq=f"{timeframe_min}min"))
+                                     freq=timeframe_freq))
             for row in results[['open_date', 'close_date']].iterrows()]
     deltas = [len(x) for x in dates]
     dates = pd.Series(pd.concat(dates).values, name='date')
@@ -368,7 +368,7 @@ def analyze_trade_parallelism(results: pd.DataFrame, timeframe: str) -> pd.DataF
     df2 = pd.concat([dates, df2], axis=1)
     df2 = df2.set_index('date')
-    df_final = df2.resample(f"{timeframe_min}min")[['pair']].count()
+    df_final = df2.resample(timeframe_freq)[['pair']].count()
     df_final = df_final.rename({'pair': 'open_trades'}, axis=1)
     return df_final

View File

@@ -84,7 +84,7 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
     using the previous close as price for "open", "high" "low" and "close", volume is set to 0
     """
-    from freqtrade.exchange import timeframe_to_minutes
+    from freqtrade.exchange import timeframe_to_resample_freq
     ohlcv_dict = {
         'open': 'first',
@@ -93,13 +93,7 @@ def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str)
         'close': 'last',
         'volume': 'sum'
     }
-    timeframe_minutes = timeframe_to_minutes(timeframe)
-    resample_interval = f'{timeframe_minutes}min'
-    if timeframe_minutes >= 43200 and timeframe_minutes < 525600:
-        # Monthly candles need special treatment to stick to the 1st of the month
-        resample_interval = f'{timeframe}S'
-    elif timeframe_minutes > 43200:
-        resample_interval = timeframe
+    resample_interval = timeframe_to_resample_freq(timeframe)
     # Resample to create "NAN" values
     df = dataframe.resample(resample_interval, on='date').agg(ohlcv_dict)
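The deleted branch above existed because a fixed minute frequency cannot represent calendar months; `timeframe_to_resample_freq()` now centralizes that logic for all the resampling call sites in this commit. A pure-pandas illustration of the underlying problem (data and frequencies are illustrative only):

import pandas as pd

idx = pd.date_range('2024-01-01', '2024-04-30', freq='1D', tz='UTC')
df = pd.DataFrame({'close': range(len(idx))}, index=idx)

# Month-start anchored buckets land exactly on the 1st of each month:
print(df.resample('MS').first().index.tolist())

# A fixed 30-day frequency (43200 minutes) drifts off the month boundaries,
# since calendar months are 28-31 days long:
print(df.resample('43200min').first().index.tolist())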

View File

@@ -70,14 +70,13 @@ def trades_to_ohlcv(trades: DataFrame, timeframe: str) -> DataFrame:
     :return: OHLCV Dataframe.
     :raises: ValueError if no trades are provided
     """
-    from freqtrade.exchange import timeframe_to_minutes
-    timeframe_minutes = timeframe_to_minutes(timeframe)
+    from freqtrade.exchange import timeframe_to_resample_freq
     if trades.empty:
         raise ValueError('Trade-list empty.')
     df = trades.set_index('date', drop=True)
-    df_new = df['price'].resample(f'{timeframe_minutes}min').ohlc()
-    df_new['volume'] = df['amount'].resample(f'{timeframe_minutes}min').sum()
+    resample_interval = timeframe_to_resample_freq(timeframe)
+    df_new = df['price'].resample(resample_interval).ohlc()
+    df_new['volume'] = df['amount'].resample(resample_interval).sum()
     df_new['date'] = df_new.index
     # Drop 0 volume rows
     df_new = df_new.dropna()
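A runnable sketch of the same resample-based conversion, using a toy 'date'-indexed trades frame with 'price' and 'amount' columns; the '60s' frequency string matches what timeframe_to_resample_freq('1m') would produce:

import pandas as pd

trades = pd.DataFrame({
    'date': pd.date_range('2024-01-01', periods=6, freq='20s', tz='UTC'),
    'price': [100.0, 101.0, 99.5, 100.5, 102.0, 101.5],
    'amount': [1.0, 0.5, 2.0, 1.5, 0.2, 0.8],
}).set_index('date')

ohlc = trades['price'].resample('60s').ohlc()         # open/high/low/close per candle
ohlc['volume'] = trades['amount'].resample('60s').sum()
print(ohlc)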


@@ -94,21 +94,22 @@ class IDataHandler(ABC):
     """
     def ohlcv_data_min_max(self, pair: str, timeframe: str,
-                           candle_type: CandleType) -> Tuple[datetime, datetime]:
+                           candle_type: CandleType) -> Tuple[datetime, datetime, int]:
         """
         Returns the min and max timestamp for the given pair and timeframe.
         :param pair: Pair to get min/max for
         :param timeframe: Timeframe to get min/max for
         :param candle_type: Any of the enum CandleType (must match trading mode!)
-        :return: (min, max)
+        :return: (min, max, len)
         """
-        data = self._ohlcv_load(pair, timeframe, None, candle_type)
-        if data.empty:
+        df = self._ohlcv_load(pair, timeframe, None, candle_type)
+        if df.empty:
             return (
                 datetime.fromtimestamp(0, tz=timezone.utc),
-                datetime.fromtimestamp(0, tz=timezone.utc)
+                datetime.fromtimestamp(0, tz=timezone.utc),
+                0,
             )
-        return data.iloc[0]['date'].to_pydatetime(), data.iloc[-1]['date'].to_pydatetime()
+        return df.iloc[0]['date'].to_pydatetime(), df.iloc[-1]['date'].to_pydatetime(), len(df)

     @abstractmethod
     def _ohlcv_load(self, pair: str, timeframe: str, timerange: Optional[TimeRange],
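Call sites now unpack three values instead of two. A self-contained consumer sketch; the candle count of 14398 is made up for the example:

from datetime import datetime, timezone

def describe_range(min_max: tuple) -> str:
    """Sketch: consume the new (min, max, len) 3-tuple."""
    min_date, max_date, num_candles = min_max
    if num_candles == 0:
        return "no candles stored"
    return f"{num_candles} candles between {min_date:%Y-%m-%d %H:%M} and {max_date:%Y-%m-%d %H:%M}"

print(describe_range((datetime(2017, 11, 4, 23, 2, tzinfo=timezone.utc),
                      datetime(2017, 11, 14, 22, 59, tzinfo=timezone.utc),
                      14398)))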


@@ -61,10 +61,10 @@ def create_cum_profit(df: pd.DataFrame, trades: pd.DataFrame, col_name: str,
     """
    if len(trades) == 0:
         raise ValueError("Trade dataframe empty.")
-    from freqtrade.exchange import timeframe_to_minutes
-    timeframe_minutes = timeframe_to_minutes(timeframe)
+    from freqtrade.exchange import timeframe_to_resample_freq
+    timeframe_freq = timeframe_to_resample_freq(timeframe)
     # Resample to timeframe to make sure trades match candles
-    _trades_sum = trades.resample(f'{timeframe_minutes}min', on='close_date'
+    _trades_sum = trades.resample(timeframe_freq, on='close_date'
                                   )[['profit_abs']].sum()
     df.loc[:, col_name] = _trades_sum['profit_abs'].cumsum()
     # Set first value to 0


@@ -17,10 +17,11 @@ from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_c
                                                market_is_active, price_to_precision,
                                                timeframe_to_minutes, timeframe_to_msecs,
                                                timeframe_to_next_date, timeframe_to_prev_date,
-                                               timeframe_to_seconds, validate_exchange)
+                                               timeframe_to_resample_freq, timeframe_to_seconds,
+                                               validate_exchange)
 from freqtrade.exchange.gate import Gate
 from freqtrade.exchange.hitbtc import Hitbtc
-from freqtrade.exchange.huobi import Huobi
+from freqtrade.exchange.htx import Htx
 from freqtrade.exchange.kraken import Kraken
 from freqtrade.exchange.kucoin import Kucoin
 from freqtrade.exchange.okx import Okx


@@ -48,13 +48,14 @@ MAP_EXCHANGE_CHILDCLASS = {
     'binanceusdm': 'binance',
     'okex': 'okx',
     'gateio': 'gate',
+    'huobi': 'htx',
 }

 SUPPORTED_EXCHANGES = [
     'binance',
     'bitmart',
     'gate',
-    'huobi',
+    'htx',
     'kraken',
     'okx',
 ]


@@ -2216,13 +2216,13 @@ class Exchange:
     @retrier_async
     async def _async_fetch_trades(self, pair: str,
                                   since: Optional[int] = None,
-                                  params: Optional[dict] = None) -> List[List]:
+                                  params: Optional[dict] = None) -> Tuple[List[List], Any]:
         """
         Asynchronously gets trade history using fetch_trades.
         Handles exchange errors, does one call to the exchange.
         :param pair: Pair to fetch trade data for
         :param since: Since as integer timestamp in milliseconds
-        returns: List of dicts containing trades
+        returns: List of dicts containing trades, the next iteration value (new "since" or trade_id)
         """
         try:
             # fetch trades asynchronously
@@ -2237,7 +2237,8 @@ class Exchange:
             )
             trades = await self._api_async.fetch_trades(pair, since=since, limit=1000)
             trades = self._trades_contracts_to_amount(trades)
-            return trades_dict_to_list(trades)
+            pagination_value = self._get_trade_pagination_next_value(trades)
+            return trades_dict_to_list(trades), pagination_value
         except ccxt.NotSupported as e:
             raise OperationalException(
                 f'Exchange {self._api.name} does not support fetching historical trade data.'
@@ -2250,6 +2251,25 @@ class Exchange:
         except ccxt.BaseError as e:
             raise OperationalException(f'Could not fetch trade data. Msg: {e}') from e

+    def _valid_trade_pagination_id(self, pair: str, from_id: str) -> bool:
+        """
+        Verify trade-pagination id is valid.
+        Workaround for odd Kraken issue where ID is sometimes wrong.
+        """
+        return True
+
+    def _get_trade_pagination_next_value(self, trades: List[Dict]):
+        """
+        Extract pagination id for the next "from_id" value
+        Applies only to fetch_trade_history by id.
+        """
+        if not trades:
+            return None
+        if self._trades_pagination == 'id':
+            return trades[-1].get('id')
+        else:
+            return trades[-1].get('timestamp')
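A toy illustration of the two pagination modes, with minimal trade dicts shaped like ccxt fetch_trades output (the helper mirrors the method above for the sketch):

def next_value(trades, pagination_mode):
    # Mirrors _get_trade_pagination_next_value for illustration.
    if not trades:
        return None
    if pagination_mode == 'id':
        return trades[-1].get('id')
    return trades[-1].get('timestamp')

trades = [{'id': '126181332', 'timestamp': 1565798399862},
          {'id': '126181333', 'timestamp': 1565798399872}]
print(next_value(trades, 'id'))    # '126181333' -> next from_id
print(next_value(trades, 'time'))  # 1565798399872 -> next since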
     async def _async_get_trade_history_id(self, pair: str,
                                           until: int,
                                           since: Optional[int] = None,
@@ -2265,33 +2285,35 @@ class Exchange:
         """
         trades: List[List] = []
+        # DEFAULT_TRADES_COLUMNS: 0 -> timestamp
+        # DEFAULT_TRADES_COLUMNS: 1 -> id
+        has_overlap = self._ft_has.get('trades_pagination_overlap', True)
+        # Skip last trade by default since it's the key for the next call
+        x = slice(None, -1) if has_overlap else slice(None)

-        if not from_id:
+        if not from_id or not self._valid_trade_pagination_id(pair, from_id):
             # Fetch first elements using timebased method to get an ID to paginate on
             # Depending on the Exchange, this can introduce a drift at the start of the interval
             # of up to an hour.
             # e.g. Binance returns the "last 1000" candles within a 1h time interval
             # - so we will miss the first trades.
-            t = await self._async_fetch_trades(pair, since=since)
-            # DEFAULT_TRADES_COLUMNS: 0 -> timestamp
-            # DEFAULT_TRADES_COLUMNS: 1 -> id
-            from_id = t[-1][1]
-            trades.extend(t[:-1])
+            t, from_id = await self._async_fetch_trades(pair, since=since)
+            trades.extend(t[x])
         while True:
             try:
-                t = await self._async_fetch_trades(pair,
-                                                   params={self._trades_pagination_arg: from_id})
+                t, from_id_next = await self._async_fetch_trades(
+                    pair, params={self._trades_pagination_arg: from_id})
                 if t:
-                    # Skip last id since its the key for the next call
-                    trades.extend(t[:-1])
-                    if from_id == t[-1][1] or t[-1][0] > until:
+                    trades.extend(t[x])
+                    if from_id == from_id_next or t[-1][0] > until:
                         logger.debug(f"Stopping because from_id did not change. "
                                      f"Reached {t[-1][0]} > {until}")
                         # Reached the end of the defined-download period - add last trade as well.
-                        trades.extend(t[-1:])
+                        if has_overlap:
+                            trades.extend(t[-1:])
                         break
-                    from_id = t[-1][1]
+                    from_id = from_id_next
                 else:
                     logger.debug("Stopping as no more trades were returned.")
                     break
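The overlap slice in a nutshell, as a runnable toy; has_overlap stands in for the exchange's _ft_has['trades_pagination_overlap'] flag:

trades_page = [['t1'], ['t2'], ['t3']]

has_overlap = True   # default: the last trade doubles as the next page's key
x = slice(None, -1) if has_overlap else slice(None)
print(trades_page[x])  # [['t1'], ['t2']] - hold back the overlapping trade

has_overlap = False  # e.g. Kraken, whose 'since' pagination does not re-fetch it
x = slice(None, -1) if has_overlap else slice(None)
print(trades_page[x])  # [['t1'], ['t2'], ['t3']] - keep everything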
@@ -2317,19 +2339,19 @@ class Exchange:
         # DEFAULT_TRADES_COLUMNS: 1 -> id
         while True:
             try:
-                t = await self._async_fetch_trades(pair, since=since)
+                t, since_next = await self._async_fetch_trades(pair, since=since)
                 if t:
                     # No more trades to download available at the exchange,
                     # So we repeatedly get the same trade over and over again.
-                    if since == t[-1][0] and len(t) == 1:
+                    if since == since_next and len(t) == 1:
                         logger.debug("Stopping because no more trades are available.")
                         break
-                    since = t[-1][0]
+                    since = since_next
                     trades.extend(t)
                     # Reached the end of the defined-download period
-                    if until and t[-1][0] > until:
+                    if until and since_next > until:
                         logger.debug(
-                            f"Stopping because until was reached. {t[-1][0]} > {until}")
+                            f"Stopping because until was reached. {since_next} > {until}")
                         break
                 else:
                     logger.debug("Stopping as no more trades were returned.")


@@ -118,6 +118,27 @@ def timeframe_to_msecs(timeframe: str) -> int:
     return ccxt.Exchange.parse_timeframe(timeframe) * 1000


+def timeframe_to_resample_freq(timeframe: str) -> str:
+    """
+    Translates the timeframe interval value written in the human readable
+    form ('1m', '5m', '1h', '1d', '1w', etc.) to the resample frequency
+    used by pandas ('1T', '5T', '1H', '1D', '1W', etc.)
+    """
+    if timeframe == '1y':
+        return '1YS'
+    timeframe_seconds = timeframe_to_seconds(timeframe)
+    timeframe_minutes = timeframe_seconds // 60
+    resample_interval = f'{timeframe_seconds}s'
+    if 10000 < timeframe_minutes < 43200:
+        resample_interval = '1W-MON'
+    elif timeframe_minutes >= 43200 and timeframe_minutes < 525600:
+        # Monthly candles need special treatment to stick to the 1st of the month
+        resample_interval = f'{timeframe}S'
+    elif timeframe_minutes > 43200:
+        resample_interval = timeframe
+    return resample_interval
+
+
 def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
     """
     Use Timeframe and determine the candle start date for this date.
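Reading the branches above directly, the mapping works out as follows; this sanity sketch assumes freqtrade is installed so ccxt can parse the timeframe strings:

from freqtrade.exchange import timeframe_to_resample_freq

assert timeframe_to_resample_freq('1m') == '60s'
assert timeframe_to_resample_freq('5m') == '300s'
assert timeframe_to_resample_freq('1d') == '86400s'
assert timeframe_to_resample_freq('1w') == '1W-MON'  # 10080 minutes -> weekly, Monday-anchored
assert timeframe_to_resample_freq('1M') == '1MS'     # month-start frequency
assert timeframe_to_resample_freq('1y') == '1YS'     # year-start, special-cased first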


@@ -1,4 +1,4 @@
-""" Huobi exchange subclass """
+""" HTX exchange subclass """
 import logging
 from typing import Dict

@@ -9,9 +9,9 @@ from freqtrade.exchange import Exchange
 logger = logging.getLogger(__name__)


-class Huobi(Exchange):
+class Htx(Exchange):
     """
-    Huobi exchange class. Contains adjustments needed for Freqtrade to work
+    HTX exchange class. Contains adjustments needed for Freqtrade to work
     with this exchange.
     """


@@ -8,11 +8,9 @@ from pandas import DataFrame

 from freqtrade.constants import BuySell
 from freqtrade.enums import MarginMode, TradingMode
-from freqtrade.exceptions import (DDosProtection, InsufficientFundsError, InvalidOrderException,
-                                  OperationalException, TemporaryError)
+from freqtrade.exceptions import DDosProtection, OperationalException, TemporaryError
 from freqtrade.exchange import Exchange
 from freqtrade.exchange.common import retrier
-from freqtrade.exchange.exchange_utils import ROUND_DOWN, ROUND_UP
 from freqtrade.exchange.types import Tickers

@@ -24,13 +22,15 @@ class Kraken(Exchange):
     _params: Dict = {"trading_agreement": "agree"}
     _ft_has: Dict = {
         "stoploss_on_exchange": True,
-        "stop_price_param": "stopPrice",
-        "stop_price_prop": "stopPrice",
+        "stop_price_param": "stopLossPrice",
+        "stop_price_prop": "stopLossPrice",
+        "stoploss_order_types": {"limit": "limit", "market": "market"},
         "order_time_in_force": ["GTC", "IOC", "PO"],
         "ohlcv_candle_limit": 720,
         "ohlcv_has_history": False,
         "trades_pagination": "id",
         "trades_pagination_arg": "since",
+        "trades_pagination_overlap": False,
         "mark_ohlcv_timeframe": "4h",
     }
@@ -90,75 +90,6 @@ class Kraken(Exchange):
         except ccxt.BaseError as e:
             raise OperationalException(e) from e

-    def stoploss_adjust(self, stop_loss: float, order: Dict, side: str) -> bool:
-        """
-        Verify stop_loss against stoploss-order value (limit or price)
-        Returns True if adjustment is necessary.
-        """
-        return (order['type'] in ('stop-loss', 'stop-loss-limit') and (
-            (side == "sell" and stop_loss > float(order['price'])) or
-            (side == "buy" and stop_loss < float(order['price']))
-        ))
-
-    @retrier(retries=0)
-    def create_stoploss(self, pair: str, amount: float, stop_price: float,
-                        order_types: Dict, side: BuySell, leverage: float) -> Dict:
-        """
-        Creates a stoploss market order.
-        Stoploss market orders is the only stoploss type supported by kraken.
-        TODO: investigate if this can be combined with generic implementation
-        (careful, prices are reversed)
-        """
-        params = self._params.copy()
-        if self.trading_mode == TradingMode.FUTURES:
-            params.update({'reduceOnly': True})
-
-        round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP
-        if order_types.get('stoploss', 'market') == 'limit':
-            ordertype = "stop-loss-limit"
-            limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99)
-            if side == "sell":
-                limit_rate = stop_price * limit_price_pct
-            else:
-                limit_rate = stop_price * (2 - limit_price_pct)
-            params['price2'] = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode)
-        else:
-            ordertype = "stop-loss"
-
-        stop_price = self.price_to_precision(pair, stop_price, rounding_mode=round_mode)
-
-        if self._config['dry_run']:
-            dry_order = self.create_dry_run_order(
-                pair, ordertype, side, amount, stop_price, leverage, stop_loss=True)
-            return dry_order
-
-        try:
-            amount = self.amount_to_precision(pair, amount)
-
-            order = self._api.create_order(symbol=pair, type=ordertype, side=side,
-                                           amount=amount, price=stop_price, params=params)
-            self._log_exchange_response('create_stoploss_order', order)
-            logger.info('stoploss order added for %s. '
-                        'stop price: %s.', pair, stop_price)
-            return order
-        except ccxt.InsufficientFunds as e:
-            raise InsufficientFundsError(
-                f'Insufficient funds to create {ordertype} {side} order on market {pair}. '
-                f'Tried to create stoploss with amount {amount} at stoploss {stop_price}. '
-                f'Message: {e}') from e
-        except ccxt.InvalidOrder as e:
-            raise InvalidOrderException(
-                f'Could not create {ordertype} {side} order on market {pair}. '
-                f'Tried to create stoploss with amount {amount} at stoploss {stop_price}. '
-                f'Message: {e}') from e
-        except ccxt.DDoSProtection as e:
-            raise DDosProtection(e) from e
-        except (ccxt.NetworkError, ccxt.ExchangeError) as e:
-            raise TemporaryError(
-                f'Could not place {side} order due to {e.__class__.__name__}. Message: {e}') from e
-        except ccxt.BaseError as e:
-            raise OperationalException(e) from e
-
     def _set_leverage(
         self,
         leverage: float,
@@ -227,18 +158,30 @@ class Kraken(Exchange):
         return fees if is_short else -fees

-    def _trades_contracts_to_amount(self, trades: List) -> List:
+    def _get_trade_pagination_next_value(self, trades: List[Dict]):
         """
-        Fix "last" id issue for kraken data downloads
-        This whole override can probably be removed once the following
-        issue is closed in ccxt: https://github.com/ccxt/ccxt/issues/15827
+        Extract pagination id for the next "from_id" value
+        Applies only to fetch_trade_history by id.
         """
-        super()._trades_contracts_to_amount(trades)
-        if (
-            len(trades) > 0
-            and isinstance(trades[-1].get('info'), list)
-            and len(trades[-1].get('info', [])) > 7
-        ):
-            trades[-1]['id'] = trades[-1].get('info', [])[-1]
-        return trades
+        if len(trades) > 0:
+            if (
+                isinstance(trades[-1].get('info'), list)
+                and len(trades[-1].get('info', [])) > 7
+            ):
+                # Trade response's "last" value.
+                return trades[-1].get('info', [])[-1]
+            # Fall back to timestamp if info is somehow empty.
+            return trades[-1].get('timestamp')
+        return None
+
+    def _valid_trade_pagination_id(self, pair: str, from_id: str) -> bool:
+        """
+        Verify trade-pagination id is valid.
+        Workaround for odd Kraken issue where ID is sometimes wrong.
+        """
+        # Regular ids are in timestamp format 1705443695120072285
+        # If the id is smaller than 19 characters, it's not a valid timestamp.
+        if len(from_id) >= 19:
+            return True
+        logger.debug(f"{pair} - trade-pagination id is not valid. Fallback to timestamp.")
+        return False
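A quick sketch of the length heuristic on Kraken's nanosecond-style pagination ids:

def looks_like_valid_id(from_id: str) -> bool:
    # 19 digits ~ a nanosecond unix timestamp, e.g. 1705443695120072285
    return len(from_id) >= 19

print(looks_like_valid_id('1705443695120072285'))  # True  - paginate by id
print(looks_like_valid_id('126181333'))            # False - fall back to timestamp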


@@ -255,7 +255,7 @@ class FreqaiDataKitchen:
         if (1 - len(filtered_df) / len(unfiltered_df)) > 0.1 and self.live:
             worst_indicator = str(unfiltered_df.count().idxmin())
             logger.warning(
-                f" {(1 - len(filtered_df)/len(unfiltered_df)) * 100:.0f} percent "
+                f" {(1 - len(filtered_df) / len(unfiltered_df)) * 100:.0f} percent "
                 " of training data dropped due to NaNs, model may perform inconsistent "
                 f"with expectations. Verify {worst_indicator}"
             )
@@ -432,8 +432,12 @@ class FreqaiDataKitchen:
         if self.freqai_config["feature_parameters"].get("DI_threshold", 0) > 0:
             append_df["DI_values"] = self.DI_values

+        user_cols = [col for col in dataframe_backtest.columns if col.startswith("%%")]
+        cols = ["date"]
+        cols.extend(user_cols)
+
         dataframe_backtest.reset_index(drop=True, inplace=True)
-        merged_df = pd.concat([dataframe_backtest["date"], append_df], axis=1)
+        merged_df = pd.concat([dataframe_backtest[cols], append_df], axis=1)
         return merged_df

     def append_predictions(self, append_df: DataFrame) -> None:
@@ -451,7 +455,8 @@ class FreqaiDataKitchen:
         Back fill values to before the backtesting range so that the dataframe matches size
         when it goes back to the strategy. These rows are not included in the backtest.
         """
-        to_keep = [col for col in dataframe.columns if not col.startswith("&")]
+        to_keep = [col for col in dataframe.columns if
+                   not col.startswith("&") and not col.startswith("%%")]
         self.return_dataframe = pd.merge(dataframe[to_keep],
                                          self.full_df, how='left', on='date')
         self.return_dataframe[self.full_df.columns] = (


@@ -13,7 +13,6 @@ from freqtrade.data.dataprovider import DataProvider
 from freqtrade.data.history.history_utils import refresh_backtest_ohlcv_data
 from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
-from freqtrade.exchange.exchange import market_is_active
 from freqtrade.freqai.data_drawer import FreqaiDataDrawer
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.plugins.pairlist.pairlist_helpers import dynamic_expand_pairlist

@@ -33,8 +32,11 @@ def download_all_data_for_training(dp: DataProvider, config: Config) -> None:
     if dp._exchange is None:
         raise OperationalException('No exchange object found.')
-    markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
-               or config.get('include_inactive')]
+    markets = [
+        p for p in dp._exchange.get_markets(
+            tradable_only=True, active_only=not config.get('include_inactive')
+        ).keys()
+    ]

     all_pairs = dynamic_expand_pairlist(config, markets)


@@ -1424,11 +1424,11 @@ class FreqtradeBot(LoggingMixin):
                 # New candle
                 proposed_rate = self.exchange.get_rate(
                     trade.pair, side='entry', is_short=trade.is_short, refresh=True)
-                adjusted_entry_price = strategy_safe_wrapper(self.strategy.adjust_entry_price,
-                                                             default_retval=order_obj.price)(
+                adjusted_entry_price = strategy_safe_wrapper(
+                    self.strategy.adjust_entry_price, default_retval=order_obj.safe_placement_price)(
                     trade=trade, order=order_obj, pair=trade.pair,
                     current_time=datetime.now(timezone.utc), proposed_rate=proposed_rate,
-                    current_order_rate=order_obj.safe_price, entry_tag=trade.enter_tag,
+                    current_order_rate=order_obj.safe_placement_price, entry_tag=trade.enter_tag,
                     side=trade.trade_direction)

                 replacing = True
@@ -1436,7 +1436,7 @@ class FreqtradeBot(LoggingMixin):
                 if not adjusted_entry_price:
                     replacing = False
                     cancel_reason = constants.CANCEL_REASON['USER_CANCEL']
-                if order_obj.price != adjusted_entry_price:
+                if order_obj.safe_placement_price != adjusted_entry_price:
                     # cancel existing order if new price is supplied or None
                     res = self.handle_cancel_enter(trade, order, order_obj, cancel_reason,
                                                    replacing=replacing)


@@ -33,7 +33,8 @@ from freqtrade.optimize.optimize_reports import (generate_backtest_stats, genera
                                                  show_backtest_results,
                                                  store_backtest_analysis_results,
                                                  store_backtest_stats)
-from freqtrade.persistence import LocalTrade, Order, PairLocks, Trade
+from freqtrade.persistence import (LocalTrade, Order, PairLocks, Trade, disable_database_use,
+                                   enable_database_use)
 from freqtrade.plugins.pairlistmanager import PairListManager
 from freqtrade.plugins.protectionmanager import ProtectionManager
 from freqtrade.resolvers import ExchangeResolver, StrategyResolver

@@ -116,8 +117,9 @@ class Backtesting:
             raise OperationalException("Timeframe needs to be set in either "
                                        "configuration or as cli argument `--timeframe 5m`")
         self.timeframe = str(self.config.get('timeframe'))
-        self.disable_database_use()
         self.timeframe_min = timeframe_to_minutes(self.timeframe)
+        self.timeframe_td = timedelta(minutes=self.timeframe_min)
+        self.disable_database_use()
         self.init_backtest_detail()
         self.pairlists = PairListManager(self.exchange, self.config, self.dataprovider)
         self._validate_pairlists_for_backtesting()

@@ -177,8 +179,7 @@ class Backtesting:
     @staticmethod
     def cleanup():
         LoggingMixin.show_output = True
-        PairLocks.use_db = True
-        Trade.use_db = True
+        enable_database_use()

     def init_backtest_detail(self) -> None:
         # Load detail timeframe if specified

@@ -325,9 +326,7 @@ class Backtesting:
         self.futures_data = {}

     def disable_database_use(self):
-        PairLocks.use_db = False
-        PairLocks.timeframe = self.timeframe
-        Trade.use_db = False
+        disable_database_use(self.timeframe)

     def prepare_backtest(self, enable_protections):
         """

@@ -1207,10 +1206,10 @@ class Backtesting:
         # Indexes per pair, so some pairs are allowed to have a missing start.
         indexes: Dict = defaultdict(int)
-        current_time = start_date + timedelta(minutes=self.timeframe_min)
+        current_time = start_date + self.timeframe_td
         self.progress.init_step(BacktestState.BACKTEST, int(
-            (end_date - start_date) / timedelta(minutes=self.timeframe_min)))
+            (end_date - start_date) / self.timeframe_td))
         # Loop timerange and get candle for each pair at that point in time
         while current_time <= end_date:
             open_trade_count_start = LocalTrade.bt_open_open_trade_count

@@ -1237,7 +1236,7 @@ class Backtesting:
                         # Spread out into detail timeframe.
                         # Should only happen when we are either in a trade for this pair
                         # or when we got the signal for a new trade.
-                        exit_candle_end = current_detail_time + timedelta(minutes=self.timeframe_min)
+                        exit_candle_end = current_detail_time + self.timeframe_td

                         detail_data = self.detail_data[pair]
                         detail_data = detail_data.loc[

@@ -1273,7 +1272,7 @@ class Backtesting:
             # Move time one configured time_interval ahead.
             self.progress.increment()
-            current_time += timedelta(minutes=self.timeframe_min)
+            current_time += self.timeframe_td

         self.handle_left_open(LocalTrade.bt_trades_open_pp, data=data)
         self.wallets.update()


@@ -54,7 +54,7 @@ class BaseAnalysis:
         self.full_varHolder.from_dt = parsed_timerange.startdt

         if parsed_timerange.stopdt is None:
-            self.full_varHolder.to_dt = datetime.utcnow()
+            self.full_varHolder.to_dt = datetime.now(timezone.utc)
         else:
             self.full_varHolder.to_dt = parsed_timerange.stopdt


@@ -4,3 +4,5 @@ from freqtrade.persistence.key_value_store import KeyStoreKeys, KeyValueStore
 from freqtrade.persistence.models import init_db
 from freqtrade.persistence.pairlock_middleware import PairLocks
 from freqtrade.persistence.trade_model import LocalTrade, Order, Trade
+from freqtrade.persistence.usedb_context import (FtNoDBContext, disable_database_use,
+                                                 enable_database_use)


@@ -106,6 +106,11 @@ class Order(ModelBase):
     def safe_amount(self) -> float:
         return self.amount or self.ft_amount

+    @property
+    def safe_placement_price(self) -> float:
+        """Price at which the order was placed"""
+        return self.price or self.stop_price or self.ft_price
+
     @property
     def safe_price(self) -> float:
         return self.average or self.price or self.stop_price or self.ft_price
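A sketch of the fallback chain: plain 'or' picks the first truthy attribute, so stoploss orders, which carry stop_price but no price, still resolve. Toy values only (note that 0.0 would also fall through, as with any falsy value):

price, stop_price, ft_price = None, 1.25, 1.20
assert (price or stop_price or ft_price) == 1.25   # stoploss order: stop_price wins
price = 1.30
assert (price or stop_price or ft_price) == 1.30   # regular order: price wins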
@@ -1637,7 +1642,7 @@ class Trade(ModelBase, LocalTrade):
         Retrieves total realized profit
         """
         if Trade.use_db:
-            total_profit: float = Trade.session.execute(
+            total_profit = Trade.session.execute(
                 select(func.sum(Trade.close_profit_abs)).filter(Trade.is_open.is_(False))
             ).scalar_one()
         else:

@@ -1845,4 +1850,4 @@ class Trade(ModelBase, LocalTrade):
                 Order.order_filled_date >= start_date,
                 Order.status == 'closed'
             )).scalar_one()
-        return trading_volume
+        return trading_volume or 0.0


@@ -0,0 +1,33 @@
+from freqtrade.persistence.pairlock_middleware import PairLocks
+from freqtrade.persistence.trade_model import Trade
+
+
+def disable_database_use(timeframe: str) -> None:
+    """
+    Disable database usage for PairLocks and Trade models.
+    Used for backtesting, and some other utility commands.
+    """
+    PairLocks.use_db = False
+    PairLocks.timeframe = timeframe
+    Trade.use_db = False
+
+
+def enable_database_use() -> None:
+    """
+    Cleanup function to restore database usage.
+    """
+    PairLocks.use_db = True
+    PairLocks.timeframe = ''
+    Trade.use_db = True
+
+
+class FtNoDBContext:
+    def __init__(self, timeframe: str = ''):
+        self.timeframe = timeframe
+
+    def __enter__(self):
+        disable_database_use(self.timeframe)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        enable_database_use()
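A hypothetical usage sketch of the new context manager, assuming freqtrade is importable; database use is suppressed inside the block and restored on exit, even when an exception propagates:

from freqtrade.persistence import FtNoDBContext, PairLocks, Trade

with FtNoDBContext('5m'):
    assert PairLocks.use_db is False and Trade.use_db is False
    # run pairlist evaluation or other backtest-style code without touching the DB
assert PairLocks.use_db is True and Trade.use_db is True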


@@ -62,16 +62,16 @@ class VolumePairList(IPairList):
         # get timeframe in minutes and seconds
         self._tf_in_min = timeframe_to_minutes(self._lookback_timeframe)
-        self._tf_in_sec = self._tf_in_min * 60
+        _tf_in_sec = self._tf_in_min * 60

         # whether to use range lookback or not
         self._use_range = (self._tf_in_min > 0) & (self._lookback_period > 0)

-        if self._use_range & (self._refresh_period < self._tf_in_sec):
+        if self._use_range & (self._refresh_period < _tf_in_sec):
             raise OperationalException(
                 f'Refresh period of {self._refresh_period} seconds is smaller than one '
                 f'timeframe of {self._lookback_timeframe}. Please adjust refresh_period '
-                f'to at least {self._tf_in_sec} and restart the bot.'
+                f'to at least {_tf_in_sec} and restart the bot.'
             )

         if (not self._use_range and not (


@@ -1,6 +1,6 @@
 import logging
 import secrets
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
 from typing import Any, Dict, Union

 import jwt

@@ -88,14 +88,14 @@ async def validate_ws_token(
 def create_token(data: dict, secret_key: str, token_type: str = "access") -> str:
     to_encode = data.copy()
     if token_type == "access":
-        expire = datetime.utcnow() + timedelta(minutes=15)
+        expire = datetime.now(timezone.utc) + timedelta(minutes=15)
     elif token_type == "refresh":
-        expire = datetime.utcnow() + timedelta(days=30)
+        expire = datetime.now(timezone.utc) + timedelta(days=30)
     else:
         raise ValueError()
     to_encode.update({
         "exp": expire,
-        "iat": datetime.utcnow(),
+        "iat": datetime.now(timezone.utc),
         "type": token_type,
     })
     encoded_jwt = jwt.encode(to_encode, secret_key, algorithm=ALGORITHM)
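A self-contained sketch of the timezone-aware pattern, assuming pyjwt is installed; pyjwt converts datetime objects in "exp"/"iat" to unix timestamps itself. The secret is made up for the example:

from datetime import datetime, timedelta, timezone

import jwt

SECRET = "change-me"  # hypothetical secret, for the sketch only
payload = {
    "sub": "freqtrader",
    "iat": datetime.now(timezone.utc),                          # aware, not utcnow()
    "exp": datetime.now(timezone.utc) + timedelta(minutes=15),  # access-token lifetime
    "type": "access",
}
token = jwt.encode(payload, SECRET, algorithm="HS256")
print(jwt.decode(token, SECRET, algorithms=["HS256"])["type"])  # -> access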


@@ -7,6 +7,7 @@ from fastapi.exceptions import HTTPException
 from freqtrade.constants import Config
 from freqtrade.enums import CandleType
 from freqtrade.exceptions import OperationalException
+from freqtrade.persistence import FtNoDBContext
 from freqtrade.rpc.api_server.api_schemas import (BackgroundTaskStatus, BgJobStarted,
                                                   ExchangeModePayloadMixin, PairListsPayload,
                                                   PairListsResponse, WhitelistEvaluateResponse)

@@ -57,7 +58,7 @@ def __run_pairlist(job_id: str, config_loc: Config):
         ApiBG.jobs[job_id]['is_running'] = True
         from freqtrade.plugins.pairlistmanager import PairListManager

-        exchange = get_exchange(config_loc)
-        pairlists = PairListManager(exchange, config_loc)
-        pairlists.refresh_pairlist()
+        with FtNoDBContext():
+            exchange = get_exchange(config_loc)
+            pairlists = PairListManager(exchange, config_loc)
+            pairlists.refresh_pairlist()


@@ -107,7 +107,7 @@ class ApiServer(RPCHandler):
         ApiServer._message_stream.publish(msg)

     def handle_rpc_exception(self, request, exc):
-        logger.exception(f"API Error calling: {exc}")
+        logger.error(f"API Error calling: {exc}")
         return JSONResponse(
             status_code=502,
             content={'error': f"Error querying {request.url.path}: {exc.message}"}


@@ -1346,7 +1346,7 @@ class Telegram(RPCHandler):
             output = "<b>Performance:</b>\n"
             for i, trade in enumerate(trades):
                 stat_line = (
-                    f"{i+1}.\t <code>{trade['pair']}\t"
+                    f"{i + 1}.\t <code>{trade['pair']}\t"
                     f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} "
                     f"({trade['profit_ratio']:.2%}) "
                     f"({trade['count']})</code>\n")

@@ -1378,7 +1378,7 @@ class Telegram(RPCHandler):
             output = "<b>Entry Tag Performance:</b>\n"
             for i, trade in enumerate(trades):
                 stat_line = (
-                    f"{i+1}.\t <code>{trade['enter_tag']}\t"
+                    f"{i + 1}.\t <code>{trade['enter_tag']}\t"
                     f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} "
                     f"({trade['profit_ratio']:.2%}) "
                     f"({trade['count']})</code>\n")

@@ -1410,7 +1410,7 @@ class Telegram(RPCHandler):
             output = "<b>Exit Reason Performance:</b>\n"
             for i, trade in enumerate(trades):
                 stat_line = (
-                    f"{i+1}.\t <code>{trade['exit_reason']}\t"
+                    f"{i + 1}.\t <code>{trade['exit_reason']}\t"
                     f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} "
                     f"({trade['profit_ratio']:.2%}) "
                     f"({trade['count']})</code>\n")

@@ -1442,7 +1442,7 @@ class Telegram(RPCHandler):
             output = "<b>Mix Tag Performance:</b>\n"
             for i, trade in enumerate(trades):
                 stat_line = (
-                    f"{i+1}.\t <code>{trade['mix_tag']}\t"
+                    f"{i + 1}.\t <code>{trade['mix_tag']}\t"
                     f"{fmt_coin(trade['profit_abs'], self._config['stake_currency'])} "
                     f"({trade['profit_ratio']:.2%}) "
                     f"({trade['count']})</code>\n")


@@ -1004,7 +1004,7 @@ class IStrategy(ABC, HyperStrategyMixin):
         :param is_short: Indicating existing trade direction.
         :return: (enter, exit) A bool-tuple with enter / exit values.
         """
-        latest, latest_date = self.get_latest_candle(pair, timeframe, dataframe)
+        latest, _latest_date = self.get_latest_candle(pair, timeframe, dataframe)
         if latest is None:
             return False, False, None

@@ -1388,7 +1388,8 @@ class IStrategy(ABC, HyperStrategyMixin):
         """
         logger.debug(f"Populating enter signals for pair {metadata.get('pair')}.")
+        # Initialize column to work around Pandas bug #56503.
+        dataframe.loc[:, 'enter_tag'] = ''
         df = self.populate_entry_trend(dataframe, metadata)
         if 'enter_long' not in df.columns:
             df = df.rename({'buy': 'enter_long', 'buy_tag': 'enter_tag'}, axis='columns')

@@ -1404,6 +1405,8 @@ class IStrategy(ABC, HyperStrategyMixin):
             currently traded pair
         :return: DataFrame with exit column
         """
+        # Initialize column to work around Pandas bug #56503.
+        dataframe.loc[:, 'exit_tag'] = ''
         logger.debug(f"Populating exit signals for pair {metadata.get('pair')}.")
         df = self.populate_exit_trend(dataframe, metadata)
         if 'exit_long' not in df.columns:


@@ -7,21 +7,21 @@
 -r docs/requirements-docs.txt

 coveralls==3.3.1
-ruff==0.1.11
+ruff==0.1.14
 mypy==1.8.0
 pre-commit==3.6.0
 pytest==7.4.4
 pytest-asyncio==0.21.1
 pytest-cov==4.1.0
 pytest-mock==3.12.0
-pytest-random-order==1.1.0
+pytest-random-order==1.1.1
 pytest-xdist==3.5.0
 isort==5.13.2
 # For datetime mocking
 time-machine==2.13.0

 # Convert jupyter notebooks to markdown documents
-nbconvert==7.14.0
+nbconvert==7.14.2

 # mypy types
 types-cachetools==5.3.0.7


@@ -2,10 +2,10 @@
 -r requirements-freqai.txt

 # Required for freqai-rl
-torch==2.1.2
+torch==2.1.2; python_version < '3.12'
 #until these branches will be released we can use this
-gymnasium==0.29.1
-stable_baselines3==2.2.1
-sb3_contrib>=2.0.0a9
+gymnasium==0.29.1; python_version < '3.12'
+stable_baselines3==2.2.1; python_version < '3.12'
+sb3_contrib>=2.0.0a9; python_version < '3.12'

 # Progress bar for stable-baselines3 and sb3-contrib
 tqdm==4.66.1


@@ -3,9 +3,9 @@
 -r requirements-plot.txt

 # Required for freqai
-scikit-learn==1.3.2
+scikit-learn==1.4.0
 joblib==1.3.2
-catboost==1.2.2; 'arm' not in platform_machine
+catboost==1.2.2; 'arm' not in platform_machine and python_version < '3.12'
 lightgbm==4.2.0
 xgboost==2.0.3
 tensorboard==2.15.1


@@ -2,7 +2,7 @@
 -r requirements.txt

 # Required for hyperopt
-scipy==1.11.4
-scikit-learn==1.3.2
+scipy==1.12.0
+scikit-learn==1.4.0
 ft-scikit-optimize==0.9.2
 filelock==3.13.1


@@ -2,10 +2,10 @@ numpy==1.26.3
 pandas==2.1.4
 pandas-ta==0.3.14b

-ccxt==4.2.9
+ccxt==4.2.21
 cryptography==41.0.7
 aiohttp==3.9.1
-SQLAlchemy==2.0.23
+SQLAlchemy==2.0.25
 python-telegram-bot==20.7
 # can't be hard-pinned due to telegram-bot pinning httpx with ~
 httpx>=0.24.1

@@ -13,16 +13,16 @@ arrow==1.3.0
 cachetools==5.3.2
 requests==2.31.0
 urllib3==2.1.0
-jsonschema==4.20.0
+jsonschema==4.21.1
 TA-Lib==0.4.28
 technical==1.4.2
 tabulate==0.9.0
 pycoingecko==3.1.0
-jinja2==3.1.2
+jinja2==3.1.3
 tables==3.9.1
 joblib==1.3.2
 rich==13.7.0
-pyarrow==14.0.2; platform_machine != 'armv7l'
+pyarrow==15.0.0; platform_machine != 'armv7l'

 # find first, C search in arrays
 py_find_1st==1.1.6

@@ -30,18 +30,18 @@ py_find_1st==1.1.6
 # Load ticker files 30% faster
 python-rapidjson==1.14
 # Properly format api responses
-orjson==3.9.10
+orjson==3.9.12
 # Notify systemd
 sdnotify==0.3.2
 # API Server
-fastapi==0.108.0
+fastapi==0.109.0
 pydantic==2.5.3
-uvicorn==0.25.0
+uvicorn==0.26.0
 pyjwt==2.8.0
 aiofiles==23.2.1
-psutil==5.9.7
+psutil==5.9.8
 # Support for colorized terminal output
 colorama==0.4.6


@@ -70,7 +70,7 @@ setup(
     ],
     install_requires=[
         # from requirements.txt
-        'ccxt>=4.0.0',
+        'ccxt>=4.2.15',
         'SQLAlchemy>=2.0.6',
         'python-telegram-bot>=20.1',
         'arrow>=1.0.0',


@@ -1445,12 +1445,13 @@ def test_start_list_data(testdatadir, capsys):
         start_list_data(pargs)
     captured = capsys.readouterr()
     assert "Found 2 pair / timeframe combinations." in captured.out
-    assert ("\n| Pair | Timeframe | Type | From | To |\n"
-            in captured.out)
+    assert (
+        "\n| Pair | Timeframe | Type "
+        "| From | To | Candles |\n") in captured.out
     assert "UNITTEST/BTC" not in captured.out
     assert (
-        "\n| XRP/ETH | 1m | spot | 2019-10-11 00:00:00 | 2019-10-13 11:19:00 |\n"
-        in captured.out)
+        "\n| XRP/ETH | 1m | spot | "
+        "2019-10-11 00:00:00 | 2019-10-13 11:19:00 | 2469 |\n") in captured.out


 @pytest.mark.usefixtures("init_persistence")

@@ -1508,7 +1509,7 @@ def test_backtesting_show(mocker, testdatadir, capsys):
     pargs['config'] = None
     start_backtesting_show(pargs)
     assert sbr.call_count == 1
-    out, err = capsys.readouterr()
+    out, _err = capsys.readouterr()
     assert "Pairs for Strategy" in out


@@ -3,7 +3,7 @@ import json
 import logging
 import re
 from copy import deepcopy
-from datetime import timedelta
+from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Optional
 from unittest.mock import MagicMock, Mock, PropertyMock

@@ -18,13 +18,11 @@ from freqtrade.commands import Arguments
 from freqtrade.data.converter import ohlcv_to_dataframe, trades_list_to_df
 from freqtrade.edge import PairInfo
 from freqtrade.enums import CandleType, MarginMode, RunMode, SignalDirection, TradingMode
-from freqtrade.exchange import Exchange
-from freqtrade.exchange.exchange import timeframe_to_minutes
+from freqtrade.exchange import Exchange, timeframe_to_minutes, timeframe_to_seconds
 from freqtrade.freqtradebot import FreqtradeBot
 from freqtrade.persistence import LocalTrade, Order, Trade, init_db
 from freqtrade.resolvers import ExchangeResolver
-from freqtrade.util import dt_ts
-from freqtrade.util.datetime_helpers import dt_now
+from freqtrade.util import dt_now, dt_ts
 from freqtrade.worker import Worker
 from tests.conftest_trades import (leverage_trade, mock_trade_1, mock_trade_2, mock_trade_3,
                                    mock_trade_4, mock_trade_5, mock_trade_6, short_trade)

@@ -107,17 +105,62 @@ def get_args(args):
     return Arguments(args).get_parsed_arg()


+def generate_trades_history(n_rows, start_date: Optional[datetime] = None, days=5):
+    np.random.seed(42)
+    if not start_date:
+        start_date = datetime(2020, 1, 1, tzinfo=timezone.utc)
+
+    # Generate random data
+    end_date = start_date + timedelta(days=days)
+    _start_timestamp = start_date.timestamp()
+    _end_timestamp = pd.to_datetime(end_date).timestamp()
+
+    random_timestamps_in_seconds = np.random.uniform(_start_timestamp, _end_timestamp, n_rows)
+    timestamp = pd.to_datetime(random_timestamps_in_seconds, unit='s')
+
+    id = [
+        f'a{np.random.randint(1e6, 1e7 - 1)}cd{np.random.randint(100, 999)}'
+        for _ in range(n_rows)
+    ]
+
+    side = np.random.choice(['buy', 'sell'], n_rows)
+
+    # Initial price and subsequent changes
+    initial_price = 0.019626
+    price_changes = np.random.normal(0, initial_price * 0.05, n_rows)
+    price = np.cumsum(np.concatenate(([initial_price], price_changes)))[:n_rows]
+
+    amount = np.random.uniform(0.011, 20, n_rows)
+    cost = price * amount
+
+    # Create DataFrame
+    df = pd.DataFrame({'timestamp': timestamp, 'id': id, 'type': None, 'side': side,
+                       'price': price, 'amount': amount, 'cost': cost})
+    df['date'] = pd.to_datetime(df['timestamp'], unit='ms', utc=True)
+    df = df.sort_values('timestamp').reset_index(drop=True)
+    assert list(df.columns) == constants.DEFAULT_TRADES_COLUMNS + ['date']
+    return df
+
+
 def generate_test_data(timeframe: str, size: int, start: str = '2020-07-05'):
     np.random.seed(42)
     base = np.random.normal(20, 2, size=size)

-    if timeframe == '1M':
+    if timeframe == '1y':
+        date = pd.date_range(start, periods=size, freq='1YS', tz='UTC')
+    elif timeframe == '1M':
         date = pd.date_range(start, periods=size, freq='1MS', tz='UTC')
-    elif timeframe == '1w':
+    elif timeframe == '3M':
+        date = pd.date_range(start, periods=size, freq='3MS', tz='UTC')
+    elif timeframe == '1w' or timeframe == '7d':
         date = pd.date_range(start, periods=size, freq='1W-MON', tz='UTC')
     else:
         tf_mins = timeframe_to_minutes(timeframe)
-        date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC')
+        if tf_mins >= 1:
+            date = pd.date_range(start, periods=size, freq=f'{tf_mins}min', tz='UTC')
+        else:
+            tf_secs = timeframe_to_seconds(timeframe)
+            date = pd.date_range(start, periods=size, freq=f'{tf_secs}s', tz='UTC')
     df = pd.DataFrame({
         'date': date,
         'open': base,
@@ -2386,14 +2429,7 @@ def trades_history_df(trades_history):

 @pytest.fixture(scope="function")
 def fetch_trades_result():
-    return [{'info': {'a': 126181329,
-                      'p': '0.01962700',
-                      'q': '0.04000000',
-                      'f': 138604155,
-                      'l': 138604155,
-                      'T': 1565798399463,
-                      'm': False,
-                      'M': True},
+    return [{'info': ['0.01962700', '0.04000000', '1565798399.4631551', 'b', 'm', '', '126181329'],
              'timestamp': 1565798399463,
              'datetime': '2019-08-14T15:59:59.463Z',
              'symbol': 'ETH/BTC',

@@ -2406,14 +2442,7 @@ def fetch_trades_result():
              'amount': 0.04,
              'cost': 0.00078508,
              'fee': None},
-            {'info': {'a': 126181330,
-                      'p': '0.01962700',
-                      'q': '0.24400000',
-                      'f': 138604156,
-                      'l': 138604156,
-                      'T': 1565798399629,
-                      'm': False,
-                      'M': True},
+            {'info': ['0.01962700', '0.24400000', '1565798399.6291551', 'b', 'm', '', '126181330'],
             'timestamp': 1565798399629,
             'datetime': '2019-08-14T15:59:59.629Z',
             'symbol': 'ETH/BTC',

@@ -2426,14 +2455,7 @@ def fetch_trades_result():
             'amount': 0.244,
             'cost': 0.004788987999999999,
             'fee': None},
-            {'info': {'a': 126181331,
-                      'p': '0.01962600',
-                      'q': '0.01100000',
-                      'f': 138604157,
-                      'l': 138604157,
-                      'T': 1565798399752,
-                      'm': True,
-                      'M': True},
+            {'info': ['0.01962600', '0.01100000', '1565798399.7521551', 's', 'm', '', '126181331'],
             'timestamp': 1565798399752,
             'datetime': '2019-08-14T15:59:59.752Z',
             'symbol': 'ETH/BTC',

@@ -2446,14 +2468,7 @@ def fetch_trades_result():
             'amount': 0.011,
             'cost': 0.00021588599999999999,
             'fee': None},
-            {'info': {'a': 126181332,
-                      'p': '0.01962600',
-                      'q': '0.01100000',
-                      'f': 138604158,
-                      'l': 138604158,
-                      'T': 1565798399862,
-                      'm': True,
-                      'M': True},
+            {'info': ['0.01962600', '0.01100000', '1565798399.8621551', 's', 'm', '', '126181332'],
             'timestamp': 1565798399862,
             'datetime': '2019-08-14T15:59:59.862Z',
             'symbol': 'ETH/BTC',

@@ -2466,14 +2481,8 @@ def fetch_trades_result():
             'amount': 0.011,
             'cost': 0.00021588599999999999,
             'fee': None},
-            {'info': {'a': 126181333,
-                      'p': '0.01952600',
-                      'q': '0.01200000',
-                      'f': 138604158,
-                      'l': 138604158,
-                      'T': 1565798399872,
-                      'm': True,
-                      'M': True},
+            {'info': ['0.01952600', '0.01200000', '1565798399.8721551', 's', 'm', '', '126181333',
+                      1565798399872512133],
             'timestamp': 1565798399872,
             'datetime': '2019-08-14T15:59:59.872Z',
             'symbol': 'ETH/BTC',


@@ -17,7 +17,8 @@ from freqtrade.data.history import (get_timerange, load_data, load_pair_history,
                                     validate_backtest_data)
 from freqtrade.data.history.idatahandler import IDataHandler
 from freqtrade.enums import CandleType
-from tests.conftest import generate_test_data, log_has, log_has_re
+from freqtrade.exchange import timeframe_to_minutes, timeframe_to_seconds
+from tests.conftest import generate_test_data, generate_trades_history, log_has, log_has_re
 from tests.data.test_history import _clean_test_file

@@ -51,6 +52,49 @@ def test_trades_to_ohlcv(trades_history_df, caplog):
     assert 'close' in df.columns
     assert df.iloc[0, :]['high'] == 0.019627
     assert df.iloc[0, :]['low'] == 0.019626
+    assert df.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:00+0000')
+
+    df_1h = trades_to_ohlcv(trades_history_df, '1h')
+    assert len(df_1h) == 1
+    assert df_1h.iloc[0, :]['high'] == 0.019627
+    assert df_1h.iloc[0, :]['low'] == 0.019626
+    assert df_1h.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:00:00+0000')
+
+    df_1s = trades_to_ohlcv(trades_history_df, '1s')
+    assert len(df_1s) == 2
+    assert df_1s.iloc[0, :]['high'] == 0.019627
+    assert df_1s.iloc[0, :]['low'] == 0.019627
+    assert df_1s.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:49+0000')
+    assert df_1s.iloc[-1, :]['date'] == pd.Timestamp('2019-08-14 15:59:59+0000')
+
+
+@pytest.mark.parametrize('timeframe,rows,days,candles,start,end,weekday', [
+    ('1s', 20_000, 5, 19522, '2020-01-01 00:00:05', '2020-01-05 23:59:27', None),
+    ('1m', 20_000, 5, 6745, '2020-01-01 00:00:00', '2020-01-05 23:59:00', None),
+    ('5m', 20_000, 5, 1440, '2020-01-01 00:00:00', '2020-01-05 23:55:00', None),
+    ('15m', 20_000, 5, 480, '2020-01-01 00:00:00', '2020-01-05 23:45:00', None),
+    ('1h', 20_000, 5, 120, '2020-01-01 00:00:00', '2020-01-05 23:00:00', None),
+    ('2h', 20_000, 5, 60, '2020-01-01 00:00:00', '2020-01-05 22:00:00', None),
+    ('4h', 20_000, 5, 30, '2020-01-01 00:00:00', '2020-01-05 20:00:00', None),
+    ('8h', 20_000, 5, 15, '2020-01-01 00:00:00', '2020-01-05 16:00:00', None),
+    ('12h', 20_000, 5, 10, '2020-01-01 00:00:00', '2020-01-05 12:00:00', None),
+    ('1d', 20_000, 5, 5, '2020-01-01 00:00:00', '2020-01-05 00:00:00', 'Sunday'),
+    ('7d', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'),
+    ('1w', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'),
+    ('1M', 20_000, 74, 3, '2020-01-01 00:00:00', '2020-03-01 00:00:00', None),
+    ('3M', 20_000, 100, 2, '2020-01-01 00:00:00', '2020-04-01 00:00:00', None),
+    ('1y', 20_000, 1000, 3, '2020-01-01 00:00:00', '2022-01-01 00:00:00', None),
+])
+def test_trades_to_ohlcv_multi(timeframe, rows, days, candles, start, end, weekday):
+    trades_history = generate_trades_history(n_rows=rows, days=days)
+    df = trades_to_ohlcv(trades_history, timeframe)
+    assert not df.empty
+    assert len(df) == candles
+    assert df.iloc[0, :]['date'] == pd.Timestamp(f'{start}+0000')
+    assert df.iloc[-1, :]['date'] == pd.Timestamp(f'{end}+0000')
+    if weekday:
+        # Weekday is only relevant for daily and weekly candles.
+        assert df.iloc[-1, :]['date'].day_name() == weekday
def test_ohlcv_fill_up_missing_data(testdatadir, caplog): def test_ohlcv_fill_up_missing_data(testdatadir, caplog):
@ -132,6 +176,45 @@ def test_ohlcv_fill_up_missing_data2(caplog):
f"{len(data)} - after: {len(data2)}.*", caplog) f"{len(data)} - after: {len(data2)}.*", caplog)
@pytest.mark.parametrize('timeframe', [
'1s', '1m', '5m', '15m', '1h', '2h', '4h', '8h', '12h', '1d', '7d', '1w', '1M', '3M', '1y'
])
def test_ohlcv_to_dataframe_multi(timeframe):
data = generate_test_data(timeframe, 180)
assert len(data) == 180
df = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT')
assert len(df) == len(data) - 1
df1 = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT', drop_incomplete=False)
assert len(df1) == len(data)
assert data.equals(df1)
data1 = data.copy()
if timeframe in ('1M', '3M', '1y'):
data1.loc[:, 'date'] = data1.loc[:, 'date'] + pd.to_timedelta('1w')
else:
# Shift by half a timeframe
data1.loc[:, 'date'] = data1.loc[:, 'date'] + (pd.to_timedelta(timeframe) / 2)
df2 = ohlcv_to_dataframe(data1, timeframe, 'UNITTEST/USDT')
assert len(df2) == len(data) - 1
tfs = timeframe_to_seconds(timeframe)
tfm = timeframe_to_minutes(timeframe)
if 1 <= tfm < 10000:
# Minute-based resampling does not work on timeframes >= 1 week.
ohlcv_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'
}
dfs = data1.resample(f"{tfs}s", on='date').agg(ohlcv_dict).reset_index(drop=False)
dfm = data1.resample(f"{tfm}min", on='date').agg(ohlcv_dict).reset_index(drop=False)
assert dfs.equals(dfm)
assert dfs.equals(df1)
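The drop_incomplete contract asserted above reduces to a one-line rule. A minimal sketch, assuming the frame is sorted ascending by 'date':

def drop_incomplete_sketch(df, drop_incomplete: bool = True):
    # The last row is the still-forming candle; cut it so indicators
    # never see partial data.
    return df.iloc[:-1] if drop_incomplete else df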
def test_ohlcv_to_dataframe_1M(): def test_ohlcv_to_dataframe_1M():
# Monthly ticks from 2019-09-01 to 2023-07-01 # Monthly ticks from 2019-09-01 to 2023-07-01
View File
@ -148,19 +148,25 @@ def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
def test_datahandler_ohlcv_data_min_max(testdatadir): def test_datahandler_ohlcv_data_min_max(testdatadir):
dh = JsonDataHandler(testdatadir) dh = JsonDataHandler(testdatadir)
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot') min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot')
assert len(min_max) == 2 assert len(min_max) == 3
# Empty pair # Empty pair
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot') min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot')
assert len(min_max) == 2 assert len(min_max) == 3
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc) assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
assert min_max[0] == min_max[1] assert min_max[0] == min_max[1]
# Empty pair2 # Empty pair2
min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '4m', 'spot') min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '41m', 'spot')
assert len(min_max) == 2 assert len(min_max) == 3
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc) assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
assert min_max[0] == min_max[1] assert min_max[0] == min_max[1]
# Existing pair ...
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '1m', 'spot')
assert len(min_max) == 3
assert min_max[0] == datetime(2017, 11, 4, 23, 2, tzinfo=timezone.utc)
assert min_max[1] == datetime(2017, 11, 14, 22, 59, tzinfo=timezone.utc)
def test_datahandler__check_empty_df(testdatadir, caplog): def test_datahandler__check_empty_df(testdatadir, caplog):
dh = JsonDataHandler(testdatadir) dh = JsonDataHandler(testdatadir)
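The widened return value (len(min_max) == 3 instead of 2) suggests the handler now reports a third element alongside the first and last date; a candle count is a guess. A hedged sketch inferred only from the asserts above:

from datetime import datetime, timezone

def ohlcv_data_min_max_sketch(df):
    if df.empty:
        epoch = datetime.fromtimestamp(0, tz=timezone.utc)
        return epoch, epoch, 0
    return df['date'].min(), df['date'].max(), len(df)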
View File
@ -194,7 +194,7 @@ def test_get_producer_df(default_conf):
assert la == empty_la assert la == empty_la
# non existent timeframe, empty dataframe # non existent timeframe, empty dataframe
datframe, la = dataprovider.get_producer_df(pair, timeframe='1h') _dataframe, la = dataprovider.get_producer_df(pair, timeframe='1h')
assert dataframe.empty assert dataframe.empty
assert la == empty_la assert la == empty_la
View File
@ -2844,10 +2844,17 @@ async def test__async_fetch_trades(default_conf, mocker, caplog, exchange_name,
exchange._api_async.fetch_trades = get_mock_coro(fetch_trades_result) exchange._api_async.fetch_trades = get_mock_coro(fetch_trades_result)
pair = 'ETH/BTC' pair = 'ETH/BTC'
res = await exchange._async_fetch_trades(pair, since=None, params=None) res, pagid = await exchange._async_fetch_trades(pair, since=None, params=None)
assert isinstance(res, list) assert isinstance(res, list)
assert isinstance(res[0], list) assert isinstance(res[0], list)
assert isinstance(res[1], list) assert isinstance(res[1], list)
if exchange._trades_pagination == 'id':
if exchange_name == 'kraken':
assert pagid == 1565798399872512133
else:
assert pagid == '126181333'
else:
assert pagid == 1565798399872
assert exchange._api_async.fetch_trades.call_count == 1 assert exchange._api_async.fetch_trades.call_count == 1
assert exchange._api_async.fetch_trades.call_args[0][0] == pair assert exchange._api_async.fetch_trades.call_args[0][0] == pair
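The new (res, pagid) tuple implies _async_fetch_trades now derives a pagination cursor from the last fetched trade. A hedged sketch; the helper name and the field mapping are assumptions based on the asserts:

def next_pagination_id_sketch(trades: list, pagination_mode: str):
    if not trades:
        return None
    last = trades[-1]
    # id-paginated exchanges (Kraken's large int, Binance-style string ids)
    # resume from the last trade id; time-paginated ones resume from its
    # millisecond timestamp.
    return last['id'] if pagination_mode == 'id' else last['timestamp']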
@ -2856,11 +2863,20 @@ async def test__async_fetch_trades(default_conf, mocker, caplog, exchange_name,
assert log_has_re(f"Fetching trades for pair {pair}, since .*", caplog) assert log_has_re(f"Fetching trades for pair {pair}, since .*", caplog)
caplog.clear() caplog.clear()
exchange._api_async.fetch_trades.reset_mock() exchange._api_async.fetch_trades.reset_mock()
res = await exchange._async_fetch_trades(pair, since=None, params={'from': '123'}) res, pagid = await exchange._async_fetch_trades(pair, since=None, params={'from': '123'})
assert exchange._api_async.fetch_trades.call_count == 1 assert exchange._api_async.fetch_trades.call_count == 1
assert exchange._api_async.fetch_trades.call_args[0][0] == pair assert exchange._api_async.fetch_trades.call_args[0][0] == pair
assert exchange._api_async.fetch_trades.call_args[1]['limit'] == 1000 assert exchange._api_async.fetch_trades.call_args[1]['limit'] == 1000
assert exchange._api_async.fetch_trades.call_args[1]['params'] == {'from': '123'} assert exchange._api_async.fetch_trades.call_args[1]['params'] == {'from': '123'}
if exchange._trades_pagination == 'id':
if exchange_name == 'kraken':
assert pagid == 1565798399872512133
else:
assert pagid == '126181333'
else:
assert pagid == 1565798399872
assert log_has_re(f"Fetching trades for pair {pair}, params: .*", caplog) assert log_has_re(f"Fetching trades for pair {pair}, params: .*", caplog)
exchange.close() exchange.close()
@ -2915,8 +2931,9 @@ async def test__async_fetch_trades_contract_size(default_conf, mocker, caplog, e
) )
pair = 'ETH/USDT:USDT' pair = 'ETH/USDT:USDT'
res = await exchange._async_fetch_trades(pair, since=None, params=None) res, pagid = await exchange._async_fetch_trades(pair, since=None, params=None)
assert res[0][5] == 300 assert res[0][5] == 300
assert pagid is not None
exchange.close() exchange.close()
@ -2926,13 +2943,17 @@ async def test__async_get_trade_history_id(default_conf, mocker, exchange_name,
fetch_trades_result): fetch_trades_result):
exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name)
if exchange._trades_pagination != 'id':
exchange.close()
pytest.skip("Exchange does not support pagination by trade id")
pagination_arg = exchange._trades_pagination_arg pagination_arg = exchange._trades_pagination_arg
async def mock_get_trade_hist(pair, *args, **kwargs): async def mock_get_trade_hist(pair, *args, **kwargs):
if 'since' in kwargs: if 'since' in kwargs:
# Return first 3 # Return first 3
return fetch_trades_result[:-2] return fetch_trades_result[:-2]
elif kwargs.get('params', {}).get(pagination_arg) == fetch_trades_result[-3]['id']: elif kwargs.get('params', {}).get(pagination_arg) in (
fetch_trades_result[-3]['id'], 1565798399752):
# Return 2 # Return 2
return fetch_trades_result[-3:-1] return fetch_trades_result[-3:-1]
else: else:
@ -2948,6 +2969,7 @@ async def test__async_get_trade_history_id(default_conf, mocker, exchange_name,
assert isinstance(ret, tuple) assert isinstance(ret, tuple)
assert ret[0] == pair assert ret[0] == pair
assert isinstance(ret[1], list) assert isinstance(ret[1], list)
if exchange_name != 'kraken':
assert len(ret[1]) == len(fetch_trades_result) assert len(ret[1]) == len(fetch_trades_result)
assert exchange._api_async.fetch_trades.call_count == 3 assert exchange._api_async.fetch_trades.call_count == 3
fetch_trades_cal = exchange._api_async.fetch_trades.call_args_list fetch_trades_cal = exchange._api_async.fetch_trades.call_args_list
@ -2961,6 +2983,21 @@ async def test__async_get_trade_history_id(default_conf, mocker, exchange_name,
assert exchange._ft_has['trades_pagination_arg'] in fetch_trades_cal[1][1]['params'] assert exchange._ft_has['trades_pagination_arg'] in fetch_trades_cal[1][1]['params']
@pytest.mark.parametrize('trade_id, expected', [
('1234', True),
('170544369512007228', True),
('1705443695120072285', True),
('170544369512007228555', True),
])
@pytest.mark.parametrize("exchange_name", EXCHANGES)
def test__valid_trade_pagination_id(mocker, default_conf_usdt, exchange_name, trade_id, expected):
if exchange_name == 'kraken':
pytest.skip("Kraken has a different pagination id format, and an explicit test.")
exchange = get_patched_exchange(mocker, default_conf_usdt, id=exchange_name)
assert exchange._valid_trade_pagination_id('XRP/USDT', trade_id) == expected
@pytest.mark.asyncio @pytest.mark.asyncio
@pytest.mark.parametrize("exchange_name", EXCHANGES) @pytest.mark.parametrize("exchange_name", EXCHANGES)
async def test__async_get_trade_history_time(default_conf, mocker, caplog, exchange_name, async def test__async_get_trade_history_time(default_conf, mocker, caplog, exchange_name,
@ -2976,6 +3013,9 @@ async def test__async_get_trade_history_time(default_conf, mocker, caplog, excha
caplog.set_level(logging.DEBUG) caplog.set_level(logging.DEBUG)
exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name)
if exchange._trades_pagination != 'time':
exchange.close()
pytest.skip("Exchange does not support pagination by timestamp")
# Monkey-patch async function # Monkey-patch async function
exchange._api_async.fetch_trades = MagicMock(side_effect=mock_get_trade_hist) exchange._api_async.fetch_trades = MagicMock(side_effect=mock_get_trade_hist)
pair = 'ETH/BTC' pair = 'ETH/BTC'
@ -3008,9 +3048,9 @@ async def test__async_get_trade_history_time_empty(default_conf, mocker, caplog,
async def mock_get_trade_hist(pair, *args, **kwargs): async def mock_get_trade_hist(pair, *args, **kwargs):
if kwargs['since'] == trades_history[0][0]: if kwargs['since'] == trades_history[0][0]:
return trades_history[:-1] return trades_history[:-1], trades_history[:-1][-1][0]
else: else:
return [] return [], None
caplog.set_level(logging.DEBUG) caplog.set_level(logging.DEBUG)
exchange = get_patched_exchange(mocker, default_conf, id=exchange_name) exchange = get_patched_exchange(mocker, default_conf, id=exchange_name)
@ -5312,3 +5352,4 @@ def test_price_to_precision_with_default_conf(default_conf, mocker):
patched_ex = get_patched_exchange(mocker, conf) patched_ex = get_patched_exchange(mocker, conf)
prec_price = patched_ex.price_to_precision("XRP/USDT", 1.0000000101) prec_price = patched_ex.price_to_precision("XRP/USDT", 1.0000000101)
assert prec_price == 1.00000001 assert prec_price == 1.00000001
assert prec_price == 1.00000001
View File
@ -10,7 +10,7 @@ from freqtrade.exceptions import OperationalException
from freqtrade.exchange import (amount_to_contract_precision, amount_to_precision, from freqtrade.exchange import (amount_to_contract_precision, amount_to_precision,
date_minus_candles, price_to_precision, timeframe_to_minutes, date_minus_candles, price_to_precision, timeframe_to_minutes,
timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date, timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date,
timeframe_to_seconds) timeframe_to_resample_freq, timeframe_to_seconds)
from freqtrade.exchange.check_exchange import check_exchange from freqtrade.exchange.check_exchange import check_exchange
from tests.conftest import log_has_re from tests.conftest import log_has_re
@ -124,6 +124,21 @@ def test_timeframe_to_msecs():
assert timeframe_to_msecs("1d") == 86400000 assert timeframe_to_msecs("1d") == 86400000
@pytest.mark.parametrize("timeframe,expected", [
("1s", '1s'),
("15s", '15s'),
("5m", '300s'),
("10m", '600s'),
("1h", '3600s'),
("1d", '86400s'),
("1w", '1W-MON'),
("1M", '1MS'),
("1y", '1YS'),
])
def test_timeframe_to_resample_freq(timeframe, expected):
assert timeframe_to_resample_freq(timeframe) == expected
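A hedged sketch of the mapping this parametrize encodes: sub-weekly timeframes become plain second offsets, while weekly, monthly and yearly ones need pandas' anchored aliases to land on calendar boundaries. The body is inferred from the expected values only, reusing the timeframe_to_seconds helper imported above:

def timeframe_to_resample_freq_sketch(timeframe: str) -> str:
    if timeframe.endswith('w'):
        return f"{timeframe[:-1]}W-MON"   # weeks anchored on Monday
    if timeframe.endswith('M'):
        return f"{timeframe[:-1]}MS"      # month start
    if timeframe.endswith('y'):
        return f"{timeframe[:-1]}YS"      # year start
    return f"{timeframe_to_seconds(timeframe)}s"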
def test_timeframe_to_prev_date(): def test_timeframe_to_prev_date():
# 2019-08-12 13:22:08 # 2019-08-12 13:22:08
date = datetime.fromtimestamp(1565616128, tz=timezone.utc) date = datetime.fromtimestamp(1565616128, tz=timezone.utc)
View File
@ -14,7 +14,7 @@ from tests.exchange.test_exchange import ccxt_exceptionhandlers
(0.99, 220 * 0.99, "sell"), (0.99, 220 * 0.99, "sell"),
(0.98, 220 * 0.98, "sell"), (0.98, 220 * 0.98, "sell"),
]) ])
def test_create_stoploss_order_huobi(default_conf, mocker, limitratio, expected, side): def test_create_stoploss_order_htx(default_conf, mocker, limitratio, expected, side):
api_mock = MagicMock() api_mock = MagicMock()
order_id = f'test_prod_buy_{randint(0, 10 ** 6)}' order_id = f'test_prod_buy_{randint(0, 10 ** 6)}'
order_type = 'stop-limit' order_type = 'stop-limit'
@ -29,7 +29,7 @@ def test_create_stoploss_order_huobi(default_conf, mocker, limitratio, expected,
mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y)
mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y)
exchange = get_patched_exchange(mocker, default_conf, api_mock, 'huobi') exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx')
with pytest.raises(InvalidOrderException): with pytest.raises(InvalidOrderException):
order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190,
@ -58,7 +58,7 @@ def test_create_stoploss_order_huobi(default_conf, mocker, limitratio, expected,
# test exception handling # test exception handling
with pytest.raises(DependencyException): with pytest.raises(DependencyException):
api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance")) api_mock.create_order = MagicMock(side_effect=ccxt.InsufficientFunds("0 balance"))
exchange = get_patched_exchange(mocker, default_conf, api_mock, 'huobi') exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx')
exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220,
order_types={}, side=side, leverage=1.0) order_types={}, side=side, leverage=1.0)
@ -69,20 +69,20 @@ def test_create_stoploss_order_huobi(default_conf, mocker, limitratio, expected,
exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220, exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=220,
order_types={}, side=side, leverage=1.0) order_types={}, side=side, leverage=1.0)
ccxt_exceptionhandlers(mocker, default_conf, api_mock, "huobi", ccxt_exceptionhandlers(mocker, default_conf, api_mock, "htx",
"create_stoploss", "create_order", retries=1, "create_stoploss", "create_order", retries=1,
pair='ETH/BTC', amount=1, stop_price=220, order_types={}, pair='ETH/BTC', amount=1, stop_price=220, order_types={},
side=side, leverage=1.0) side=side, leverage=1.0)
def test_create_stoploss_order_dry_run_huobi(default_conf, mocker): def test_create_stoploss_order_dry_run_htx(default_conf, mocker):
api_mock = MagicMock() api_mock = MagicMock()
order_type = 'stop-limit' order_type = 'stop-limit'
default_conf['dry_run'] = True default_conf['dry_run'] = True
mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y) mocker.patch(f'{EXMS}.amount_to_precision', lambda s, x, y: y)
mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y) mocker.patch(f'{EXMS}.price_to_precision', lambda s, x, y, **kwargs: y)
exchange = get_patched_exchange(mocker, default_conf, api_mock, 'huobi') exchange = get_patched_exchange(mocker, default_conf, api_mock, 'htx')
with pytest.raises(InvalidOrderException): with pytest.raises(InvalidOrderException):
order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190, order = exchange.create_stoploss(pair='ETH/BTC', amount=1, stop_price=190,
@ -103,8 +103,8 @@ def test_create_stoploss_order_dry_run_huobi(default_conf, mocker):
assert order['amount'] == 1 assert order['amount'] == 1
def test_stoploss_adjust_huobi(mocker, default_conf): def test_stoploss_adjust_htx(mocker, default_conf):
exchange = get_patched_exchange(mocker, default_conf, id='huobi') exchange = get_patched_exchange(mocker, default_conf, id='htx')
order = { order = {
'type': 'stop', 'type': 'stop',
'price': 1500, 'price': 1500,
View File
@ -183,19 +183,17 @@ def test_create_stoploss_order_kraken(default_conf, mocker, ordertype, side, adj
assert 'info' in order assert 'info' in order
assert order['id'] == order_id assert order['id'] == order_id
assert api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC' assert api_mock.create_order.call_args_list[0][1]['symbol'] == 'ETH/BTC'
if ordertype == 'limit': assert api_mock.create_order.call_args_list[0][1]['type'] == ordertype
assert api_mock.create_order.call_args_list[0][1]['type'] == STOPLOSS_LIMIT_ORDERTYPE
assert api_mock.create_order.call_args_list[0][1]['params'] == { assert api_mock.create_order.call_args_list[0][1]['params'] == {
'trading_agreement': 'agree', 'trading_agreement': 'agree',
'price2': adjustedprice 'stopLossPrice': 220
} }
else:
assert api_mock.create_order.call_args_list[0][1]['type'] == STOPLOSS_ORDERTYPE
assert api_mock.create_order.call_args_list[0][1]['params'] == {
'trading_agreement': 'agree'}
assert api_mock.create_order.call_args_list[0][1]['side'] == side assert api_mock.create_order.call_args_list[0][1]['side'] == side
assert api_mock.create_order.call_args_list[0][1]['amount'] == 1 assert api_mock.create_order.call_args_list[0][1]['amount'] == 1
assert api_mock.create_order.call_args_list[0][1]['price'] == 220 if ordertype == 'limit':
assert api_mock.create_order.call_args_list[0][1]['price'] == adjustedprice
else:
assert api_mock.create_order.call_args_list[0][1]['price'] is None
# test exception handling # test exception handling
with pytest.raises(DependencyException): with pytest.raises(DependencyException):
@ -253,7 +251,7 @@ def test_create_stoploss_order_dry_run_kraken(default_conf, mocker, side):
assert 'info' in order assert 'info' in order
assert 'type' in order assert 'type' in order
assert order['type'] == STOPLOSS_ORDERTYPE assert order['type'] == 'market'
assert order['price'] == 220 assert order['price'] == 220
assert order['amount'] == 1 assert order['amount'] == 1
@ -265,11 +263,22 @@ def test_create_stoploss_order_dry_run_kraken(default_conf, mocker, side):
def test_stoploss_adjust_kraken(mocker, default_conf, sl1, sl2, sl3, side): def test_stoploss_adjust_kraken(mocker, default_conf, sl1, sl2, sl3, side):
exchange = get_patched_exchange(mocker, default_conf, id='kraken') exchange = get_patched_exchange(mocker, default_conf, id='kraken')
order = { order = {
'type': STOPLOSS_ORDERTYPE, 'type': 'market',
'price': 1500, 'stopLossPrice': 1500,
} }
assert exchange.stoploss_adjust(sl1, order, side=side) assert exchange.stoploss_adjust(sl1, order, side=side)
assert not exchange.stoploss_adjust(sl2, order, side=side) assert not exchange.stoploss_adjust(sl2, order, side=side)
# Test with invalid order case ... # diff. order type ...
order['type'] = 'stop_loss_limit' order['type'] = 'limit'
assert not exchange.stoploss_adjust(sl3, order, side=side) assert exchange.stoploss_adjust(sl3, order, side=side)
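The rewritten asserts show Kraken stoplosses keyed on a stopLossPrice param rather than a dedicated stop-loss-limit order type. A loose sketch of the adjust check; the direction handling and the role of the order type are assumptions:

def stoploss_adjust_sketch(stop_loss: float, order: dict, side: str) -> bool:
    trigger = order['stopLossPrice']
    # Replace the order when the wanted stop has moved past the trigger.
    return stop_loss > trigger if side == 'sell' else stop_loss < trigger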
@pytest.mark.parametrize('trade_id, expected', [
('1234', False),
('170544369512007228', False),
('1705443695120072285', True),
('170544369512007228555', True),
])
def test__valid_trade_pagination_id_kraken(mocker, default_conf_usdt, trade_id, expected):
exchange = get_patched_exchange(mocker, default_conf_usdt, id='kraken')
assert exchange._valid_trade_pagination_id('XRP/USDT', trade_id) == expected
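The Kraken-specific expectations (False below 19 digits, True from 19 up, while the generic test earlier accepts every id) fit nanosecond-timestamp cursors. A minimal sketch under that assumption:

def kraken_valid_pagination_id_sketch(trade_id: str) -> bool:
    # A valid Kraken cursor looks like a nanosecond timestamp:
    # a 19+ digit integer string ('1234' and 18-digit ids fail).
    return trade_id.isdigit() and len(trade_id) >= 19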
View File
@ -247,7 +247,7 @@ EXCHANGES = {
'timeframe': '1h', 'timeframe': '1h',
'orderbook_max_entries': 50, 'orderbook_max_entries': 50,
}, },
'huobi': { 'htx': {
'pair': 'ETH/BTC', 'pair': 'ETH/BTC',
'stake_currency': 'BTC', 'stake_currency': 'BTC',
'hasQuoteVolume': True, 'hasQuoteVolume': True,
View File
@ -1,4 +1,5 @@
import platform import platform
import sys
from copy import deepcopy from copy import deepcopy
from pathlib import Path from pathlib import Path
from typing import Any, Dict from typing import Any, Dict
@ -15,6 +16,10 @@ from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
from tests.conftest import get_patched_exchange from tests.conftest import get_patched_exchange
def is_py12() -> bool:
return sys.version_info >= (3, 12)
def is_mac() -> bool: def is_mac() -> bool:
machine = platform.system() machine = platform.system()
return "Darwin" in machine return "Darwin" in machine
@ -31,7 +36,7 @@ def patch_torch_initlogs(mocker) -> None:
module_name = 'torch' module_name = 'torch'
mocked_module = types.ModuleType(module_name) mocked_module = types.ModuleType(module_name)
sys.modules[module_name] = mocked_module sys.modules[module_name] = mocked_module
else: elif not is_py12():
mocker.patch("torch._logging._init_logs") mocker.patch("torch._logging._init_logs")
View File
@ -76,7 +76,7 @@ def test_filter_features(mocker, freqai_conf):
freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf) freqai, unfiltered_dataframe = make_unfiltered_dataframe(mocker, freqai_conf)
freqai.dk.find_features(unfiltered_dataframe) freqai.dk.find_features(unfiltered_dataframe)
filtered_df, labels = freqai.dk.filter_features( filtered_df, _labels = freqai.dk.filter_features(
unfiltered_dataframe, unfiltered_dataframe,
freqai.dk.training_features_list, freqai.dk.training_features_list,
freqai.dk.label_list, freqai.dk.label_list,
View File
@ -1,7 +1,6 @@
import logging import logging
import platform import platform
import shutil import shutil
import sys
from pathlib import Path from pathlib import Path
from unittest.mock import MagicMock from unittest.mock import MagicMock
@ -16,24 +15,24 @@ from freqtrade.optimize.backtesting import Backtesting
from freqtrade.persistence import Trade from freqtrade.persistence import Trade
from freqtrade.plugins.pairlistmanager import PairListManager from freqtrade.plugins.pairlistmanager import PairListManager
from tests.conftest import EXMS, create_mock_trades, get_patched_exchange, log_has_re from tests.conftest import EXMS, create_mock_trades, get_patched_exchange, log_has_re
from tests.freqai.conftest import (get_patched_freqai_strategy, is_mac, make_rl_config, from tests.freqai.conftest import (get_patched_freqai_strategy, is_mac, is_py12, make_rl_config,
mock_pytorch_mlp_model_training_parameters) mock_pytorch_mlp_model_training_parameters)
def is_py12() -> bool:
return sys.version_info >= (3, 12)
def is_arm() -> bool: def is_arm() -> bool:
machine = platform.machine() machine = platform.machine()
return "arm" in machine or "aarch64" in machine return "arm" in machine or "aarch64" in machine
def can_run_model(model: str) -> None: def can_run_model(model: str) -> None:
is_pytorch_model = 'Reinforcement' in model or 'PyTorch' in model
if is_py12() and ("Catboost" in model or is_pytorch_model):
pytest.skip("Model not supported on python 3.12 yet.")
if is_arm() and "Catboost" in model: if is_arm() and "Catboost" in model:
pytest.skip("CatBoost is not supported on ARM.") pytest.skip("CatBoost is not supported on ARM.")
is_pytorch_model = 'Reinforcement' in model or 'PyTorch' in model
if is_pytorch_model and is_mac() and not is_arm(): if is_pytorch_model and is_mac() and not is_arm():
pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.") pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.")
View File
@ -734,7 +734,7 @@ def test_backtest_one(default_conf, fee, mocker, testdatadir) -> None:
'min_rate': [0.10370188, 0.10300000000000001], 'min_rate': [0.10370188, 0.10300000000000001],
'max_rate': [0.10501, 0.1038888], 'max_rate': [0.10501, 0.1038888],
'is_open': [False, False], 'is_open': [False, False],
'enter_tag': [None, None], 'enter_tag': ['', ''],
"leverage": [1.0, 1.0], "leverage": [1.0, 1.0],
"is_short": [False, False], "is_short": [False, False],
'open_timestamp': [1517251200000, 1517283000000], 'open_timestamp': [1517251200000, 1517283000000],
View File
@ -72,7 +72,7 @@ def test_backtest_position_adjustment(default_conf, fee, mocker, testdatadir) ->
'min_rate': [0.10370188, 0.10300000000000001], 'min_rate': [0.10370188, 0.10300000000000001],
'max_rate': [0.10481985, 0.1038888], 'max_rate': [0.10481985, 0.1038888],
'is_open': [False, False], 'is_open': [False, False],
'enter_tag': [None, None], 'enter_tag': ['', ''],
'leverage': [1.0, 1.0], 'leverage': [1.0, 1.0],
'is_short': [False, False], 'is_short': [False, False],
'open_timestamp': [1517251200000, 1517283000000], 'open_timestamp': [1517251200000, 1517283000000],
View File
@ -254,7 +254,7 @@ def test_log_results_if_loss_improves(hyperopt, capsys) -> None:
'is_best': True 'is_best': True
} }
) )
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert all(x in out assert all(x in out
for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"]) for x in ["Best", "2/2", " 1", "0.10%", "0.00100000 BTC (1.00%)", "00:20:00"])
@ -333,7 +333,7 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
# Should be called for historical candle data # Should be called for historical candle data
assert dumper.call_count == 1 assert dumper.call_count == 1
@ -577,7 +577,7 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
result_str = ( result_str = (
'{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi"' '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi"'
':{},"stoploss":null,"trailing_stop":null,"max_open_trades":null}' ':{},"stoploss":null,"trailing_stop":null,"max_open_trades":null}'
@ -624,7 +624,7 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501 assert '{"params":{"mfi-value":null,"sell-mfi-value":null},"minimal_roi":{},"stoploss":null}' in out # noqa: E501
# Should be called for historical candle data # Should be called for historical candle data
assert dumper.call_count == 1 assert dumper.call_count == 1
@ -666,7 +666,7 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert '{"minimal_roi":{},"stoploss":null}' in out assert '{"minimal_roi":{},"stoploss":null}' in out
assert dumper.call_count == 1 assert dumper.call_count == 1
@ -704,7 +704,7 @@ def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> Non
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.call_count == 1 assert dumper.call_count == 1
assert dumper2.call_count == 1 assert dumper2.call_count == 1
@ -777,7 +777,7 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
assert dumper.call_count == 1 assert dumper.call_count == 1
@ -819,7 +819,7 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
parallel.assert_called_once() parallel.assert_called_once()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out assert 'Best result:\n\n* 1/1: foo result Objective: 1.00000\n' in out
assert dumper.called assert dumper.called
assert dumper.call_count == 1 assert dumper.call_count == 1
@ -1051,7 +1051,7 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N
hyperopt.start() hyperopt.start()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'max_open_trades = -1' in out assert 'max_open_trades = -1' in out
assert 'max_open_trades = inf' not in out assert 'max_open_trades = inf' not in out
@ -1070,7 +1070,7 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N
hyperopt.start() hyperopt.start()
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert '"max_open_trades":-1' in out assert '"max_open_trades":-1' in out
View File
@ -143,7 +143,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
instance = LookaheadAnalysis(lookahead_conf, strategy_obj) instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
instance.current_analysis = analysis instance.current_analysis = analysis
table, headers, data = (LookaheadAnalysisSubFunctions. _table, _headers, data = (LookaheadAnalysisSubFunctions.
text_table_lookahead_analysis_instances(lookahead_conf, [instance])) text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
# check row contents for a try that has too few signals # check row contents for a try that has too few signals
@ -158,13 +158,13 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
analysis.false_exit_signals = 10 analysis.false_exit_signals = 10
instance = LookaheadAnalysis(lookahead_conf, strategy_obj) instance = LookaheadAnalysis(lookahead_conf, strategy_obj)
instance.current_analysis = analysis instance.current_analysis = analysis
table, headers, data = (LookaheadAnalysisSubFunctions. _table, _headers, data = (LookaheadAnalysisSubFunctions.
text_table_lookahead_analysis_instances(lookahead_conf, [instance])) text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
assert data[0][2].__contains__("error") assert data[0][2].__contains__("error")
# edit it into not showing an error # edit it into not showing an error
instance.failed_bias_check = False instance.failed_bias_check = False
table, headers, data = (LookaheadAnalysisSubFunctions. _table, _headers, data = (LookaheadAnalysisSubFunctions.
text_table_lookahead_analysis_instances(lookahead_conf, [instance])) text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py' assert data[0][0] == 'strategy_test_v3_with_lookahead_bias.py'
assert data[0][1] == 'strategy_test_v3_with_lookahead_bias' assert data[0][1] == 'strategy_test_v3_with_lookahead_bias'
@ -176,7 +176,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
analysis.false_indicators.append('falseIndicator1') analysis.false_indicators.append('falseIndicator1')
analysis.false_indicators.append('falseIndicator2') analysis.false_indicators.append('falseIndicator2')
table, headers, data = (LookaheadAnalysisSubFunctions. _table, _headers, data = (LookaheadAnalysisSubFunctions.
text_table_lookahead_analysis_instances(lookahead_conf, [instance])) text_table_lookahead_analysis_instances(lookahead_conf, [instance]))
assert data[0][6] == 'falseIndicator1, falseIndicator2' assert data[0][6] == 'falseIndicator1, falseIndicator2'
@ -185,7 +185,7 @@ def test_lookahead_helper_text_table_lookahead_analysis_instances(lookahead_conf
assert len(data) == 1 assert len(data) == 1
# check amount of multiple rows # check amount of multiple rows
table, headers, data = (LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances( _table, _headers, data = (LookaheadAnalysisSubFunctions.text_table_lookahead_analysis_instances(
lookahead_conf, [instance, instance, instance])) lookahead_conf, [instance, instance, instance]))
assert len(data) == 3 assert len(data) == 3
View File
@ -513,7 +513,7 @@ def test_show_sorted_pairlist(testdatadir, default_conf, capsys):
show_sorted_pairlist(default_conf, bt_data) show_sorted_pairlist(default_conf, bt_data)
out, err = capsys.readouterr() out, _err = capsys.readouterr()
assert 'Pairs for Strategy StrategyTestV3: \n[' in out assert 'Pairs for Strategy StrategyTestV3: \n[' in out
assert 'TOTAL' not in out assert 'TOTAL' not in out
assert '"ETH/BTC", // ' in out assert '"ETH/BTC", // ' in out
View File
@ -107,7 +107,7 @@ def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf
instance = RecursiveAnalysis(recursive_conf, strategy_obj) instance = RecursiveAnalysis(recursive_conf, strategy_obj)
instance.dict_recursive = dict_diff instance.dict_recursive = dict_diff
table, headers, data = (RecursiveAnalysisSubFunctions. _table, _headers, data = (RecursiveAnalysisSubFunctions.
text_table_recursive_analysis_instances([instance])) text_table_recursive_analysis_instances([instance]))
# check row contents for a try that has too few signals # check row contents for a try that has too few signals
@ -119,7 +119,7 @@ def test_recursive_helper_text_table_recursive_analysis_instances(recursive_conf
dict_diff = dict() dict_diff = dict()
instance = RecursiveAnalysis(recursive_conf, strategy_obj) instance = RecursiveAnalysis(recursive_conf, strategy_obj)
instance.dict_recursive = dict_diff instance.dict_recursive = dict_diff
table, headers, data = (RecursiveAnalysisSubFunctions. _table, _headers, data = (RecursiveAnalysisSubFunctions.
text_table_recursive_analysis_instances([instance])) text_table_recursive_analysis_instances([instance]))
assert len(data) == 0 assert len(data) == 0
View File
@ -0,0 +1,24 @@
import pytest
from freqtrade.persistence import FtNoDBContext, PairLocks, Trade
@pytest.mark.parametrize('timeframe', ['', '5m', '1d'])
def test_FtNoDBContext(timeframe):
PairLocks.timeframe = ''
assert Trade.use_db is True
assert PairLocks.use_db is True
assert PairLocks.timeframe == ''
with FtNoDBContext(timeframe):
assert Trade.use_db is False
assert PairLocks.use_db is False
assert PairLocks.timeframe == timeframe
with FtNoDBContext():
assert Trade.use_db is False
assert PairLocks.use_db is False
assert PairLocks.timeframe == ''
assert Trade.use_db is True
assert PairLocks.use_db is True
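Inferred purely from these asserts, FtNoDBContext plausibly behaves like the sketch below, using the Trade and PairLocks globals imported above. Restoring use_db to True on exit (rather than to a saved previous value) is an assumption:

class FtNoDBContextSketch:
    def __init__(self, timeframe: str = '') -> None:
        self.timeframe = timeframe

    def __enter__(self):
        Trade.use_db = False
        PairLocks.use_db = False
        PairLocks.timeframe = self.timeframe

    def __exit__(self, *exc):
        Trade.use_db = True
        PairLocks.use_db = True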
View File
@ -108,7 +108,7 @@ def test_fetch_pairlist_timeout_keep_last_pairlist(mocker, rpl_config, caplog):
remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"] remote_pairlist._last_pairlist = ["BTC/USDT", "ETH/USDT", "LTC/USDT"]
pairs, time_elapsed = remote_pairlist.fetch_pairlist() pairs, _time_elapsed = remote_pairlist.fetch_pairlist()
assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url}", caplog) assert log_has(f"Was not able to fetch pairlist from: {remote_pairlist._pairlist_url}", caplog)
assert log_has("Keeping last fetched pairlist", caplog) assert log_has("Keeping last fetched pairlist", caplog)
assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"] assert pairs == ["BTC/USDT", "ETH/USDT", "LTC/USDT"]
@ -281,7 +281,7 @@ def test_remote_pairlist_blacklist(mocker, rpl_config, caplog, markets, tickers)
remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config,
rpl_config["pairlists"][1], 1) rpl_config["pairlists"][1], 1)
pairs, time_elapsed = remote_pairlist.fetch_pairlist() pairs, _time_elapsed = remote_pairlist.fetch_pairlist()
assert pairs == ["XRP/USDT"] assert pairs == ["XRP/USDT"]
@ -334,7 +334,7 @@ def test_remote_pairlist_whitelist(mocker, rpl_config, processing_mode, markets,
remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config, remote_pairlist = RemotePairList(exchange, pairlistmanager, rpl_config,
rpl_config["pairlists"][1], 1) rpl_config["pairlists"][1], 1)
pairs, time_elapsed = remote_pairlist.fetch_pairlist() pairs, _time_elapsed = remote_pairlist.fetch_pairlist()
assert pairs == ["XRP/USDT"] assert pairs == ["XRP/USDT"]
View File
@ -112,7 +112,7 @@ def assert_response(response, expected_code=200, needs_cors=True):
def test_api_not_found(botclient): def test_api_not_found(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/invalid_url") rc = client_get(client, f"{BASE_URI}/invalid_url")
assert_response(rc, 404) assert_response(rc, 404)
@ -120,7 +120,7 @@ def test_api_not_found(botclient):
def test_api_ui_fallback(botclient, mocker): def test_api_ui_fallback(botclient, mocker):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, "/favicon.ico") rc = client_get(client, "/favicon.ico")
assert rc.status_code == 200 assert rc.status_code == 200
@ -150,7 +150,7 @@ def test_api_ui_fallback(botclient, mocker):
def test_api_ui_version(botclient, mocker): def test_api_ui_version(botclient, mocker):
ftbot, client = botclient _ftbot, client = botclient
mocker.patch('freqtrade.commands.deploy_commands.read_ui_version', return_value='0.1.2') mocker.patch('freqtrade.commands.deploy_commands.read_ui_version', return_value='0.1.2')
rc = client_get(client, "/ui_version") rc = client_get(client, "/ui_version")
@ -230,7 +230,7 @@ def test_api_unauthorized(botclient):
def test_api_token_login(botclient): def test_api_token_login(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client.post(f"{BASE_URI}/token/login", rc = client.post(f"{BASE_URI}/token/login",
data=None, data=None,
headers={'Authorization': _basic_auth_str('WRONG_USER', 'WRONG_PASS'), headers={'Authorization': _basic_auth_str('WRONG_USER', 'WRONG_PASS'),
@ -249,7 +249,7 @@ def test_api_token_login(botclient):
def test_api_token_refresh(botclient): def test_api_token_refresh(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_post(client, f"{BASE_URI}/token/login") rc = client_post(client, f"{BASE_URI}/token/login")
assert_response(rc) assert_response(rc)
rc = client.post(f"{BASE_URI}/token/refresh", rc = client.post(f"{BASE_URI}/token/refresh",
@ -541,7 +541,7 @@ def test_api_count(botclient, mocker, ticker, fee, markets, is_short):
def test_api_locks(botclient): def test_api_locks(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/locks") rc = client_get(client, f"{BASE_URI}/locks")
assert_response(rc) assert_response(rc)
@ -821,7 +821,7 @@ def test_api_trade_reload_trade(botclient, mocker, fee, markets, ticker, is_shor
def test_api_logs(botclient): def test_api_logs(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/logs") rc = client_get(client, f"{BASE_URI}/logs")
assert_response(rc) assert_response(rc)
assert len(rc.json()) == 2 assert len(rc.json()) == 2
@ -1228,7 +1228,7 @@ def test_api_status(botclient, mocker, ticker, fee, markets, is_short,
def test_api_version(botclient): def test_api_version(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/version") rc = client_get(client, f"{BASE_URI}/version")
assert_response(rc) assert_response(rc)
@ -1236,7 +1236,7 @@ def test_api_version(botclient):
def test_api_blacklist(botclient, mocker): def test_api_blacklist(botclient, mocker):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/blacklist") rc = client_get(client, f"{BASE_URI}/blacklist")
assert_response(rc) assert_response(rc)
@ -1303,7 +1303,7 @@ def test_api_blacklist(botclient, mocker):
def test_api_whitelist(botclient): def test_api_whitelist(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/whitelist") rc = client_get(client, f"{BASE_URI}/whitelist")
assert_response(rc) assert_response(rc)
@ -1558,7 +1558,7 @@ def test_api_pair_candles(botclient, ohlcv_history):
def test_api_pair_history(botclient, mocker): def test_api_pair_history(botclient, mocker):
ftbot, client = botclient _ftbot, client = botclient
timeframe = '5m' timeframe = '5m'
lfm = mocker.patch('freqtrade.strategy.interface.IStrategy.load_freqAI_model') lfm = mocker.patch('freqtrade.strategy.interface.IStrategy.load_freqAI_model')
# No pair # No pair
@ -1603,9 +1603,9 @@ def test_api_pair_history(botclient, mocker):
assert 'data' in result assert 'data' in result
data = result['data'] data = result['data']
assert len(data) == 289 assert len(data) == 289
# analyzed DF has 28 columns # analyzed DF has 30 columns
assert len(result['columns']) == 28 assert len(result['columns']) == 30
assert len(data[0]) == 28 assert len(data[0]) == 30
date_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'date'][0] date_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'date'][0]
rsi_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'rsi'][0] rsi_col_idx = [idx for idx, c in enumerate(result['columns']) if c == 'rsi'][0]
@ -1698,7 +1698,7 @@ def test_api_strategies(botclient, tmp_path):
def test_api_strategy(botclient): def test_api_strategy(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/strategy/{CURRENT_TEST_STRATEGY}") rc = client_get(client, f"{BASE_URI}/strategy/{CURRENT_TEST_STRATEGY}")
@ -1717,7 +1717,7 @@ def test_api_strategy(botclient):
def test_api_exchanges(botclient): def test_api_exchanges(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/exchanges") rc = client_get(client, f"{BASE_URI}/exchanges")
assert_response(rc) assert_response(rc)
@ -1954,7 +1954,7 @@ def test_list_available_pairs(botclient):
def test_sysinfo(botclient): def test_sysinfo(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/sysinfo") rc = client_get(client, f"{BASE_URI}/sysinfo")
assert_response(rc) assert_response(rc)
@ -2234,7 +2234,7 @@ def test_api_patch_backtest_history_entry(botclient, tmp_path: Path):
def test_health(botclient): def test_health(botclient):
ftbot, client = botclient _ftbot, client = botclient
rc = client_get(client, f"{BASE_URI}/health") rc = client_get(client, f"{BASE_URI}/health")
@ -2245,7 +2245,7 @@ def test_health(botclient):
def test_api_ws_subscribe(botclient, mocker): def test_api_ws_subscribe(botclient, mocker):
ftbot, client = botclient _ftbot, client = botclient
ws_url = f"/api/v1/message/ws?token={_TEST_WS_TOKEN}" ws_url = f"/api/v1/message/ws?token={_TEST_WS_TOKEN}"
sub_mock = mocker.patch('freqtrade.rpc.api_server.ws.WebSocketChannel.set_subscriptions') sub_mock = mocker.patch('freqtrade.rpc.api_server.ws.WebSocketChannel.set_subscriptions')
@ -2268,7 +2268,7 @@ def test_api_ws_subscribe(botclient, mocker):
def test_api_ws_requests(botclient, caplog): def test_api_ws_requests(botclient, caplog):
caplog.set_level(logging.DEBUG) caplog.set_level(logging.DEBUG)
ftbot, client = botclient _ftbot, client = botclient
ws_url = f"/api/v1/message/ws?token={_TEST_WS_TOKEN}" ws_url = f"/api/v1/message/ws?token={_TEST_WS_TOKEN}"
# Test whitelist request # Test whitelist request
View File
@ -2,7 +2,6 @@
Unit test file for rpc/external_message_consumer.py Unit test file for rpc/external_message_consumer.py
""" """
import asyncio import asyncio
import functools
import logging import logging
from datetime import datetime, timezone from datetime import datetime, timezone
from unittest.mock import MagicMock from unittest.mock import MagicMock
@ -302,19 +301,16 @@ async def test_emc_receive_messages_valid(default_conf, caplog, mocker):
dp = DataProvider(default_conf, None, None, None) dp = DataProvider(default_conf, None, None, None)
emc = ExternalMessageConsumer(default_conf, dp) emc = ExternalMessageConsumer(default_conf, dp)
loop = asyncio.get_event_loop()
def change_running(emc): emc._running = not emc._running
class TestChannel: class TestChannel:
async def recv(self, *args, **kwargs): async def recv(self, *args, **kwargs):
emc._running = False
return {"type": "whitelist", "data": ["BTC/USDT"]} return {"type": "whitelist", "data": ["BTC/USDT"]}
async def ping(self, *args, **kwargs): async def ping(self, *args, **kwargs):
return asyncio.Future() return asyncio.Future()
try: try:
change_running(emc) emc._running = True
loop.call_soon(functools.partial(change_running, emc=emc))
await emc._receive_messages(TestChannel(), test_producer, lock) await emc._receive_messages(TestChannel(), test_producer, lock)
assert log_has_re(r"Received message of type `whitelist`.+", caplog) assert log_has_re(r"Received message of type `whitelist`.+", caplog)
@ -349,19 +345,16 @@ async def test_emc_receive_messages_invalid(default_conf, caplog, mocker):
dp = DataProvider(default_conf, None, None, None) dp = DataProvider(default_conf, None, None, None)
emc = ExternalMessageConsumer(default_conf, dp) emc = ExternalMessageConsumer(default_conf, dp)
loop = asyncio.get_event_loop()
def change_running(emc): emc._running = not emc._running
class TestChannel: class TestChannel:
async def recv(self, *args, **kwargs): async def recv(self, *args, **kwargs):
emc._running = False
return {"type": ["BTC/USDT"]} return {"type": ["BTC/USDT"]}
async def ping(self, *args, **kwargs): async def ping(self, *args, **kwargs):
return asyncio.Future() return asyncio.Future()
try: try:
change_running(emc) emc._running = True
loop.call_soon(functools.partial(change_running, emc=emc))
await emc._receive_messages(TestChannel(), test_producer, lock) await emc._receive_messages(TestChannel(), test_producer, lock)
assert log_has_re(r"Invalid message from.+", caplog) assert log_has_re(r"Invalid message from.+", caplog)
@ -396,8 +389,8 @@ async def test_emc_receive_messages_timeout(default_conf, caplog, mocker):
dp = DataProvider(default_conf, None, None, None) dp = DataProvider(default_conf, None, None, None)
emc = ExternalMessageConsumer(default_conf, dp) emc = ExternalMessageConsumer(default_conf, dp)
loop = asyncio.get_event_loop() def change_running():
def change_running(emc): emc._running = not emc._running emc._running = not emc._running
class TestChannel: class TestChannel:
async def recv(self, *args, **kwargs): async def recv(self, *args, **kwargs):
@ -407,8 +400,7 @@ async def test_emc_receive_messages_timeout(default_conf, caplog, mocker):
return asyncio.Future() return asyncio.Future()
try: try:
change_running(emc) change_running()
loop.call_soon(functools.partial(change_running, emc=emc))
with pytest.raises(asyncio.TimeoutError): with pytest.raises(asyncio.TimeoutError):
await emc._receive_messages(TestChannel(), test_producer, lock) await emc._receive_messages(TestChannel(), test_producer, lock)
@ -447,19 +439,16 @@ async def test_emc_receive_messages_handle_error(default_conf, caplog, mocker):
emc.handle_producer_message = MagicMock(side_effect=Exception) emc.handle_producer_message = MagicMock(side_effect=Exception)
loop = asyncio.get_event_loop()
def change_running(emc): emc._running = not emc._running
class TestChannel: class TestChannel:
async def recv(self, *args, **kwargs): async def recv(self, *args, **kwargs):
emc._running = False
return {"type": "whitelist", "data": ["BTC/USDT"]} return {"type": "whitelist", "data": ["BTC/USDT"]}
async def ping(self, *args, **kwargs): async def ping(self, *args, **kwargs):
return asyncio.Future() return asyncio.Future()
try: try:
change_running(emc) emc._running = True
loop.call_soon(functools.partial(change_running, emc=emc))
await emc._receive_messages(TestChannel(), test_producer, lock) await emc._receive_messages(TestChannel(), test_producer, lock)
assert log_has_re(r"Error handling producer message.+", caplog) assert log_has_re(r"Error handling producer message.+", caplog)
View File
@ -599,7 +599,7 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time
get_fee=fee, get_fee=fee,
) )
telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt) telegram, _freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt)
# Move date to within day # Move date to within day
time_machine.move_to('2022-06-11 08:00:00+00:00') time_machine.move_to('2022-06-11 08:00:00+00:00')
@ -1480,7 +1480,7 @@ async def test_telegram_performance_handle(default_conf_usdt, update, ticker, fe
fetch_ticker=ticker, fetch_ticker=ticker,
get_fee=fee, get_fee=fee,
) )
telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt) telegram, _freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf_usdt)
# Create some test data # Create some test data
create_mock_trades_usdt(fee) create_mock_trades_usdt(fee)
@ -1655,7 +1655,7 @@ async def test_telegram_lock_handle(default_conf, update, ticker, fee, mocker) -
async def test_whitelist_static(default_conf, update, mocker) -> None: async def test_whitelist_static(default_conf, update, mocker) -> None:
telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf) telegram, _freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)
await telegram._whitelist(update=update, context=MagicMock()) await telegram._whitelist(update=update, context=MagicMock())
assert msg_mock.call_count == 1 assert msg_mock.call_count == 1
@ -2647,7 +2647,7 @@ async def test__send_msg_keyboard(default_conf, mocker, caplog) -> None:
async def test_change_market_direction(default_conf, mocker, update) -> None: async def test_change_market_direction(default_conf, mocker, update) -> None:
telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf) telegram, _, _msg_mock = get_telegram_testobject(mocker, default_conf)
assert telegram._rpc._freqtrade.strategy.market_direction == MarketDirection.NONE assert telegram._rpc._freqtrade.strategy.market_direction == MarketDirection.NONE
context = MagicMock() context = MagicMock()
context.args = ["long"] context.args = ["long"]
View File
@ -152,7 +152,7 @@ class StrategyTestV3(IStrategy):
( (
qtpylib.crossed_below(dataframe['rsi'], self.sell_rsi.value) qtpylib.crossed_below(dataframe['rsi'], self.sell_rsi.value)
), ),
'enter_short'] = 1 ('enter_short', 'enter_tag')] = (1, 'short_Tag')
return dataframe return dataframe
@ -176,7 +176,7 @@ class StrategyTestV3(IStrategy):
( (
qtpylib.crossed_above(dataframe['rsi'], self.buy_rsi.value) qtpylib.crossed_above(dataframe['rsi'], self.buy_rsi.value)
), ),
'exit_short'] = 1 ('exit_short', 'exit_tag')] = (1, 'short_Tag')
return dataframe return dataframe
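Setting signal and tag in a single .loc call is plausibly what grows the analyzed dataframe from 28 to 30 columns in test_api_pair_history above (an inference, not stated in the diff). A minimal standalone illustration:

import pandas as pd

df = pd.DataFrame({'rsi': [25.0, 75.0]})
# One assignment creates both columns; non-matching rows stay NaN.
df.loc[df['rsi'] > 70, ('enter_short', 'enter_tag')] = (1, 'short_Tag')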
View File
@ -105,7 +105,7 @@ def test_returns_latest_signal(ohlcv_history):
_STRATEGY.config['trading_mode'] = 'spot' _STRATEGY.config['trading_mode'] = 'spot'
def test_analyze_pair_empty(default_conf, mocker, caplog, ohlcv_history): def test_analyze_pair_empty(mocker, caplog, ohlcv_history):
mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history) mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
mocker.patch.object( mocker.patch.object(
_STRATEGY, '_analyze_ticker_internal', _STRATEGY, '_analyze_ticker_internal',
@ -1019,3 +1019,30 @@ def test_auto_hyperopt_interface_loadparams(default_conf, mocker, caplog):
StrategyResolver.load_strategy(default_conf) StrategyResolver.load_strategy(default_conf)
assert log_has("Invalid parameter file format.", caplog) assert log_has("Invalid parameter file format.", caplog)
@pytest.mark.parametrize('function,raises', [
('populate_entry_trend', True),
('advise_entry', False),
('populate_exit_trend', True),
('advise_exit', False),
])
def test_pandas_warning_direct(ohlcv_history, function, raises):
df = _STRATEGY.populate_indicators(ohlcv_history, {'pair': 'ETH/BTC'})
if raises:
with pytest.warns(FutureWarning):
# Test for the pandas FutureWarning:
# "Setting an item of incompatible dtype is
# deprecated and will raise in a future error of pandas"
# https://github.com/pandas-dev/pandas/issues/56503
getattr(_STRATEGY, function)(df, {'pair': 'ETH/BTC'})
else:
getattr(_STRATEGY, function)(df, {'pair': 'ETH/BTC'})
def test_pandas_warning_through_analyze_pair(ohlcv_history, mocker, recwarn):
mocker.patch.object(_STRATEGY.dp, 'ohlcv', return_value=ohlcv_history)
_STRATEGY.analyze_pair('ETH/BTC')
assert len(recwarn) == 0
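A minimal reproduction (an assumption, not freqtrade code) of the pandas FutureWarning these tests guard against, assigning an incompatible dtype through .loc:

import pandas as pd

df = pd.DataFrame({'enter_short': [0.0, 0.0]})
# pandas >= 2.1 emits a FutureWarning: the string cannot be stored in the
# float64 column without an explicit cast.
df.loc[0, 'enter_short'] = 'short_Tag'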
View File
@ -15,7 +15,7 @@ from freqtrade.configuration.deprecated_settings import (check_conflicting_setti
process_deprecated_setting, process_deprecated_setting,
process_removed_setting, process_removed_setting,
process_temporary_deprecated_settings) process_temporary_deprecated_settings)
from freqtrade.configuration.environment_vars import flat_vars_to_nested_dict from freqtrade.configuration.environment_vars import _flat_vars_to_nested_dict
from freqtrade.configuration.load_config import (load_config_file, load_file, load_from_files, from freqtrade.configuration.load_config import (load_config_file, load_file, load_from_files,
log_config_error_range) log_config_error_range)
from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX from freqtrade.constants import DEFAULT_DB_DRYRUN_URL, DEFAULT_DB_PROD_URL, ENV_VAR_PREFIX
@ -1419,7 +1419,7 @@ def test_flat_vars_to_nested_dict(caplog):
'chat_id': '2151' 'chat_id': '2151'
} }
} }
res = flat_vars_to_nested_dict(test_args, ENV_VAR_PREFIX) res = _flat_vars_to_nested_dict(test_args, ENV_VAR_PREFIX)
assert res == expected assert res == expected
assert log_has("Loading variable 'FREQTRADE__EXCHANGE__SOME_SETTING'", caplog) assert log_has("Loading variable 'FREQTRADE__EXCHANGE__SOME_SETTING'", caplog)
View File
@ -627,15 +627,16 @@ def test_process_exchange_failures(default_conf_usdt, ticker_usdt, mocker) -> No
mocker.patch.multiple( mocker.patch.multiple(
EXMS, EXMS,
fetch_ticker=ticker_usdt, fetch_ticker=ticker_usdt,
create_order=MagicMock(side_effect=TemporaryError) reload_markets=MagicMock(side_effect=TemporaryError),
create_order=MagicMock(side_effect=TemporaryError),
) )
sleep_mock = mocker.patch('time.sleep', side_effect=lambda _: None) sleep_mock = mocker.patch('time.sleep')
worker = Worker(args=None, config=default_conf_usdt) worker = Worker(args=None, config=default_conf_usdt)
patch_get_signal(worker.freqtrade) patch_get_signal(worker.freqtrade)
worker._process_running() worker._process_running()
assert sleep_mock.has_calls() assert sleep_mock.called is True
def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) -> None: def test_process_operational_exception(default_conf_usdt, ticker_usdt, mocker) -> None:
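Why the final assert changed (illustrative, stdlib behaviour only): Mock auto-creates attributes, so the old has_calls() check could never fail:

from unittest.mock import MagicMock

m = MagicMock()
assert m.has_calls()      # passes even though m was never called:
                          # has_calls is an auto-created child mock
assert m.called is False  # .called is the real invocation flag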