Merge pull request #9486 from freqtrade/new_release

New release 2023.11
Commit 0654186400 by Matthias, 2023-11-30 17:25:25 +01:00, committed via GitHub
91 changed files with 983 additions and 444 deletions

===== changed file =====

@@ -90,7 +90,7 @@ jobs:
  - name: Backtesting (multi)
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade new-strategy -s AwesomeStrategy
  freqtrade new-strategy -s AwesomeStrategyMin --template minimal
@@ -98,7 +98,7 @@ jobs:
  - name: Hyperopt
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade hyperopt --datadir tests/testdata -e 6 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
@@ -200,14 +200,14 @@ jobs:
  - name: Backtesting
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade new-strategy -s AwesomeStrategyAdv --template advanced
  freqtrade backtesting --datadir tests/testdata --strategy AwesomeStrategyAdv
  - name: Hyperopt
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all
@@ -275,13 +275,13 @@ jobs:
  - name: Backtesting
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade backtesting --datadir tests/testdata --strategy SampleStrategy
  - name: Hyperopt
  run: |
- cp config_examples/config_bittrex.example.json config.json
+ cp tests/testdata/config.tests.json config.json
  freqtrade create-userdir --userdir user_data
  freqtrade hyperopt --datadir tests/testdata -e 5 --strategy SampleStrategy --hyperopt-loss SharpeHyperOptLossDaily --print-all

===== changed file =====

@@ -8,17 +8,17 @@ repos:
  # stages: [push]
  - repo: https://github.com/pre-commit/mirrors-mypy
- rev: "v1.5.1"
+ rev: "v1.7.0"
  hooks:
  - id: mypy
  exclude: build_helpers
  additional_dependencies:
- - types-cachetools==5.3.0.6
+ - types-cachetools==5.3.0.7
  - types-filelock==3.2.7
  - types-requests==2.31.0.10
  - types-tabulate==0.9.0.3
  - types-python-dateutil==2.8.19.14
- - SQLAlchemy==2.0.22
+ - SQLAlchemy==2.0.23
  # stages: [push]
  - repo: https://github.com/pycqa/isort

===== changed file =====

@@ -125,7 +125,7 @@ Exceptions:
  Contributors may be given commit privileges. Preference will be given to those with:
- 1. Past contributions to Freqtrade and other related open-source projects. Contributions to Freqtrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Quantity and quality are considered.
+ 1. Past contributions to Freqtrade and other related open-source projects. Contributions to Freqtrade include both code (both accepted and pending) and friendly participation in the issue tracker and Pull request reviews. Both quantity and quality are considered.
  1. A coding style that the other core committers find simple, minimal, and clean.
  1. Access to resources for cross-platform development and testing.
  1. Time to devote to the project regularly.

===== changed file =====

@@ -1,4 +1,4 @@
- FROM python:3.11.5-slim-bullseye as base
+ FROM python:3.11.6-slim-bookworm as base
  # Setup env
  ENV LANG C.UTF-8

===== changed file =====

@@ -28,7 +28,7 @@ hesitate to read the source code and understand the mechanism of this bot.
  Please read the [exchange specific notes](docs/exchanges.md) to learn about eventual, special configurations needed for each exchange.
  - [X] [Binance](https://www.binance.com/)
- - [X] [Bittrex](https://bittrex.com/)
+ - [X] [Bitmart](https://bitmart.com/)
  - [X] [Gate.io](https://www.gate.io/ref/6266643)
  - [X] [Huobi](http://huobi.com/)
  - [X] [Kraken](https://kraken.com/)

===== changed file (binary, not shown) =====

===== changed file =====

@@ -54,7 +54,7 @@ docker tag freqtrade:$TAG_FREQAI_ARM ${CACHE_IMAGE}:$TAG_FREQAI_ARM
  docker tag freqtrade:$TAG_FREQAI_RL_ARM ${CACHE_IMAGE}:$TAG_FREQAI_RL_ARM
  # Run backtest
- docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
+ docker run --rm -v $(pwd)/tests/testdata/config.tests.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG_ARM} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
  if [ $? -ne 0 ]; then
  echo "failed running backtest"

===== changed file =====

@@ -67,7 +67,7 @@ docker tag freqtrade:$TAG_FREQAI ${CACHE_IMAGE}:$TAG_FREQAI
  docker tag freqtrade:$TAG_FREQAI_RL ${CACHE_IMAGE}:$TAG_FREQAI_RL
  # Run backtest
- docker run --rm -v $(pwd)/config_examples/config_bittrex.example.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
+ docker run --rm -v $(pwd)/tests/testdata/config.tests.json:/freqtrade/config.json:ro -v $(pwd)/tests:/tests freqtrade:${TAG} backtesting --datadir /tests/testdata --strategy-path /tests/strategy/strats/ --strategy StrategyTestV3
  if [ $? -ne 0 ]; then
  echo "failed running backtest"

===== changed file =====

@@ -1,6 +1,6 @@
  {
  "max_open_trades": 3,
- "stake_currency": "BTC",
+ "stake_currency": "USDT",
  "stake_amount": 0.05,
  "tradable_balance_ratio": 0.99,
  "fiat_display_currency": "USD",
@@ -36,21 +36,21 @@
  "ccxt_async_config": {
  },
  "pair_whitelist": [
- "ALGO/BTC",
- "ATOM/BTC",
- "BAT/BTC",
- "BCH/BTC",
- "BRD/BTC",
- "EOS/BTC",
- "ETH/BTC",
- "IOTA/BTC",
- "LINK/BTC",
- "LTC/BTC",
- "NEO/BTC",
- "NXS/BTC",
- "XMR/BTC",
- "XRP/BTC",
- "XTZ/BTC"
+ "ALGO/USDT",
+ "ATOM/USDT",
+ "BAT/USDT",
+ "BCH/USDT",
+ "BRD/USDT",
+ "EOS/USDT",
+ "ETH/USDT",
+ "IOTA/USDT",
+ "LINK/USDT",
+ "LTC/USDT",
+ "NEO/USDT",
+ "NXS/USDT",
+ "XMR/USDT",
+ "XRP/USDT",
+ "XTZ/USDT"
  ],
  "pair_blacklist": [
  "BNB/.*"

===== changed file =====

@@ -1,4 +1,4 @@
- FROM python:3.9.16-slim-bullseye as base
+ FROM python:3.11.6-slim-bookworm as base
  # Setup env
  ENV LANG C.UTF-8
@@ -11,34 +11,31 @@ ENV FT_APP_ENV="docker"
  # Prepare environment
  RUN mkdir /freqtrade \
  && apt-get update \
- && apt-get -y install sudo libatlas3-base curl sqlite3 libhdf5-dev libutf8proc-dev libsnappy-dev \
+ && apt-get -y install sudo libatlas3-base libopenblas-dev curl sqlite3 libhdf5-dev libutf8proc-dev libsnappy-dev \
  && apt-get clean \
  && useradd -u 1000 -G sudo -U -m ftuser \
  && chown ftuser:ftuser /freqtrade \
  # Allow sudoers
- && echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers
+ && echo "ftuser ALL=(ALL) NOPASSWD: /bin/chown" >> /etc/sudoers \
+ && pip install --upgrade pip
  WORKDIR /freqtrade
  # Install dependencies
  FROM base as python-deps
  RUN apt-get update \
- && apt-get -y install build-essential libssl-dev libffi-dev libopenblas-dev libgfortran5 pkg-config cmake gcc \
+ && apt-get -y install build-essential libssl-dev libffi-dev libgfortran5 pkg-config cmake gcc \
  && apt-get clean \
- && pip install --upgrade pip \
  && echo "[global]\nextra-index-url=https://www.piwheels.org/simple" > /etc/pip.conf
  # Install TA-lib
  COPY build_helpers/* /tmp/
- RUN cd /tmp && /tmp/install_ta-lib.sh && rm -r /tmp/*ta-lib*
- ENV LD_LIBRARY_PATH /usr/local/lib
  # Install dependencies
  COPY --chown=ftuser:ftuser requirements.txt /freqtrade/
  USER ftuser
- RUN pip install --user --no-cache-dir numpy==1.25.2 \
- && pip install --user /tmp/pyarrow-*.whl \
- && pip install --user --no-build-isolation TA-Lib==0.4.28 \
+ RUN pip install --user --no-cache-dir numpy \
+ && pip install --user --no-index --find-links /tmp/ pyarrow TA-Lib==0.4.28 \
  && pip install --user --no-cache-dir -r requirements.txt
  # Copy dependencies to runtime-image

===== changed file =====

@@ -170,11 +170,11 @@ freqtrade backtesting --strategy AwesomeStrategy --dry-run-wallet 1000
  Using a different on-disk historical candle (OHLCV) data source
- Assume you downloaded the history data from the Bittrex exchange and kept it in the `user_data/data/bittrex-20180101` directory.
+ Assume you downloaded the history data from the Binance exchange and kept it in the `user_data/data/binance-20180101` directory.
  You can then use this data for backtesting as follows:
  ```bash
- freqtrade backtesting --strategy AwesomeStrategy --datadir user_data/data/bittrex-20180101
+ freqtrade backtesting --strategy AwesomeStrategy --datadir user_data/data/binance-20180101
  ```
  ---

===== changed file =====

@@ -594,7 +594,7 @@ creating trades on the exchange.
  ```json
  "exchange": {
- "name": "bittrex",
+ "name": "binance",
  "key": "key",
  "secret": "secret",
  ...
@@ -644,7 +644,7 @@ API Keys are usually only required for live trading (trading for real money, bot
  ```json
  {
  "exchange": {
- "name": "bittrex",
+ "name": "binance",
  "key": "af8ddd35195e9dc500b9a6f799f6f5c93d89193b",
  "secret": "08a9dc6db3d7b53e1acebd9275677f4b0a04f1a5",
  //"password": "", // Optional, not needed by all exchanges)

===== changed file =====

@@ -318,6 +318,7 @@ Additional tests / steps to complete:
  * Check if balance shows correctly (*)
  * Create market order (*)
  * Create limit order (*)
+ * Cancel order (*)
  * Complete trade (enter + exit) (*)
  * Compare result calculation between exchange and bot
  * Ensure fees are applied correctly (check the database against the exchange)

===== changed file =====

@@ -302,6 +302,24 @@ We do strongly recommend to limit all API keys to the IP you're going to use it
  Bybit (futures only) supports `stoploss_on_exchange` and uses `stop-loss-limit` orders. It provides great advantages, so we recommend to benefit from it by enabling stoploss on exchange.
  On futures, Bybit supports both `stop-limit` as well as `stop-market` orders. You can use either `"limit"` or `"market"` in the `order_types.stoploss` configuration setting to decide which type to use.
+ ## Bitmart
+ Bitmart requires the API key Memo (the name you give the API key) to go along with the exchange key and secret.
+ It's therefore required to pass the UID as well.
+ ```json
+ "exchange": {
+ "name": "bitmart",
+ "uid": "your_bitmart_api_key_memo",
+ "secret": "your_exchange_secret",
+ "password": "your_exchange_api_key_password",
+ // ...
+ }
+ ```
+ !!! Warning "Necessary Verification"
+     Bitmart requires Verification Lvl2 to successfully trade on the spot market through the API - even though trading via UI works just fine with just Lvl1 verification.
  ## All exchanges
  Should you experience constant errors with Nonce (like `InvalidNonce`), it is best to regenerate the API keys. Resetting Nonce is difficult and it's usually easier to regenerate the API keys.

===== changed file =====

@@ -7,7 +7,7 @@ Low level feature engineering is performed in the user strategy within a set of
  | Function | Description |
  |---------------|-------------|
  | `feature_engineering_expand_all()` | This optional function will automatically expand the defined features on the config defined `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`.
- | `feature_engineering_expand_basic()` | This optional function will automatically expand the defined features on the config defined `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. Note: this function does *not* expand across `include_periods_candles`.
+ | `feature_engineering_expand_basic()` | This optional function will automatically expand the defined features on the config defined `include_timeframes`, `include_shifted_candles`, and `include_corr_pairs`. Note: this function does *not* expand across `indicator_periods_candles`.
  | `feature_engineering_standard()` | This optional function will be called once with the dataframe of the base timeframe. This is the final function to be called, which means that the dataframe entering this function will contain all the features and columns from the base asset created by the other `feature_engineering_expand` functions. This function is a good place to do custom exotic feature extractions (e.g. tsfresh). This function is also a good place for any feature that should not be auto-expanded upon (e.g., day of the week).
  | `set_freqai_targets()` | Required function to set the targets for the model. All targets must be prepended with `&` to be recognized by the FreqAI internals.

===== changed file =====

@@ -74,7 +74,6 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
  | | **Reinforcement Learning Parameters within the `freqai.rl_config` sub dictionary**
  | `rl_config` | A dictionary containing the control parameters for a Reinforcement Learning model. <br> **Datatype:** Dictionary.
  | `train_cycles` | Training time steps will be set based on the `train_cycles * number of training data points. <br> **Datatype:** Integer.
- | `cpu_count` | Number of processors to dedicate to the Reinforcement Learning training process. <br> **Datatype:** int.
  | `max_trade_duration_candles`| Guides the agent training to keep trades below desired length. Example usage shown in `prediction_models/ReinforcementLearner.py` within the customizable `calculate_reward()` function. <br> **Datatype:** int.
  | `model_type` | Model string from stable_baselines3 or SBcontrib. Available strings include: `'TRPO', 'ARS', 'RecurrentPPO', 'MaskablePPO', 'PPO', 'A2C', 'DQN'`. User should ensure that `model_training_parameters` match those available to the corresponding stable_baselines3 model by visiting their documentaiton. [PPO doc](https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html) (external website) <br> **Datatype:** string.
  | `policy_type` | One of the available policy types from stable_baselines3 <br> **Datatype:** string.

===== changed file =====

@@ -337,11 +337,15 @@ There are four parameter types each suited for different purposes.
  * `CategoricalParameter` - defines a parameter with a predetermined number of choices.
  * `BooleanParameter` - Shorthand for `CategoricalParameter([True, False])` - great for "enable" parameters.
- !!! Tip "Disabling parameter optimization"
-     Each parameter takes two boolean parameters:
-     * `load` - when set to `False` it will not load values configured in `buy_params` and `sell_params`.
-     * `optimize` - when set to `False` parameter will not be included in optimization process.
-     Use these parameters to quickly prototype various ideas.
+ ### Parameter options
+ There are two parameter options that can help you to quickly test various ideas:
+ * `optimize` - when set to `False`, the parameter will not be included in optimization process. (Default: True)
+ * `load` - when set to `False`, results of a previous hyperopt run (in `buy_params` and `sell_params` either in your strategy or the JSON output file) will not be used as the starting value for subsequent hyperopts. The default value specified in the parameter will be used instead. (Default: True)
+ !!! Tip "Effects of `load=False` on backtesting"
+     Be aware that setting the `load` option to `False` will mean backtesting will also use the default value specified in the parameter and *not* the value found through hyperoptimisation.
  !!! Warning
      Hyperoptable parameters cannot be used in `populate_indicators` - as hyperopt does not recalculate indicators for each epoch, so the starting value would be used in this case.
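
As an illustration of the two options above (an editorial sketch, not part of this diff; the parameter names and ranges are invented), a strategy fragment could look like this:

```python
from freqtrade.strategy import IStrategy, IntParameter


class ExampleStrategy(IStrategy):
    timeframe = "5m"
    minimal_roi = {"0": 0.10}
    stoploss = -0.10

    # Optimized normally: hyperopt searches the range and previously saved results are loaded.
    buy_rsi = IntParameter(10, 40, default=30, space="buy")
    # Excluded from optimization and from loading previous results, so the
    # default (50) is used in both hyperopt and backtesting.
    buy_ema_period = IntParameter(20, 100, default=50, space="buy",
                                  optimize=False, load=False)
```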

===== changed file =====

@@ -40,7 +40,7 @@ Freqtrade is a free and open source crypto trading bot written in Python. It is
  Please read the [exchange specific notes](exchanges.md) to learn about eventual, special configurations needed for each exchange.
  - [X] [Binance](https://www.binance.com/)
- - [X] [Bittrex](https://bittrex.com/)
+ - [X] [Bitmart](https://bitmart.com/)
  - [X] [Gate.io](https://www.gate.io/ref/6266643)
  - [X] [Huobi](http://huobi.com/)
  - [X] [Kraken](https://kraken.com/)

===== changed file =====

@@ -1,6 +1,6 @@
- markdown==3.5
+ markdown==3.5.1
  mkdocs==1.5.3
- mkdocs-material==9.4.6
+ mkdocs-material==9.4.14
  mdx_truly_sane_lists==1.3
- pymdown-extensions==10.3.1
+ pymdown-extensions==10.5
  jinja2==3.1.2

===== changed file =====

@@ -134,13 +134,16 @@ python3 scripts/rest_client.py --config rest_config.json <command> [optional par
  | `reload_config` | Reloads the configuration file.
  | `trades` | List last trades. Limited to 500 trades per call.
  | `trade/<tradeid>` | Get specific trade.
- | `trade/<tradeid>` | DELETE - Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+ | `trades/<tradeid>` | DELETE - Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
- | `trade/<tradeid>/open-order` | DELETE - Cancel open order for this trade.
+ | `trades/<tradeid>/open-order` | DELETE - Cancel open order for this trade.
- | `trade/<tradeid>/reload` | GET - Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
+ | `trades/<tradeid>/reload` | GET - Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
  | `show_config` | Shows part of the current configuration with relevant settings to operation.
  | `logs` | Shows last log messages.
  | `status` | Lists all open trades.
  | `count` | Displays number of trades used and available.
+ | `entries [pair]` | Shows profit statistics for each enter tags for given pair (or all pairs if pair isn't given). Pair is optional.
+ | `exits [pair]` | Shows profit statistics for each exit reasons for given pair (or all pairs if pair isn't given). Pair is optional.
+ | `mix_tags [pair]` | Shows profit statistics for each combinations of enter tag + exit reasons for given pair (or all pairs if pair isn't given). Pair is optional.
  | `locks` | Displays currently locked pairs.
  | `delete_lock <lock_id>` | Deletes (disables) the lock by id.
  | `profit` | Display a summary of your profit/loss from close trades and some stats about your performance.

===== changed file =====

@@ -760,9 +760,9 @@ The `position_adjustment_enable` strategy property enables the usage of `adjust_
  For performance reasons, it's disabled by default and freqtrade will show a warning message on startup if enabled.
  `adjust_trade_position()` can be used to perform additional orders, for example to manage risk with DCA (Dollar Cost Averaging) or to increase or decrease positions.
- `max_entry_position_adjustment` property is used to limit the number of additional buys per trade (on top of the first buy) that the bot can execute. By default, the value is -1 which means the bot have no limit on number of adjustment buys.
+ `max_entry_position_adjustment` property is used to limit the number of additional entries per trade (on top of the first entry order) that the bot can execute. By default, the value is -1 which means the bot have no limit on number of adjustment entries.
- The strategy is expected to return a stake_amount (in stake currency) between `min_stake` and `max_stake` if and when an additional buy order should be made (position is increased).
+ The strategy is expected to return a stake_amount (in stake currency) between `min_stake` and `max_stake` if and when an additional entry order should be made (position is increased -> buy order for long trades, sell order for short trades).
  If there are not enough funds in the wallet (the return value is above `max_stake`) then the signal will be ignored.
  Additional orders also result in additional fees and those orders don't count towards `max_open_trades`.
@@ -770,9 +770,11 @@ This callback is **not** called when there is an open order (either buy or sell)
  `adjust_trade_position()` is called very frequently for the duration of a trade, so you must keep your implementation as performant as possible.
- Additional Buys are ignored once you have reached the maximum amount of extra buys that you have set on `max_entry_position_adjustment`, but the callback is called anyway looking for partial exits.
+ Additional entries are ignored once you have reached the maximum amount of extra entries that you have set on `max_entry_position_adjustment`, but the callback is called anyway looking for partial exits.
- Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade. Modifications to leverage are not possible, and the stake-amount is assumed to be before applying leverage.
+ Position adjustments will always be applied in the direction of the trade, so a positive value will always increase your position (negative values will decrease your position), no matter if it's a long or short trade.
+ Modifications to leverage are not possible, and the stake-amount returned is assumed to be before applying leverage.
  !!! Note "About stake size"
      Using fixed stake size means it will be the amount used for the first order, just like without position adjustment.
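
For orientation (an editorial sketch, not part of this diff), a strategy using the callback described above might add to a losing position once, reusing the stake of the initial entry; the -5% threshold is invented and the helper calls follow the callback documentation:

```python
from datetime import datetime
from typing import Optional

from freqtrade.persistence import Trade
from freqtrade.strategy import IStrategy


class DcaExampleStrategy(IStrategy):
    timeframe = "5m"
    minimal_roi = {"0": 0.10}
    stoploss = -0.25
    position_adjustment_enable = True
    max_entry_position_adjustment = 1  # at most one extra entry per trade

    def adjust_trade_position(self, trade: Trade, current_time: datetime,
                              current_rate: float, current_profit: float,
                              min_stake: Optional[float], max_stake: float,
                              **kwargs) -> Optional[float]:
        # Increase the position once when the trade is more than 5% in the red.
        if current_profit < -0.05 and trade.nr_of_successful_entries == 1:
            filled_entries = trade.select_filled_orders(trade.entry_side)
            # A positive return value requests an additional entry with this stake.
            return filled_entries[0].stake_amount
        return None  # no adjustment
```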

===== changed file =====

@@ -173,7 +173,7 @@ You can use [recursive-analysis](recursive-analysis.md) to check and find the co
  In this example strategy, this should be set to 400 (`startup_candle_count = 400`), since the minimum needed history for ema100 calculation to make sure the value is correct is 400 candles.
  ``` python
- dataframe['ema100'] = ta.EMA(dataframe, timeperiod=400)
+ dataframe['ema100'] = ta.EMA(dataframe, timeperiod=100)
  ```
  By letting the bot know how much history is needed, backtest trades can start at the specified timerange during backtesting and hyperopt.
@@ -486,17 +486,18 @@ for more information.
  :param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
  :param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
- current pair.
+ current pair. Also supports limited pair format strings (see below)
  :param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
  specified, defaults to:
  * {base}_{quote}_{column}_{timeframe} if asset is specified.
  * {column}_{timeframe} if asset is not specified.
- Format string supports these format variables:
- * {asset} - full name of the asset, for example 'BTC/USDT'.
+ Pair format supports these format variables:
  * {base} - base currency in lower case, for example 'eth'.
  * {BASE} - same as {base}, except in upper case.
  * {quote} - quote currency in lower case, for example 'usdt'.
  * {QUOTE} - same as {quote}, except in upper case.
+ Format string additionally supports this variables.
+ * {asset} - full name of the asset, for example 'BTC/USDT'.
  * {column} - name of dataframe column.
  * {timeframe} - timeframe of informative dataframe.
  :param ffill: ffill dataframe after merging informative pair.
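
As a hedged illustration of the format variables documented above (not part of this diff; the indicator choice and strategy fragment are invented), an informative definition could look like:

```python
import talib.abstract as ta

from freqtrade.strategy import IStrategy, informative


class InformativeExampleStrategy(IStrategy):
    timeframe = "5m"
    minimal_roi = {"0": 0.10}
    stoploss = -0.10

    # Columns produced here are named e.g. "btc_usdt_rsi_1h" through the
    # "{base}_{quote}_{column}_{timeframe}" format string.
    @informative('1h', 'BTC/USDT', fmt='{base}_{quote}_{column}_{timeframe}')
    def populate_indicators_btc_1h(self, dataframe, metadata):
        dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
        return dataframe
```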

===== changed file =====

@@ -570,7 +570,7 @@ def populate_any_indicators(
  ```
  1. Features - Move to `feature_engineering_expand_all`
- 2. Basic features, not expanded across `include_periods_candles` - move to`feature_engineering_expand_basic()`.
+ 2. Basic features, not expanded across `indicator_periods_candles` - move to`feature_engineering_expand_basic()`.
  3. Standard features which should not be expanded - move to `feature_engineering_standard()`.
  4. Targets - Move this part to `set_freqai_targets()`.

===== changed file =====

@@ -175,6 +175,7 @@ official commands. You can ask at any moment for help with `/help`.
  | `/status` | Lists all open trades
  | `/status <trade_id>` | Lists one or more specific trade. Separate multiple <trade_id> with a blank space.
  | `/status table` | List all open trades in a table format. Pending buy orders are marked with an asterisk (*) Pending sell orders are marked with a double asterisk (**)
+ | `/order <trade_id>` | Lists orders of one or more specific trade. Separate multiple <trade_id> with a blank space.
  | `/trades [limit]` | List all recently closed trades in a table format.
  | `/count` | Displays number of trades used and available
  | `/locks` | Show currently locked pairs.

===== changed file =====

@@ -427,25 +427,33 @@ zb True missing opt: fetchMyTrades
  Use the `list-timeframes` subcommand to see the list of timeframes available for the exchange.
  ```
- usage: freqtrade list-timeframes [-h] [-v] [--logfile FILE] [-V] [-c PATH] [-d PATH] [--userdir PATH] [--exchange EXCHANGE] [-1]
+ usage: freqtrade list-timeframes [-h] [-v] [--logfile FILE] [-V] [-c PATH]
+ [-d PATH] [--userdir PATH]
+ [--exchange EXCHANGE] [-1]
- optional arguments:
+ options:
  -h, --help show this help message and exit
- --exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no config is provided.
+ --exchange EXCHANGE Exchange name. Only valid if no config is provided.
  -1, --one-column Print output in one column.
  Common arguments:
  -v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
- --logfile FILE Log to the file specified. Special values are: 'syslog', 'journald'. See the documentation for more details.
+ --logfile FILE, --log-file FILE
+ Log to the file specified. Special values are:
+ 'syslog', 'journald'. See the documentation for more
+ details.
  -V, --version show program's version number and exit
  -c PATH, --config PATH
- Specify configuration file (default: `config.json`). Multiple --config options may be used. Can be set to `-`
- to read config from stdin.
- -d PATH, --datadir PATH
+ Specify configuration file (default:
+ `userdir/config.json` or `config.json` whichever
+ exists). Multiple --config options may be used. Can be
+ set to `-` to read config from stdin.
+ -d PATH, --datadir PATH, --data-dir PATH
  Path to directory with historical backtesting data.
  --userdir PATH, --user-data-dir PATH
  Path to userdata directory.
  ```
  * Example: see the timeframes for the 'binance' exchange, set in the configuration file:
@@ -479,20 +487,17 @@ usage: freqtrade list-markets [-h] [-v] [--logfile FILE] [-V] [-c PATH]
  [-d PATH] [--userdir PATH] [--exchange EXCHANGE]
  [--print-list] [--print-json] [-1] [--print-csv]
  [--base BASE_CURRENCY [BASE_CURRENCY ...]]
- [--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]] [-a]
- [--trading-mode {spot,margin,futures}]
+ [--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]]
+ [-a] [--trading-mode {spot,margin,futures}]
  usage: freqtrade list-pairs [-h] [-v] [--logfile FILE] [-V] [-c PATH]
  [-d PATH] [--userdir PATH] [--exchange EXCHANGE]
  [--print-list] [--print-json] [-1] [--print-csv]
  [--base BASE_CURRENCY [BASE_CURRENCY ...]]
  [--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]] [-a]
  [--trading-mode {spot,margin,futures}]
- optional arguments:
+ options:
  -h, --help show this help message and exit
- --exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
- config is provided.
+ --exchange EXCHANGE Exchange name. Only valid if no config is provided.
  --print-list Print list of pairs or market symbols. By default data
  is printed in the tabular format.
  --print-json Print list of pairs or market symbols in JSON format.
@@ -504,20 +509,22 @@ optional arguments:
  Specify quote currency(-ies). Space-separated list.
  -a, --all Print all pairs or market symbols. By default only
  active ones are shown.
- --trading-mode {spot,margin,futures}
+ --trading-mode {spot,margin,futures}, --tradingmode {spot,margin,futures}
  Select Trading mode
  Common arguments:
  -v, --verbose Verbose mode (-vv for more, -vvv to get all messages).
- --logfile FILE Log to the file specified. Special values are:
+ --logfile FILE, --log-file FILE
+ Log to the file specified. Special values are:
  'syslog', 'journald'. See the documentation for more
  details.
  -V, --version show program's version number and exit
  -c PATH, --config PATH
- Specify configuration file (default: `config.json`).
- Multiple --config options may be used. Can be set to
- `-` to read config from stdin.
- -d PATH, --datadir PATH
+ Specify configuration file (default:
+ `userdir/config.json` or `config.json` whichever
+ exists). Multiple --config options may be used. Can be
+ set to `-` to read config from stdin.
+ -d PATH, --datadir PATH, --data-dir PATH
  Path to directory with historical backtesting data.
  --userdir PATH, --user-data-dir PATH
  Path to userdata directory.
@@ -532,7 +539,7 @@ Pairs/markets are sorted by its symbol string in the printed output.
  ### Examples
  * Print the list of active pairs with quote currency USD on exchange, specified in the default
- configuration file (i.e. pairs on the "Bittrex" exchange) in JSON format:
+ configuration file (i.e. pairs on the "Binance" exchange) in JSON format:
  ```
  $ freqtrade list-pairs --quote USD --print-json
@@ -564,7 +571,7 @@ usage: freqtrade test-pairlist [-h] [--userdir PATH] [-v] [-c PATH]
  [--quote QUOTE_CURRENCY [QUOTE_CURRENCY ...]]
  [-1] [--print-json] [--exchange EXCHANGE]
- optional arguments:
+ options:
  -h, --help show this help message and exit
  --userdir PATH, --user-data-dir PATH
  Path to userdata directory.
@@ -578,8 +585,7 @@ optional arguments:
  Specify quote currency(-ies). Space-separated list.
  -1, --one-column Print output in one column.
  --print-json Print list of pairs or market symbols in JSON format.
- --exchange EXCHANGE Exchange name (default: `bittrex`). Only valid if no
- config is provided.
+ --exchange EXCHANGE Exchange name. Only valid if no config is provided.
  ```

===== changed file =====

@@ -302,6 +302,7 @@ You can configure this as follows:
  ```
  The above represents the default (`exit_fill` and `entry_fill` are optional and will default to the above configuration) - modifications are obviously possible.
+ To disable either of the two default values (`entry_fill` / `exit_fill`), you can assign them an empty array (`exit_fill: []`).
  Available fields correspond to the fields for webhooks and are documented in the corresponding webhook sections.

===== changed file =====

@@ -1,5 +1,5 @@
  """ Freqtrade bot """
- __version__ = '2023.10'
+ __version__ = '2023.11'
  if 'dev' in __version__:
  from pathlib import Path

===== changed file =====

@@ -108,7 +108,6 @@ def ask_user_config() -> Dict[str, Any]:
  "choices": [
  "binance",
  "binanceus",
- "bittrex",
  "gate",
  "huobi",
  "kraken",

===== changed file =====

@@ -211,8 +211,9 @@ def prepare_results(analysed_trades, stratname,
  timerange=None):
  res_df = pd.DataFrame()
  for pair, trades in analysed_trades[stratname].items():
- trades.dropna(subset=['close_date'], inplace=True)
- res_df = pd.concat([res_df, trades], ignore_index=True)
+ if (trades.shape[0] > 0):
+     trades.dropna(subset=['close_date'], inplace=True)
+     res_df = pd.concat([res_df, trades], ignore_index=True)
  res_df = _select_rows_within_dates(res_df, timerange)

===== changed file =====

@@ -4,6 +4,7 @@ from freqtrade.exchange.common import remove_exchange_credentials, MAP_EXCHANGE_
  from freqtrade.exchange.exchange import Exchange
  # isort: on
  from freqtrade.exchange.binance import Binance
+ from freqtrade.exchange.bitmart import Bitmart
  from freqtrade.exchange.bitpanda import Bitpanda
  from freqtrade.exchange.bittrex import Bittrex
  from freqtrade.exchange.bitvavo import Bitvavo

===== changed file =====

@@ -0,0 +1,20 @@
+ """ Bitmart exchange subclass """
+ import logging
+ from typing import Dict
+ from freqtrade.exchange import Exchange
+ logger = logging.getLogger(__name__)
+ class Bitmart(Exchange):
+     """
+     Bitmart exchange class. Contains adjustments needed for Freqtrade to work
+     with this exchange.
+     """
+     _ft_has: Dict = {
+         "stoploss_on_exchange": False,  # Bitmart API does not support stoploss orders
+         "ohlcv_candle_limit": 200,
+     }

===== changed file =====

@@ -52,7 +52,7 @@ MAP_EXCHANGE_CHILDCLASS = {
  SUPPORTED_EXCHANGES = [
  'binance',
- 'bittrex',
+ 'bitmart',
  'gate',
  'huobi',
  'kraken',

===== changed file =====

@@ -486,11 +486,14 @@ class Exchange:
  except ccxt.BaseError:
  logger.exception('Unable to initialize markets.')
- def reload_markets(self) -> None:
+ def reload_markets(self, force: bool = False) -> None:
  """Reload markets both sync and async if refresh interval has passed """
  # Check whether markets have to be reloaded
- if (self._last_markets_refresh > 0) and (
-     self._last_markets_refresh + self.markets_refresh_interval > dt_ts()):
+ if (
+     not force
+     and self._last_markets_refresh > 0
+     and (self._last_markets_refresh + self.markets_refresh_interval > dt_ts())
+ ):
  return None
  logger.debug("Performing scheduled market reload..")
  try:
@@ -1228,16 +1231,16 @@ class Exchange:
  return order
  except ccxt.InsufficientFunds as e:
  raise InsufficientFundsError(
- f'Insufficient funds to create {ordertype} sell order on market {pair}. '
- f'Tried to sell amount {amount} at rate {limit_rate}. '
- f'Message: {e}') from e
+ f'Insufficient funds to create {ordertype} {side} order on market {pair}. '
+ f'Tried to {side} amount {amount} at rate {limit_rate} with '
+ f'stop-price {stop_price_norm}. Message: {e}') from e
- except ccxt.InvalidOrder as e:
+ except (ccxt.InvalidOrder, ccxt.BadRequest) as e:
  # Errors:
  # `Order would trigger immediately.`
  raise InvalidOrderException(
- f'Could not create {ordertype} sell order on market {pair}. '
- f'Tried to sell amount {amount} at rate {limit_rate}. '
- f'Message: {e}') from e
+ f'Could not create {ordertype} {side} order on market {pair}. '
+ f'Tried to {side} amount {amount} at rate {limit_rate} with '
+ f'stop-price {stop_price_norm}. Message: {e}') from e
  except ccxt.DDoSProtection as e:
  raise DDosProtection(e) from e
  except (ccxt.NetworkError, ccxt.ExchangeError) as e:
@@ -1496,8 +1499,9 @@ class Exchange:
  @retrier
  def fetch_bids_asks(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict:
  """
+ :param symbols: List of symbols to fetch
  :param cached: Allow cached result
- :return: fetch_tickers result
+ :return: fetch_bids_asks result
  """
  if not self.exchange_has('fetchBidsAsks'):
  return {}
@@ -1546,6 +1550,12 @@ class Exchange:
  raise OperationalException(
  f'Exchange {self._api.name} does not support fetching tickers in batch. '
  f'Message: {e}') from e
+ except ccxt.BadSymbol as e:
+     logger.warning(f"Could not load tickers due to {e.__class__.__name__}. Message: {e} ."
+                    "Reloading markets.")
+     self.reload_markets(True)
+     # Re-raise exception to repeat the call.
+     raise TemporaryError from e
  except ccxt.DDoSProtection as e:
  raise DDosProtection(e) from e
  except (ccxt.NetworkError, ccxt.ExchangeError) as e:
@@ -1954,7 +1964,7 @@ class Exchange:
  results = await asyncio.gather(*input_coro, return_exceptions=True)
  for res in results:
- if isinstance(res, Exception):
+ if isinstance(res, BaseException):
  logger.warning(f"Async code raised an exception: {repr(res)}")
  if raise_:
  raise

===== changed file =====

@@ -27,6 +27,12 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
  ...
  "freqai": {
  ...
+ "conv_width": 30, // PyTorchTransformer is based on windowing
+ "feature_parameters": {
+ ...
+ "include_shifted_candles": 0, // which removes the need for shifted candles
+ ...
+ },
  "model_training_parameters" : {
  "learning_rate": 3e-4,
  "trainer_kwargs": {
@@ -120,16 +126,16 @@ class PyTorchTransformerRegressor(BasePyTorchRegressor):
  # create empty torch tensor
  self.model.model.eval()
  yb = torch.empty(0).to(self.device)
- if x.shape[1] > 1:
+ if x.shape[1] > self.window_size:
  ws = self.window_size
  for i in range(0, x.shape[1] - ws):
  xb = x[:, i:i + ws, :].to(self.device)
  y = self.model.model(xb)
- yb = torch.cat((yb, y), dim=0)
+ yb = torch.cat((yb, y), dim=1)
  else:
  yb = self.model.model(x)
- yb = yb.cpu().squeeze()
+ yb = yb.cpu().squeeze(0)
  pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list)
  pred_df, _, _ = dk.label_pipeline.inverse_transform(pred_df)

===== changed file =====

@@ -3,7 +3,6 @@ from typing import Any, Dict, Type, Union
  from stable_baselines3.common.callbacks import BaseCallback
  from stable_baselines3.common.logger import HParam
- from stable_baselines3.common.vec_env import VecEnv
  from freqtrade.freqai.RL.BaseEnvironment import BaseActions
@@ -13,13 +12,9 @@ class TensorboardCallback(BaseCallback):
  Custom callback for plotting additional values in tensorboard and
  episodic summary reports.
  """
- # Override training_env type to fix type errors
- training_env: Union[VecEnv, None] = None
  def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
  super().__init__(verbose)
  self.model: Any = None
- self.logger: Any = None
  self.actions: Type[Enum] = actions
  def _on_training_start(self) -> None:
@@ -47,8 +42,6 @@ class TensorboardCallback(BaseCallback):
  def _on_step(self) -> bool:
  local_info = self.locals["infos"][0]
- if self.training_env is None:
-     return True
  if hasattr(self.training_env, 'envs'):
  tensorboard_metrics = self.training_env.envs[0].unwrapped.tensorboard_metrics

===== changed file =====

@@ -3,6 +3,7 @@ Various tool function for Freqtrade and scripts
  """
  import gzip
  import logging
+ from io import StringIO
  from pathlib import Path
  from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
  from urllib.parse import urlparse
@@ -231,7 +232,7 @@ def json_to_dataframe(data: str) -> pd.DataFrame:
  :param data: A JSON string
  :returns: A pandas DataFrame from the JSON string
  """
- dataframe = pd.read_json(data, orient='split')
+ dataframe = pd.read_json(StringIO(data), orient='split')
  if 'date' in dataframe.columns:
  dataframe['date'] = pd.to_datetime(dataframe['date'], unit='ms', utc=True)
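
Background for the `StringIO` change above (an editorial note, not part of this diff): newer pandas releases warn when `read_json` receives a literal JSON string rather than a path or buffer, so wrapping the string avoids that. A minimal standalone sketch, assuming that motivation:

```python
from io import StringIO

import pandas as pd

data = '{"columns":["close"],"index":[0,1],"data":[[100.0],[101.5]]}'
# Passing a buffer instead of a literal string keeps recent pandas versions
# from emitting a FutureWarning about literal JSON input.
df = pd.read_json(StringIO(data), orient='split')
print(df)
```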

===== changed file =====

@@ -94,8 +94,8 @@ class LookaheadAnalysis(BaseAnalysis):
  # compare_df now comprises tuples with [1] having either 'self' or 'other'
  if 'other' in col_name[1]:
  continue
- self_value = compare_df_row[col_idx]
- other_value = compare_df_row[col_idx + 1]
+ self_value = compare_df_row.iloc[col_idx]
+ other_value = compare_df_row.iloc[col_idx + 1]
  # output differences
  if self_value != other_value:
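
Editorial aside on the `.iloc` change above: it appears to track the pandas deprecation of treating integer keys as positions on a labelled `Series`. A small illustration with invented data (the rationale is an assumption):

```python
import pandas as pd

row = pd.Series([1.0, 2.0], index=["self", "other"])
# row[0] relies on deprecated positional treatment of integer keys;
# .iloc makes the positional access explicit.
self_value = row.iloc[0]
other_value = row.iloc[1]
print(self_value, other_value)
```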

===== changed file =====

@@ -21,7 +21,7 @@ logger = logging.getLogger(__name__)
  def _format_exception_message(space: str, ignore_missing_space: bool) -> None:
  msg = (f"The '{space}' space is included into the hyperoptimization "
- f"but no parameter for this space was not found in your Strategy. "
+ f"but no parameter for this space was found in your Strategy. "
  )
  if ignore_missing_space:
  logger.warning(msg + "This space will be ignored.")

===== changed file =====

@@ -429,14 +429,18 @@ class HyperoptTools:
  trials = trials.drop(columns=['Total profit'])
  if print_colorized:
+ trials2 = trials.astype(str)
  for i in range(len(trials)):
  if trials.loc[i]['is_profit']:
  for j in range(len(trials.loc[i]) - 3):
- trials.iat[i, j] = f"{Fore.GREEN}{str(trials.loc[i][j])}{Fore.RESET}"
+ trials2.iat[i, j] = f"{Fore.GREEN}{str(trials.iloc[i, j])}{Fore.RESET}"
  if trials.loc[i]['is_best'] and highlight_best:
  for j in range(len(trials.loc[i]) - 3):
- trials.iat[i, j] = f"{Style.BRIGHT}{str(trials.loc[i][j])}{Style.RESET_ALL}"
+ trials2.iat[i, j] = (
+     f"{Style.BRIGHT}{str(trials.iloc[i, j])}{Style.RESET_ALL}"
+ )
+ trials = trials2
+ del trials2
  trials = trials.drop(columns=['is_initial_point', 'is_best', 'is_profit', 'is_random'])
  if remove_header > 0:
  table = tabulate.tabulate(

===== changed file =====

@@ -219,8 +219,10 @@ def _get_resample_from_period(period: str) -> str:
  raise ValueError(f"Period {period} is not supported.")
- def generate_periodic_breakdown_stats(trade_list: List, period: str) -> List[Dict[str, Any]]:
-     results = DataFrame.from_records(trade_list)
+ def generate_periodic_breakdown_stats(
+         trade_list: Union[List, DataFrame], period: str) -> List[Dict[str, Any]]:
+     results = trade_list if not isinstance(trade_list, list) else DataFrame.from_records(trade_list)
  if len(results) == 0:
  return []
  results['close_date'] = to_datetime(results['close_date'], utc=True)

View File

@ -1053,7 +1053,7 @@ class LocalTrade:
price = avg_price if is_exit else tmp_price price = avg_price if is_exit else tmp_price
current_stake += price * tmp_amount * side current_stake += price * tmp_amount * side
if current_amount > ZERO: if current_amount > ZERO and not is_exit:
avg_price = current_stake / current_amount avg_price = current_stake / current_amount
if is_exit: if is_exit:
@ -1066,7 +1066,10 @@ class LocalTrade:
exit_amount = o.safe_amount_after_fee exit_amount = o.safe_amount_after_fee
prof = self.calculate_profit(exit_rate, exit_amount, float(avg_price)) prof = self.calculate_profit(exit_rate, exit_amount, float(avg_price))
close_profit_abs += prof.profit_abs close_profit_abs += prof.profit_abs
close_profit = prof.profit_ratio if total_stake > 0:
# This needs to be calculated based on the last occuring exit to be aligned
# with realized_profit.
close_profit = (close_profit_abs / total_stake) * self.leverage
else: else:
total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price) total_stake = total_stake + self._calc_open_trade_value(tmp_amount, price)
max_stake_amount += (tmp_amount * price) max_stake_amount += (tmp_amount * price)
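
The recalculation above derives close_profit from the accumulated absolute profit over the total stake (scaled by leverage), so it stays consistent with realized_profit when a position is reduced over several exits instead of reflecting only the last exit's ratio. A worked illustration with assumed numbers (not taken from freqtrade):

# Two partial exits from a position opened with a total stake of 1000, leverage 1.
total_stake = 1000.0
leverage = 1.0

exits = [
    {'profit_abs': 12.0},   # first partial exit
    {'profit_abs': -4.0},   # second partial exit
]

close_profit_abs = 0.0
close_profit = 0.0
for o in exits:
    close_profit_abs += o['profit_abs']
    # Recomputed from the running absolute profit, as in the change above,
    # rather than taking the ratio of the most recent exit in isolation.
    close_profit = (close_profit_abs / total_stake) * leverage

print(close_profit_abs, close_profit)   # 8.0 0.008
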
@ -1780,7 +1783,7 @@ class Trade(ModelBase, LocalTrade):
.order_by(desc('profit_sum_abs')) .order_by(desc('profit_sum_abs'))
).all() ).all()
return_list: List[Dict] = [] resp: List[Dict] = []
for id, enter_tag, exit_reason, profit, profit_abs, count in mix_tag_perf: for id, enter_tag, exit_reason, profit, profit_abs, count in mix_tag_perf:
enter_tag = enter_tag if enter_tag is not None else "Other" enter_tag = enter_tag if enter_tag is not None else "Other"
exit_reason = exit_reason if exit_reason is not None else "Other" exit_reason = exit_reason if exit_reason is not None else "Other"
@ -1788,24 +1791,25 @@ class Trade(ModelBase, LocalTrade):
if (exit_reason is not None and enter_tag is not None): if (exit_reason is not None and enter_tag is not None):
mix_tag = enter_tag + " " + exit_reason mix_tag = enter_tag + " " + exit_reason
i = 0 i = 0
if not any(item["mix_tag"] == mix_tag for item in return_list): if not any(item["mix_tag"] == mix_tag for item in resp):
return_list.append({'mix_tag': mix_tag, resp.append({'mix_tag': mix_tag,
'profit': profit, 'profit_ratio': profit,
'profit_pct': round(profit * 100, 2), 'profit_pct': round(profit * 100, 2),
'profit_abs': profit_abs, 'profit_abs': profit_abs,
'count': count}) 'count': count})
else: else:
while i < len(return_list): while i < len(resp):
if return_list[i]["mix_tag"] == mix_tag: if resp[i]["mix_tag"] == mix_tag:
return_list[i] = { resp[i] = {
'mix_tag': mix_tag, 'mix_tag': mix_tag,
'profit': profit + return_list[i]["profit"], 'profit_ratio': profit + resp[i]["profit_ratio"],
'profit_pct': round(profit + return_list[i]["profit"] * 100, 2), 'profit_pct': round(profit + resp[i]["profit_ratio"] * 100, 2),
'profit_abs': profit_abs + return_list[i]["profit_abs"], 'profit_abs': profit_abs + resp[i]["profit_abs"],
'count': 1 + return_list[i]["count"]} 'count': 1 + resp[i]["count"]
}
i += 1 i += 1
return return_list return resp
@staticmethod @staticmethod
def get_best_pair(start_date: datetime = datetime.fromtimestamp(0)): def get_best_pair(start_date: datetime = datetime.fromtimestamp(0)):
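
The reworked aggregation reports profit_ratio instead of profit, matching the other performance endpoints. For illustration only, here is an alternative sketch of the same merge step using a dict keyed by mix_tag instead of the list scan above; this is not the code freqtrade ships, and the input rows are invented:

from typing import Any, Dict, List

rows = [  # (enter_tag, exit_reason, profit_ratio, profit_abs, count) as produced by the query
    ('tag_a', 'roi', 0.02, 5.0, 3),
    ('tag_a', 'roi', 0.01, 2.0, 1),
    ('tag_b', 'stop_loss', -0.03, -4.0, 2),
]

merged: Dict[str, Dict[str, Any]] = {}
for enter_tag, exit_reason, profit, profit_abs, count in rows:
    mix_tag = f"{enter_tag} {exit_reason}"
    entry = merged.setdefault(mix_tag, {'mix_tag': mix_tag, 'profit_ratio': 0.0,
                                        'profit_abs': 0.0, 'count': 0})
    entry['profit_ratio'] += profit
    entry['profit_abs'] += profit_abs
    entry['count'] += count
    entry['profit_pct'] = round(entry['profit_ratio'] * 100, 2)

resp: List[Dict[str, Any]] = list(merged.values())
print(resp)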


@ -21,6 +21,7 @@ from freqtrade.misc import pair_to_filename
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.resolvers import ExchangeResolver, StrategyResolver from freqtrade.resolvers import ExchangeResolver, StrategyResolver
from freqtrade.strategy import IStrategy from freqtrade.strategy import IStrategy
from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -636,7 +637,7 @@ def load_and_plot_trades(config: Config):
exchange = ExchangeResolver.load_exchange(config) exchange = ExchangeResolver.load_exchange(config)
IStrategy.dp = DataProvider(config, exchange) IStrategy.dp = DataProvider(config, exchange)
strategy.ft_bot_start() strategy.ft_bot_start()
strategy.bot_loop_start(datetime.now(timezone.utc)) strategy_safe_wrapper(strategy.bot_loop_start)(current_time=datetime.now(timezone.utc))
plot_elements = init_plotscript(config, list(exchange.markets), strategy.startup_candle_count) plot_elements = init_plotscript(config, list(exchange.markets), strategy.startup_candle_count)
timerange = plot_elements['timerange'] timerange = plot_elements['timerange']
trades = plot_elements['trades'] trades = plot_elements['trades']
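
bot_loop_start is now called through strategy_safe_wrapper, so an exception raised inside a user strategy no longer aborts plotting. The snippet below is a simplified, self-contained stand-in for that guard pattern (it is not freqtrade's strategy_safe_wrapper implementation; function and parameter names are illustrative):

import logging
from datetime import datetime, timezone
from typing import Any, Callable

logger = logging.getLogger(__name__)

def safe_wrapper(fn: Callable, default: Any = None) -> Callable:
    # Run the user callback; log instead of propagating if it raises.
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception:
            logger.exception("Strategy callback %s raised; continuing.", fn.__name__)
            return default
    return wrapper

def bot_loop_start(current_time: datetime) -> None:
    raise ValueError("user bug")

# With the wrapper, the exception is logged and the caller keeps running.
safe_wrapper(bot_loop_start)(current_time=datetime.now(timezone.utc))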


@ -1,9 +1,9 @@
from datetime import date, datetime from datetime import date, datetime
from typing import Any, Dict, List, Optional, Union from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, ConfigDict, RootModel, SerializeAsAny from pydantic import BaseModel, RootModel, SerializeAsAny
from freqtrade.constants import DATETIME_PRINT_FORMAT, IntOrInf from freqtrade.constants import IntOrInf
from freqtrade.enums import MarginMode, OrderTypeValues, SignalDirection, TradingMode from freqtrade.enums import MarginMode, OrderTypeValues, SignalDirection, TradingMode
from freqtrade.types import ValidExchangesType from freqtrade.types import ValidExchangesType
@ -95,15 +95,30 @@ class Count(BaseModel):
total_stake: float total_stake: float
class PerformanceEntry(BaseModel): class __BaseStatsModel(BaseModel):
pair: str
profit: float
profit_ratio: float profit_ratio: float
profit_pct: float profit_pct: float
profit_abs: float profit_abs: float
count: int count: int
class Entry(__BaseStatsModel):
enter_tag: str
class Exit(__BaseStatsModel):
exit_reason: str
class MixTag(__BaseStatsModel):
mix_tag: str
class PerformanceEntry(__BaseStatsModel):
pair: str
profit: float
class Profit(BaseModel): class Profit(BaseModel):
profit_closed_coin: float profit_closed_coin: float
profit_closed_percent_mean: float profit_closed_percent_mean: float
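
The response schemas for the new endpoints share a small private base model, so Entry, Exit and MixTag only add their distinguishing field. A minimal pydantic v2 sketch of the same layering (class and field values here are illustrative, the base class name is simplified):

from pydantic import BaseModel

class _BaseStatsModel(BaseModel):
    profit_ratio: float
    profit_pct: float
    profit_abs: float
    count: int

class Entry(_BaseStatsModel):
    enter_tag: str

class MixTag(_BaseStatsModel):
    mix_tag: str

e = Entry(enter_tag='breakout', profit_ratio=0.012, profit_pct=1.2, profit_abs=3.4, count=5)
print(e.model_dump())   # pydantic v2 serialisation, as used by the API layer
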
@ -484,11 +499,6 @@ class PairHistory(BaseModel):
data_start: str data_start: str
data_stop: str data_stop: str
data_stop_ts: int data_stop_ts: int
# TODO[pydantic]: The following keys were removed: `json_encoders`.
# Check https://docs.pydantic.dev/dev-v2/migration/#changes-to-config for more information.
model_config = ConfigDict(json_encoders={
datetime: lambda v: v.strftime(DATETIME_PRINT_FORMAT),
})
class BacktestFreqAIInputs(BaseModel): class BacktestFreqAIInputs(BaseModel):


@ -12,15 +12,15 @@ from freqtrade.exceptions import OperationalException
from freqtrade.rpc import RPC from freqtrade.rpc import RPC
from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload, from freqtrade.rpc.api_server.api_schemas import (AvailablePairs, Balances, BlacklistPayload,
BlacklistResponse, Count, DailyWeeklyMonthly, BlacklistResponse, Count, DailyWeeklyMonthly,
DeleteLockRequest, DeleteTrade, DeleteLockRequest, DeleteTrade, Entry,
ExchangeListResponse, ForceEnterPayload, ExchangeListResponse, Exit, ForceEnterPayload,
ForceEnterResponse, ForceExitPayload, ForceEnterResponse, ForceExitPayload,
FreqAIModelListResponse, Health, Locks, Logs, FreqAIModelListResponse, Health, Locks, Logs,
OpenTradeSchema, PairHistory, PerformanceEntry, MixTag, OpenTradeSchema, PairHistory,
Ping, PlotConfig, Profit, ResultMsg, ShowConfig, PerformanceEntry, Ping, PlotConfig, Profit,
Stats, StatusMsg, StrategyListResponse, ResultMsg, ShowConfig, Stats, StatusMsg,
StrategyResponse, SysInfo, Version, StrategyListResponse, StrategyResponse, SysInfo,
WhitelistResponse) Version, WhitelistResponse)
from freqtrade.rpc.api_server.deps import get_config, get_exchange, get_rpc, get_rpc_optional from freqtrade.rpc.api_server.deps import get_config, get_exchange, get_rpc, get_rpc_optional
from freqtrade.rpc.rpc import RPCException from freqtrade.rpc.rpc import RPCException
@ -52,7 +52,8 @@ logger = logging.getLogger(__name__)
# 2.31: new /backtest/history/ delete endpoint # 2.31: new /backtest/history/ delete endpoint
# 2.32: new /backtest/history/ patch endpoint # 2.32: new /backtest/history/ patch endpoint
# 2.33: Additional weekly/monthly metrics # 2.33: Additional weekly/monthly metrics
API_VERSION = 2.33 # 2.34: new entries/exits/mix_tags endpoints
API_VERSION = 2.34
# Public API, requires no auth. # Public API, requires no auth.
router_public = APIRouter() router_public = APIRouter()
@ -83,6 +84,21 @@ def count(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_count() return rpc._rpc_count()
@router.get('/entries', response_model=List[Entry], tags=['info'])
def entries(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_enter_tag_performance(pair)
@router.get('/exits', response_model=List[Exit], tags=['info'])
def exits(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_exit_reason_performance(pair)
@router.get('/mix_tags', response_model=List[MixTag], tags=['info'])
def mix_tags(pair: Optional[str] = None, rpc: RPC = Depends(get_rpc)):
return rpc._rpc_mix_tag_performance(pair)
@router.get('/performance', response_model=List[PerformanceEntry], tags=['info']) @router.get('/performance', response_model=List[PerformanceEntry], tags=['info'])
def performance(rpc: RPC = Depends(get_rpc)): def performance(rpc: RPC = Depends(get_rpc)):
return rpc._rpc_performance() return rpc._rpc_performance()
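
The three new routes are plain authenticated GET endpoints, so they can be queried like the existing /performance endpoint. A hedged example using requests; the listen address and credentials below are assumptions and must match your api_server configuration:

import requests

BASE = "http://127.0.0.1:8080/api/v1"    # assumed listen address
AUTH = ("freqtrader", "change_me")       # assumed api_server username/password

for endpoint in ("entries", "exits", "mix_tags"):
    r = requests.get(f"{BASE}/{endpoint}", params={"pair": "BTC/USDT"}, auth=AUTH, timeout=10)
    r.raise_for_status()
    print(endpoint, r.json())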


@ -56,7 +56,7 @@ def get_exchange(config=Depends(get_config)):
if not (exchange := ApiBG.exchanges.get(exchange_key)): if not (exchange := ApiBG.exchanges.get(exchange_key)):
from freqtrade.resolvers import ExchangeResolver from freqtrade.resolvers import ExchangeResolver
exchange = ExchangeResolver.load_exchange( exchange = ExchangeResolver.load_exchange(
config, load_leverage_tiers=False) config, validate=False, load_leverage_tiers=False)
ApiBG.exchanges[exchange_key] = exchange ApiBG.exchanges[exchange_key] = exchange
return exchange return exchange


@ -31,12 +31,11 @@ class Discord(Webhook):
def send_msg(self, msg) -> None: def send_msg(self, msg) -> None:
if msg['type'].value in self._config['discord']: if (fields := self._config['discord'].get(msg['type'].value)):
logger.info(f"Sending discord message: {msg}") logger.info(f"Sending discord message: {msg}")
msg['strategy'] = self.strategy msg['strategy'] = self.strategy
msg['timeframe'] = self.timeframe msg['timeframe'] = self.timeframe
fields = self._config['discord'].get(msg['type'].value)
color = 0x0000FF color = 0x0000FF
if msg['type'] in (RPCMessageType.EXIT, RPCMessageType.EXIT_FILL): if msg['type'] in (RPCMessageType.EXIT, RPCMessageType.EXIT_FILL):
profit_ratio = msg.get('profit_ratio') profit_ratio = msg.get('profit_ratio')
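
The rewrite folds the membership test and the lookup into one assignment expression, so fields is fetched once and empty or missing configurations are skipped in the same check. A tiny standalone illustration of the pattern (the config values are made up):

config = {'discord': {'exit_fill': [{'Trade ID': '{trade_id}'}], 'entry_fill': []}}

for msg_type in ('exit_fill', 'entry_fill', 'status'):
    if (fields := config['discord'].get(msg_type)):
        # Only reached when the message type is configured with a non-empty field list.
        print(msg_type, fields)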


@ -223,7 +223,8 @@ class Telegram(RPCHandler):
CommandHandler('health', self._health), CommandHandler('health', self._health),
CommandHandler('help', self._help), CommandHandler('help', self._help),
CommandHandler('version', self._version), CommandHandler('version', self._version),
CommandHandler('marketdir', self._changemarketdir) CommandHandler('marketdir', self._changemarketdir),
CommandHandler('order', self._order),
] ]
callbacks = [ callbacks = [
CallbackQueryHandler(self._status_table, pattern='update_status_table'), CallbackQueryHandler(self._status_table, pattern='update_status_table'),
@ -240,7 +241,7 @@ class Telegram(RPCHandler):
CallbackQueryHandler(self._mix_tag_performance, pattern='update_mix_tag_performance'), CallbackQueryHandler(self._mix_tag_performance, pattern='update_mix_tag_performance'),
CallbackQueryHandler(self._count, pattern='update_count'), CallbackQueryHandler(self._count, pattern='update_count'),
CallbackQueryHandler(self._force_exit_inline, pattern=r"force_exit__\S+"), CallbackQueryHandler(self._force_exit_inline, pattern=r"force_exit__\S+"),
CallbackQueryHandler(self._force_enter_inline, pattern=r"\S+\/\S+"), CallbackQueryHandler(self._force_enter_inline, pattern=r"force_enter__\S+"),
] ]
for handle in handles: for handle in handles:
self._app.add_handler(handle) self._app.add_handler(handle)
@ -555,6 +556,47 @@ class Telegram(RPCHandler):
return lines_detail return lines_detail
@authorized_only
async def _order(self, update: Update, context: CallbackContext) -> None:
"""
Handler for /order.
Returns the orders of the trade
:param bot: telegram bot
:param update: message update
:return: None
"""
trade_ids = []
if context.args and len(context.args) > 0:
trade_ids = [int(i) for i in context.args if i.isnumeric()]
results = self._rpc._rpc_trade_status(trade_ids=trade_ids)
for r in results:
lines = [
"*Order List for Trade #*`{trade_id}`"
]
lines_detail = self._prepare_order_details(
r['orders'], r['quote_currency'], r['is_open'])
lines.extend(lines_detail if lines_detail else "")
await self.__send_order_msg(lines, r)
async def __send_order_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
"""
Send status message.
"""
msg = ''
for line in lines:
if line:
if (len(msg) + len(line) + 1) < MAX_MESSAGE_LENGTH:
msg += line + '\n'
else:
await self._send_msg(msg.format(**r))
msg = "*Order List for Trade #*`{trade_id}` - continued\n" + line + '\n'
await self._send_msg(msg.format(**r))
@authorized_only @authorized_only
async def _status(self, update: Update, context: CallbackContext) -> None: async def _status(self, update: Update, context: CallbackContext) -> None:
""" """
@ -652,9 +694,6 @@ class Telegram(RPCHandler):
"*Open Order:* `{open_orders}`" "*Open Order:* `{open_orders}`"
+ ("- `{exit_order_status}`" if r['exit_order_status'] else "")) + ("- `{exit_order_status}`" if r['exit_order_status'] else ""))
lines_detail = self._prepare_order_details(
r['orders'], r['quote_currency'], r['is_open'])
lines.extend(lines_detail if lines_detail else "")
await self.__send_status_msg(lines, r) await self.__send_status_msg(lines, r)
async def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None: async def __send_status_msg(self, lines: List[str], r: Dict[str, Any]) -> None:
@ -1149,12 +1188,19 @@ class Telegram(RPCHandler):
async def _force_enter_inline(self, update: Update, _: CallbackContext) -> None: async def _force_enter_inline(self, update: Update, _: CallbackContext) -> None:
if update.callback_query: if update.callback_query:
query = update.callback_query query = update.callback_query
if query.data and '_||_' in query.data: if query.data and '__' in query.data:
pair, side = query.data.split('_||_') # Input data is "force_enter__<pair|cancel>_<side>"
order_side = SignalDirection(side) payload = query.data.split("__")[1]
await query.answer() if payload == 'cancel':
await query.edit_message_text(text=f"Manually entering {order_side} for {pair}") await query.answer()
await self._force_enter_action(pair, None, order_side) await query.edit_message_text(text="Force enter canceled.")
return
if payload and '_||_' in payload:
pair, side = payload.split('_||_')
order_side = SignalDirection(side)
await query.answer()
await query.edit_message_text(text=f"Manually entering {order_side} for {pair}")
await self._force_enter_action(pair, None, order_side)
@staticmethod @staticmethod
def _layout_inline_keyboard( def _layout_inline_keyboard(
@ -1183,12 +1229,14 @@ class Telegram(RPCHandler):
else: else:
whitelist = self._rpc._rpc_whitelist()['whitelist'] whitelist = self._rpc._rpc_whitelist()['whitelist']
pair_buttons = [ pair_buttons = [
InlineKeyboardButton(text=pair, callback_data=f"{pair}_||_{order_side}") InlineKeyboardButton(
for pair in sorted(whitelist) text=pair, callback_data=f"force_enter__{pair}_||_{order_side}"
) for pair in sorted(whitelist)
] ]
buttons_aligned = self._layout_inline_keyboard(pair_buttons) buttons_aligned = self._layout_inline_keyboard(pair_buttons)
buttons_aligned.append([InlineKeyboardButton(text='Cancel', callback_data='cancel')]) buttons_aligned.append([InlineKeyboardButton(text='Cancel',
callback_data='force_enter__cancel')])
await self._send_msg(msg="Which pair?", await self._send_msg(msg="Which pair?",
keyboard=buttons_aligned, keyboard=buttons_aligned,
query=update.callback_query) query=update.callback_query)
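
Inline-keyboard callbacks are now namespaced with a force_enter__ prefix, so the old generic pattern no longer swallows unrelated callbacks and a dedicated cancel payload can be recognised. A small sketch of parsing the payload format built above:

from typing import Optional, Tuple

def parse_force_enter(data: str) -> Optional[Tuple[str, str]]:
    # Callback data looks like "force_enter__<pair>_||_<side>" or "force_enter__cancel".
    payload = data.split("__", 1)[1]
    if payload == "cancel":
        return None
    pair, side = payload.split("_||_")
    return pair, side

print(parse_force_enter("force_enter__ETH/USDT_||_long"))   # ('ETH/USDT', 'long')
print(parse_force_enter("force_enter__cancel"))             # None
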
@ -1369,7 +1417,7 @@ class Telegram(RPCHandler):
stat_line = ( stat_line = (
f"{i+1}.\t <code>{trade['mix_tag']}\t" f"{i+1}.\t <code>{trade['mix_tag']}\t"
f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} " f"{round_coin_value(trade['profit_abs'], self._config['stake_currency'])} "
f"({trade['profit']:.2%}) " f"({trade['profit_ratio']:.2%}) "
f"({trade['count']})</code>\n") f"({trade['count']})</code>\n")
if len(output + stat_line) >= MAX_MESSAGE_LENGTH: if len(output + stat_line) >= MAX_MESSAGE_LENGTH:


@ -1,5 +1,5 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, Callable, Optional, Union from typing import Any, Callable, Dict, Optional, Union
from pandas import DataFrame from pandas import DataFrame
@ -38,17 +38,18 @@ def informative(timeframe: str, asset: str = '',
:param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe. :param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
:param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use :param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
current pair. current pair. Also supports limited pair format strings (see below)
:param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not :param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
specified, defaults to: specified, defaults to:
* {base}_{quote}_{column}_{timeframe} if asset is specified. * {base}_{quote}_{column}_{timeframe} if asset is specified.
* {column}_{timeframe} if asset is not specified. * {column}_{timeframe} if asset is not specified.
Format string supports these format variables: Pair format supports these format variables:
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {base} - base currency in lower case, for example 'eth'. * {base} - base currency in lower case, for example 'eth'.
* {BASE} - same as {base}, except in upper case. * {BASE} - same as {base}, except in upper case.
* {quote} - quote currency in lower case, for example 'usdt'. * {quote} - quote currency in lower case, for example 'usdt'.
* {QUOTE} - same as {quote}, except in upper case. * {QUOTE} - same as {quote}, except in upper case.
Format string additionally supports this variables.
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {column} - name of dataframe column. * {column} - name of dataframe column.
* {timeframe} - timeframe of informative dataframe. * {timeframe} - timeframe of informative dataframe.
:param ffill: ffill dataframe after merging informative pair. :param ffill: ffill dataframe after merging informative pair.
@ -68,9 +69,25 @@ def informative(timeframe: str, asset: str = '',
return decorator return decorator
def _format_pair_name(config, pair: str) -> str: def __get_pair_formats(market: Optional[Dict[str, Any]]) -> Dict[str, str]:
return pair.format(stake_currency=config['stake_currency'], if not market:
stake=config['stake_currency']).upper() return {}
base = market['base']
quote = market['quote']
return {
'base': base.lower(),
'BASE': base.upper(),
'quote': quote.lower(),
'QUOTE': quote.upper(),
}
def _format_pair_name(config, pair: str, market: Optional[Dict[str, Any]] = None) -> str:
return pair.format(
stake_currency=config['stake_currency'],
stake=config['stake_currency'],
**__get_pair_formats(market),
).upper()
def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict, def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict,
@ -85,7 +102,8 @@ def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata:
if asset: if asset:
# Insert stake currency if needed. # Insert stake currency if needed.
asset = _format_pair_name(config, asset) market1 = strategy.dp.market(metadata['pair'])
asset = _format_pair_name(config, asset, market1)
else: else:
# Not specifying an asset will define informative dataframe for current pair. # Not specifying an asset will define informative dataframe for current pair.
asset = metadata['pair'] asset = metadata['pair']
@ -93,8 +111,6 @@ def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata:
market = strategy.dp.market(asset) market = strategy.dp.market(asset)
if market is None: if market is None:
raise OperationalException(f'Market {asset} is not available.') raise OperationalException(f'Market {asset} is not available.')
base = market['base']
quote = market['quote']
# Default format. This optimizes for the common case: informative pairs using same stake # Default format. This optimizes for the common case: informative pairs using same stake
# currency. When quote currency matches stake currency, column name will omit base currency. # currency. When quote currency matches stake currency, column name will omit base currency.
@ -117,10 +133,7 @@ def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata:
formatter = fmt.format # A default string formatter. formatter = fmt.format # A default string formatter.
fmt_args = { fmt_args = {
'BASE': base.upper(), **__get_pair_formats(market),
'QUOTE': quote.upper(),
'base': base.lower(),
'quote': quote.lower(),
'asset': asset, 'asset': asset,
'timeframe': timeframe, 'timeframe': timeframe,
} }
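
With the extended format support, the asset argument of @informative can itself be a template such as '{base}/USDT', which is resolved per traded pair (see the IStrategy change further below). A hedged strategy sketch under that assumption; the indicator, class name and ROI/stoploss values are illustrative, and the merged columns follow the default {base}_{quote}_{column}_{timeframe} naming described in the docstring above:

import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import IStrategy, informative

class TemplatedInformative(IStrategy):
    timeframe = '5m'
    minimal_roi = {"0": 0.1}
    stoploss = -0.1

    # '{base}/USDT' is expanded per whitelisted pair, e.g. ETH/BTC -> ETH/USDT.
    @informative('1h', '{base}/USDT')
    def populate_indicators_base_usdt(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
        return dataframe

    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe

    def populate_exit_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        return dataframe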


@ -756,12 +756,23 @@ class IStrategy(ABC, HyperStrategyMixin):
candle_type = (inf_data.candle_type if inf_data.candle_type candle_type = (inf_data.candle_type if inf_data.candle_type
else self.config.get('candle_type_def', CandleType.SPOT)) else self.config.get('candle_type_def', CandleType.SPOT))
if inf_data.asset: if inf_data.asset:
pair_tf = ( if any(s in inf_data.asset for s in ("{BASE}", "{base}")):
_format_pair_name(self.config, inf_data.asset), for pair in self.dp.current_whitelist():
inf_data.timeframe,
candle_type, pair_tf = (
) _format_pair_name(self.config, inf_data.asset, self.dp.market(pair)),
informative_pairs.append(pair_tf) inf_data.timeframe,
candle_type,
)
informative_pairs.append(pair_tf)
else:
pair_tf = (
_format_pair_name(self.config, inf_data.asset),
inf_data.timeframe,
candle_type,
)
informative_pairs.append(pair_tf)
else: else:
for pair in self.dp.current_whitelist(): for pair in self.dp.current_whitelist():
informative_pairs.append((pair, inf_data.timeframe, candle_type)) informative_pairs.append((pair, inf_data.timeframe, candle_type))
@ -1006,7 +1017,7 @@ class IStrategy(ABC, HyperStrategyMixin):
exit_ = latest.get(SignalType.EXIT_LONG.value, 0) == 1 exit_ = latest.get(SignalType.EXIT_LONG.value, 0) == 1
exit_tag = latest.get(SignalTagType.EXIT_TAG.value, None) exit_tag = latest.get(SignalTagType.EXIT_TAG.value, None)
# Tags can be None, which does not resolve to False. # Tags can be None, which does not resolve to False.
exit_tag = exit_tag if isinstance(exit_tag, str) else None exit_tag = exit_tag if isinstance(exit_tag, str) and exit_tag != 'nan' else None
logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) " logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) "
f"enter={enter} exit={exit_}") f"enter={enter} exit={exit_}")
@ -1038,17 +1049,17 @@ class IStrategy(ABC, HyperStrategyMixin):
exit_short = latest.get(SignalType.EXIT_SHORT.value, 0) == 1 exit_short = latest.get(SignalType.EXIT_SHORT.value, 0) == 1
enter_signal: Optional[SignalDirection] = None enter_signal: Optional[SignalDirection] = None
enter_tag_value: Optional[str] = None enter_tag: Optional[str] = None
if enter_long == 1 and not any([exit_long, enter_short]): if enter_long == 1 and not any([exit_long, enter_short]):
enter_signal = SignalDirection.LONG enter_signal = SignalDirection.LONG
enter_tag_value = latest.get(SignalTagType.ENTER_TAG.value, None) enter_tag = latest.get(SignalTagType.ENTER_TAG.value, None)
if (self.config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT if (self.config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT
and self.can_short and self.can_short
and enter_short == 1 and not any([exit_short, enter_long])): and enter_short == 1 and not any([exit_short, enter_long])):
enter_signal = SignalDirection.SHORT enter_signal = SignalDirection.SHORT
enter_tag_value = latest.get(SignalTagType.ENTER_TAG.value, None) enter_tag = latest.get(SignalTagType.ENTER_TAG.value, None)
enter_tag_value = enter_tag_value if isinstance(enter_tag_value, str) else None enter_tag = enter_tag if isinstance(enter_tag, str) and enter_tag != 'nan' else None
timeframe_seconds = timeframe_to_seconds(timeframe) timeframe_seconds = timeframe_to_seconds(timeframe)
@ -1058,11 +1069,11 @@ class IStrategy(ABC, HyperStrategyMixin):
timeframe_seconds=timeframe_seconds, timeframe_seconds=timeframe_seconds,
enter=bool(enter_signal) enter=bool(enter_signal)
): ):
return None, enter_tag_value return None, enter_tag
logger.debug(f"entry trigger: {latest['date']} (pair={pair}) " logger.debug(f"entry trigger: {latest['date']} (pair={pair}) "
f"enter={enter_long} enter_tag_value={enter_tag_value}") f"enter={enter_long} enter_tag_value={enter_tag}")
return enter_signal, enter_tag_value return enter_signal, enter_tag
def ignore_expired_candle( def ignore_expired_candle(
self, self,
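
The extra != 'nan' checks on enter_tag and exit_tag guard against tags that arrive as the literal string 'nan', which is what a missing (NaN) tag becomes once it has been through a string conversion. A one-liner showing why the string form needs its own check:

missing_tag = float('nan')
as_string = str(missing_tag)
print(isinstance(missing_tag, str), as_string, as_string == 'nan')   # False nan True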


@ -290,9 +290,6 @@ class FreqaiExampleStrategy(IStrategy):
return df return df
def get_ticker_indicator(self):
return int(self.config["timeframe"][:-1])
def confirm_trade_entry( def confirm_trade_entry(
self, self,
pair: str, pair: str,


@ -7,10 +7,10 @@
-r docs/requirements-docs.txt -r docs/requirements-docs.txt
coveralls==3.3.1 coveralls==3.3.1
ruff==0.1.1 ruff==0.1.6
mypy==1.6.1 mypy==1.7.1
pre-commit==3.5.0 pre-commit==3.5.0
pytest==7.4.2 pytest==7.4.3
pytest-asyncio==0.21.1 pytest-asyncio==0.21.1
pytest-cov==4.1.0 pytest-cov==4.1.0
pytest-mock==3.12.0 pytest-mock==3.12.0
@ -20,10 +20,10 @@ isort==5.12.0
time-machine==2.13.0 time-machine==2.13.0
# Convert jupyter notebooks to markdown documents # Convert jupyter notebooks to markdown documents
nbconvert==7.9.2 nbconvert==7.11.0
# mypy types # mypy types
types-cachetools==5.3.0.6 types-cachetools==5.3.0.7
types-filelock==3.2.7 types-filelock==3.2.7
types-requests==2.31.0.10 types-requests==2.31.0.10
types-tabulate==0.9.0.3 types-tabulate==0.9.0.3


@ -5,7 +5,7 @@
torch==2.0.1 torch==2.0.1
#until these branches will be released we can use this #until these branches will be released we can use this
gymnasium==0.29.1 gymnasium==0.29.1
stable_baselines3==2.1.0 stable_baselines3==2.2.1
sb3_contrib>=2.0.0a9 sb3_contrib>=2.0.0a9
# Progress bar for stable-baselines3 and sb3-contrib # Progress bar for stable-baselines3 and sb3-contrib
tqdm==4.66.1 tqdm==4.66.1


@ -7,6 +7,6 @@ scikit-learn==1.1.3
joblib==1.3.2 joblib==1.3.2
catboost==1.2.2; 'arm' not in platform_machine catboost==1.2.2; 'arm' not in platform_machine
lightgbm==4.1.0 lightgbm==4.1.0
xgboost==2.0.0 xgboost==2.0.2
tensorboard==2.15.0 tensorboard==2.15.1
datasieve==0.1.7 datasieve==0.1.7


@ -2,7 +2,7 @@
-r requirements.txt -r requirements.txt
# Required for hyperopt # Required for hyperopt
scipy==1.11.3 scipy==1.11.4
scikit-learn==1.1.3 scikit-learn==1.1.3
scikit-optimize==0.9.0 scikit-optimize==0.9.0
filelock==3.12.4 filelock==3.13.1


@ -1,4 +1,4 @@
# Include all requirements to run the bot. # Include all requirements to run the bot.
-r requirements.txt -r requirements.txt
plotly==5.17.0 plotly==5.18.0


@ -1,46 +1,44 @@
numpy==1.25.2; platform_machine == 'armv7l' numpy==1.26.2
numpy==1.26.1; platform_machine != 'armv7l' pandas==2.1.3
pandas==2.0.3
pandas-ta==0.3.14b pandas-ta==0.3.14b
ccxt==4.1.22 ccxt==4.1.66
cryptography==41.0.4 cryptography==41.0.7
aiohttp==3.8.6 aiohttp==3.9.1
SQLAlchemy==2.0.22 SQLAlchemy==2.0.23
python-telegram-bot==20.6 python-telegram-bot==20.6
# can't be hard-pinned due to telegram-bot pinning httpx with ~ # can't be hard-pinned due to telegram-bot pinning httpx with ~
httpx>=0.24.1 httpx>=0.24.1
arrow==1.3.0 arrow==1.3.0
cachetools==5.3.1 cachetools==5.3.2
requests==2.31.0 requests==2.31.0
urllib3==2.0.7 urllib3==2.1.0
jsonschema==4.19.1 jsonschema==4.20.0
TA-Lib==0.4.28 TA-Lib==0.4.28
technical==1.4.0 technical==1.4.0
tabulate==0.9.0 tabulate==0.9.0
pycoingecko==3.1.0 pycoingecko==3.1.0
jinja2==3.1.2 jinja2==3.1.2
tables==3.8.0 tables==3.9.1
blosc==1.11.1
joblib==1.3.2 joblib==1.3.2
rich==13.6.0 rich==13.7.0
pyarrow==13.0.0; platform_machine != 'armv7l' pyarrow==14.0.1; platform_machine != 'armv7l'
# find first, C search in arrays # find first, C search in arrays
py_find_1st==1.1.5 py_find_1st==1.1.6
# Load ticker files 30% faster # Load ticker files 30% faster
python-rapidjson==1.12 python-rapidjson==1.13
# Properly format api responses # Properly format api responses
orjson==3.9.9 orjson==3.9.10
# Notify systemd # Notify systemd
sdnotify==0.3.2 sdnotify==0.3.2
# API Server # API Server
fastapi==0.104.0 fastapi==0.104.1
pydantic==2.4.2 pydantic==2.5.2
uvicorn==0.23.2 uvicorn==0.24.0.post1
pyjwt==2.8.0 pyjwt==2.8.0
aiofiles==23.2.1 aiofiles==23.2.1
psutil==5.9.6 psutil==5.9.6
@ -60,5 +58,5 @@ schedule==1.2.1
websockets==12.0 websockets==12.0
janus==1.0.0 janus==1.0.0
ast-comments==1.1.2 ast-comments==1.2.0
packaging==23.2 packaging==23.2


@ -112,6 +112,30 @@ class FtRestClient:
""" """
return self._get("count") return self._get("count")
def entries(self, pair=None):
"""Returns List of dicts containing all Trades, based on buy tag performance
Can either be average for all pairs or a specific pair provided
:return: json object
"""
return self._get("entries", params={"pair": pair} if pair else None)
def exits(self, pair=None):
"""Returns List of dicts containing all Trades, based on exit reason performance
Can either be average for all pairs or a specific pair provided
:return: json object
"""
return self._get("exits", params={"pair": pair} if pair else None)
def mix_tags(self, pair=None):
"""Returns List of dicts containing all Trades, based on entry_tag + exit_reason performance
Can either be average for all pairs or a specific pair provided
:return: json object
"""
return self._get("mix_tags", params={"pair": pair} if pair else None)
def locks(self): def locks(self):
"""Return current locks """Return current locks


@ -550,7 +550,7 @@ def test_start_install_ui(mocker):
assert download_mock.call_count == 0 assert download_mock.call_count == 0
def test_clean_ui_subdir(mocker, tmpdir, caplog): def test_clean_ui_subdir(mocker, tmp_path, caplog):
mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir", mocker.patch("freqtrade.commands.deploy_commands.Path.is_dir",
side_effect=[True, True]) side_effect=[True, True])
mocker.patch("freqtrade.commands.deploy_commands.Path.is_file", mocker.patch("freqtrade.commands.deploy_commands.Path.is_file",
@ -560,14 +560,14 @@ def test_clean_ui_subdir(mocker, tmpdir, caplog):
mocker.patch("freqtrade.commands.deploy_commands.Path.glob", mocker.patch("freqtrade.commands.deploy_commands.Path.glob",
return_value=[Path('test1'), Path('test2'), Path('.gitkeep')]) return_value=[Path('test1'), Path('test2'), Path('.gitkeep')])
folder = Path(tmpdir) / "uitests" folder = tmp_path / "uitests"
clean_ui_subdir(folder) clean_ui_subdir(folder)
assert log_has("Removing UI directory content.", caplog) assert log_has("Removing UI directory content.", caplog)
assert rd_mock.call_count == 1 assert rd_mock.call_count == 1
assert ul_mock.call_count == 1 assert ul_mock.call_count == 1
def test_download_and_install_ui(mocker, tmpdir): def test_download_and_install_ui(mocker, tmp_path):
# Create zipfile # Create zipfile
requests_mock = MagicMock() requests_mock = MagicMock()
file_like_object = BytesIO() file_like_object = BytesIO()
@ -583,7 +583,7 @@ def test_download_and_install_ui(mocker, tmpdir):
side_effect=[True, False]) side_effect=[True, False])
wb_mock = mocker.patch("freqtrade.commands.deploy_commands.Path.write_bytes") wb_mock = mocker.patch("freqtrade.commands.deploy_commands.Path.write_bytes")
folder = Path(tmpdir) / "uitests_dl" folder = tmp_path / "uitests_dl"
folder.mkdir(exist_ok=True) folder.mkdir(exist_ok=True)
assert read_ui_version(folder) is None assert read_ui_version(folder) is None
@ -1010,8 +1010,8 @@ def test_start_test_pairlist(mocker, caplog, tickers, default_conf, capsys):
pytest.fail(f'Expected well formed JSON, but failed to parse: {captured.out}') pytest.fail(f'Expected well formed JSON, but failed to parse: {captured.out}')
def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmpdir): def test_hyperopt_list(mocker, capsys, caplog, saved_hyperopt_results, tmp_path):
csv_file = Path(tmpdir) / "test.csv" csv_file = tmp_path / "test.csv"
mocker.patch( mocker.patch(
'freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist', 'freqtrade.optimize.hyperopt_tools.HyperoptTools._test_hyperopt_results_exist',
return_value=True return_value=True
@ -1512,10 +1512,10 @@ def test_backtesting_show(mocker, testdatadir, capsys):
assert "Pairs for Strategy" in out assert "Pairs for Strategy" in out
def test_start_convert_db(mocker, fee, tmpdir, caplog): def test_start_convert_db(fee, tmp_path):
db_src_file = Path(f"{tmpdir}/db.sqlite") db_src_file = tmp_path / "db.sqlite"
db_from = f"sqlite:///{db_src_file}" db_from = f"sqlite:///{db_src_file}"
db_target_file = Path(f"{tmpdir}/db_target.sqlite") db_target_file = tmp_path / "db_target.sqlite"
db_to = f"sqlite:///{db_target_file}" db_to = f"sqlite:///{db_target_file}"
args = [ args = [
"convert-db", "convert-db",
@ -1542,13 +1542,13 @@ def test_start_convert_db(mocker, fee, tmpdir, caplog):
assert db_target_file.is_file() assert db_target_file.is_file()
def test_start_strategy_updater(mocker, tmpdir): def test_start_strategy_updater(mocker, tmp_path):
sc_mock = mocker.patch('freqtrade.commands.strategy_utils_commands.start_conversion') sc_mock = mocker.patch('freqtrade.commands.strategy_utils_commands.start_conversion')
teststrats = Path(__file__).parent.parent / 'strategy/strats' teststrats = Path(__file__).parent.parent / 'strategy/strats'
args = [ args = [
"strategy-updater", "strategy-updater",
"--userdir", "--userdir",
str(tmpdir), str(tmp_path),
"--strategy-path", "--strategy-path",
str(teststrats), str(teststrats),
] ]
@ -1562,7 +1562,7 @@ def test_start_strategy_updater(mocker, tmpdir):
args = [ args = [
"strategy-updater", "strategy-updater",
"--userdir", "--userdir",
str(tmpdir), str(tmp_path),
"--strategy-path", "--strategy-path",
str(teststrats), str(teststrats),
"--strategy-list", "--strategy-list",


@ -413,8 +413,8 @@ def patch_gc(mocker) -> None:
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def user_dir(mocker, tmpdir) -> Path: def user_dir(mocker, tmp_path) -> Path:
user_dir = Path(tmpdir) / "user_data" user_dir = tmp_path / "user_data"
mocker.patch('freqtrade.configuration.configuration.create_userdata_dir', mocker.patch('freqtrade.configuration.configuration.create_userdata_dir',
return_value=user_dir) return_value=user_dir)
return user_dir return user_dir
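
These test changes (and the ones that follow) swap pytest's legacy tmpdir fixture, a py.path.local object, for tmp_path, which is a plain pathlib.Path, removing the Path(tmpdir) conversions. A minimal illustration of the difference:

from pathlib import Path

def test_with_tmp_path(tmp_path: Path) -> None:
    # tmp_path is already a pathlib.Path; no Path(...) wrapping needed.
    target = tmp_path / "user_data"
    target.mkdir()
    assert target.is_dir()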


@ -1,6 +1,5 @@
# pragma pylint: disable=missing-docstring, C0103 # pragma pylint: disable=missing-docstring, C0103
import logging import logging
from pathlib import Path
from shutil import copyfile from shutil import copyfile
import numpy as np import numpy as np
@ -50,8 +49,8 @@ def test_trades_to_ohlcv(trades_history_df, caplog):
assert 'high' in df.columns assert 'high' in df.columns
assert 'low' in df.columns assert 'low' in df.columns
assert 'close' in df.columns assert 'close' in df.columns
assert df.loc[:, 'high'][0] == 0.019627 assert df.iloc[0, :]['high'] == 0.019627
assert df.loc[:, 'low'][0] == 0.019626 assert df.iloc[0, :]['low'] == 0.019626
def test_ohlcv_fill_up_missing_data(testdatadir, caplog): def test_ohlcv_fill_up_missing_data(testdatadir, caplog):
@ -323,18 +322,17 @@ def test_trades_dict_to_list(fetch_trades_result):
assert t[6] == fetch_trades_result[i]['cost'] assert t[6] == fetch_trades_result[i]['cost']
def test_convert_trades_format(default_conf, testdatadir, tmpdir): def test_convert_trades_format(default_conf, testdatadir, tmp_path):
tmpdir1 = Path(tmpdir) files = [{'old': tmp_path / "XRP_ETH-trades.json.gz",
files = [{'old': tmpdir1 / "XRP_ETH-trades.json.gz", 'new': tmp_path / "XRP_ETH-trades.json"},
'new': tmpdir1 / "XRP_ETH-trades.json"}, {'old': tmp_path / "XRP_OLD-trades.json.gz",
{'old': tmpdir1 / "XRP_OLD-trades.json.gz", 'new': tmp_path / "XRP_OLD-trades.json"},
'new': tmpdir1 / "XRP_OLD-trades.json"},
] ]
for file in files: for file in files:
copyfile(testdatadir / file['old'].name, file['old']) copyfile(testdatadir / file['old'].name, file['old'])
assert not file['new'].exists() assert not file['new'].exists()
default_conf['datadir'] = tmpdir1 default_conf['datadir'] = tmp_path
convert_trades_format(default_conf, convert_from='jsongz', convert_trades_format(default_conf, convert_from='jsongz',
convert_to='json', erase=False) convert_to='json', erase=False)
@ -362,16 +360,15 @@ def test_convert_trades_format(default_conf, testdatadir, tmpdir):
(['UNITTEST_USDT_USDT-1h-mark', 'XRP_USDT_USDT-1h-mark'], CandleType.MARK), (['UNITTEST_USDT_USDT-1h-mark', 'XRP_USDT_USDT-1h-mark'], CandleType.MARK),
(['XRP_USDT_USDT-1h-futures'], CandleType.FUTURES), (['XRP_USDT_USDT-1h-futures'], CandleType.FUTURES),
]) ])
def test_convert_ohlcv_format(default_conf, testdatadir, tmpdir, file_base, candletype): def test_convert_ohlcv_format(default_conf, testdatadir, tmp_path, file_base, candletype):
tmpdir1 = Path(tmpdir)
prependix = '' if candletype == CandleType.SPOT else 'futures/' prependix = '' if candletype == CandleType.SPOT else 'futures/'
files_orig = [] files_orig = []
files_temp = [] files_temp = []
files_new = [] files_new = []
for file in file_base: for file in file_base:
file_orig = testdatadir / f"{prependix}{file}.feather" file_orig = testdatadir / f"{prependix}{file}.feather"
file_temp = tmpdir1 / f"{prependix}{file}.feather" file_temp = tmp_path / f"{prependix}{file}.feather"
file_new = tmpdir1 / f"{prependix}{file}.json.gz" file_new = tmp_path / f"{prependix}{file}.json.gz"
IDataHandler.create_dir_if_needed(file_temp) IDataHandler.create_dir_if_needed(file_temp)
copyfile(file_orig, file_temp) copyfile(file_orig, file_temp)
@ -379,7 +376,7 @@ def test_convert_ohlcv_format(default_conf, testdatadir, tmpdir, file_base, cand
files_temp.append(file_temp) files_temp.append(file_temp)
files_new.append(file_new) files_new.append(file_new)
default_conf['datadir'] = tmpdir1 default_conf['datadir'] = tmp_path
default_conf['candle_types'] = [candletype] default_conf['candle_types'] = [candletype]
if candletype == CandleType.SPOT: if candletype == CandleType.SPOT:
@ -445,30 +442,29 @@ def test_reduce_dataframe_footprint():
assert df2['close_copy'].dtype == np.float32 assert df2['close_copy'].dtype == np.float32
def test_convert_trades_to_ohlcv(testdatadir, tmpdir, caplog): def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog):
tmpdir1 = Path(tmpdir)
pair = 'XRP/ETH' pair = 'XRP/ETH'
file1 = tmpdir1 / 'XRP_ETH-1m.feather' file1 = tmp_path / 'XRP_ETH-1m.feather'
file5 = tmpdir1 / 'XRP_ETH-5m.feather' file5 = tmp_path / 'XRP_ETH-5m.feather'
filetrades = tmpdir1 / 'XRP_ETH-trades.json.gz' filetrades = tmp_path / 'XRP_ETH-trades.json.gz'
copyfile(testdatadir / file1.name, file1) copyfile(testdatadir / file1.name, file1)
copyfile(testdatadir / file5.name, file5) copyfile(testdatadir / file5.name, file5)
copyfile(testdatadir / filetrades.name, filetrades) copyfile(testdatadir / filetrades.name, filetrades)
# Compare downloaded dataset with converted dataset # Compare downloaded dataset with converted dataset
dfbak_1m = load_pair_history(datadir=tmpdir1, timeframe="1m", pair=pair) dfbak_1m = load_pair_history(datadir=tmp_path, timeframe="1m", pair=pair)
dfbak_5m = load_pair_history(datadir=tmpdir1, timeframe="5m", pair=pair) dfbak_5m = load_pair_history(datadir=tmp_path, timeframe="5m", pair=pair)
tr = TimeRange.parse_timerange('20191011-20191012') tr = TimeRange.parse_timerange('20191011-20191012')
convert_trades_to_ohlcv([pair], timeframes=['1m', '5m'], convert_trades_to_ohlcv([pair], timeframes=['1m', '5m'],
data_format_trades='jsongz', data_format_trades='jsongz',
datadir=tmpdir1, timerange=tr, erase=True) datadir=tmp_path, timerange=tr, erase=True)
assert log_has("Deleting existing data for pair XRP/ETH, interval 1m.", caplog) assert log_has("Deleting existing data for pair XRP/ETH, interval 1m.", caplog)
# Load new data # Load new data
df_1m = load_pair_history(datadir=tmpdir1, timeframe="1m", pair=pair) df_1m = load_pair_history(datadir=tmp_path, timeframe="1m", pair=pair)
df_5m = load_pair_history(datadir=tmpdir1, timeframe="5m", pair=pair) df_5m = load_pair_history(datadir=tmp_path, timeframe="5m", pair=pair)
assert_frame_equal(dfbak_1m, df_1m, check_exact=True) assert_frame_equal(dfbak_1m, df_1m, check_exact=True)
assert_frame_equal(dfbak_5m, df_5m, check_exact=True) assert_frame_equal(dfbak_5m, df_5m, check_exact=True)
@ -477,5 +473,5 @@ def test_convert_trades_to_ohlcv(testdatadir, tmpdir, caplog):
convert_trades_to_ohlcv(['NoDatapair'], timeframes=['1m', '5m'], convert_trades_to_ohlcv(['NoDatapair'], timeframes=['1m', '5m'],
data_format_trades='jsongz', data_format_trades='jsongz',
datadir=tmpdir1, timerange=tr, erase=True) datadir=tmp_path, timerange=tr, erase=True)
assert log_has(msg, caplog) assert log_has(msg, caplog)


@ -328,17 +328,16 @@ def test_hdf5datahandler_trades_load(testdatadir):
]) ])
def test_hdf5datahandler_ohlcv_load_and_resave( def test_hdf5datahandler_ohlcv_load_and_resave(
testdatadir, testdatadir,
tmpdir, tmp_path,
pair, pair,
timeframe, timeframe,
candle_type, candle_type,
candle_append, candle_append,
startdt, enddt startdt, enddt
): ):
tmpdir1 = Path(tmpdir) tmpdir2 = tmp_path
tmpdir2 = tmpdir1
if candle_type not in ('', 'spot'): if candle_type not in ('', 'spot'):
tmpdir2 = tmpdir1 / 'futures' tmpdir2 = tmp_path / 'futures'
tmpdir2.mkdir() tmpdir2.mkdir()
dh = get_datahandler(testdatadir, 'hdf5') dh = get_datahandler(testdatadir, 'hdf5')
ohlcv = dh._ohlcv_load(pair, timeframe, None, candle_type=candle_type) ohlcv = dh._ohlcv_load(pair, timeframe, None, candle_type=candle_type)
@ -348,7 +347,7 @@ def test_hdf5datahandler_ohlcv_load_and_resave(
file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.h5" file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.h5"
assert not file.is_file() assert not file.is_file()
dh1 = get_datahandler(tmpdir1, 'hdf5') dh1 = get_datahandler(tmp_path, 'hdf5')
dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type) dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type)
assert file.is_file() assert file.is_file()
@ -379,17 +378,16 @@ def test_hdf5datahandler_ohlcv_load_and_resave(
def test_generic_datahandler_ohlcv_load_and_resave( def test_generic_datahandler_ohlcv_load_and_resave(
datahandler, datahandler,
testdatadir, testdatadir,
tmpdir, tmp_path,
pair, pair,
timeframe, timeframe,
candle_type, candle_type,
candle_append, candle_append,
startdt, enddt startdt, enddt
): ):
tmpdir1 = Path(tmpdir) tmpdir2 = tmp_path
tmpdir2 = tmpdir1
if candle_type not in ('', 'spot'): if candle_type not in ('', 'spot'):
tmpdir2 = tmpdir1 / 'futures' tmpdir2 = tmp_path / 'futures'
tmpdir2.mkdir() tmpdir2.mkdir()
# Load data from one common file # Load data from one common file
dhbase = get_datahandler(testdatadir, 'feather') dhbase = get_datahandler(testdatadir, 'feather')
@ -403,7 +401,7 @@ def test_generic_datahandler_ohlcv_load_and_resave(
file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.{dh._get_file_extension()}" file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.{dh._get_file_extension()}"
assert not file.is_file() assert not file.is_file()
dh1 = get_datahandler(tmpdir1, datahandler) dh1 = get_datahandler(tmp_path, datahandler)
dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type) dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type)
assert file.is_file() assert file.is_file()
@ -459,15 +457,14 @@ def test_datahandler_trades_load(testdatadir, datahandler):
@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet']) @pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet'])
def test_datahandler_trades_store(testdatadir, tmpdir, datahandler): def test_datahandler_trades_store(testdatadir, tmp_path, datahandler):
tmpdir1 = Path(tmpdir)
dh = get_datahandler(testdatadir, datahandler) dh = get_datahandler(testdatadir, datahandler)
trades = dh.trades_load('XRP/ETH') trades = dh.trades_load('XRP/ETH')
dh1 = get_datahandler(tmpdir1, datahandler) dh1 = get_datahandler(tmp_path, datahandler)
dh1.trades_store('XRP/NEW', trades) dh1.trades_store('XRP/NEW', trades)
file = tmpdir1 / f'XRP_NEW-trades.{dh1._get_file_extension()}' file = tmp_path / f'XRP_NEW-trades.{dh1._get_file_extension()}'
assert file.is_file() assert file.is_file()
# Load trades back # Load trades back
trades_new = dh1.trades_load('XRP/NEW') trades_new = dh1.trades_load('XRP/NEW')


@ -106,17 +106,16 @@ def test_load_data_startup_candles(mocker, testdatadir) -> None:
@pytest.mark.parametrize('candle_type', ['mark', '']) @pytest.mark.parametrize('candle_type', ['mark', ''])
def test_load_data_with_new_pair_1min(ohlcv_history_list, mocker, caplog, def test_load_data_with_new_pair_1min(ohlcv_history_list, mocker, caplog,
default_conf, tmpdir, candle_type) -> None: default_conf, tmp_path, candle_type) -> None:
""" """
Test load_pair_history() with 1 min timeframe Test load_pair_history() with 1 min timeframe
""" """
tmpdir1 = Path(tmpdir)
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list) mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list)
exchange = get_patched_exchange(mocker, default_conf) exchange = get_patched_exchange(mocker, default_conf)
file = tmpdir1 / 'MEME_BTC-1m.feather' file = tmp_path / 'MEME_BTC-1m.feather'
# do not download a new pair if refresh_pairs isn't set # do not download a new pair if refresh_pairs isn't set
load_pair_history(datadir=tmpdir1, timeframe='1m', pair='MEME/BTC', candle_type=candle_type) load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type)
assert not file.is_file() assert not file.is_file()
assert log_has( assert log_has(
f"No history for MEME/BTC, {candle_type}, 1m found. " f"No history for MEME/BTC, {candle_type}, 1m found. "
@ -124,10 +123,10 @@ def test_load_data_with_new_pair_1min(ohlcv_history_list, mocker, caplog,
) )
# download a new pair if refresh_pairs is set # download a new pair if refresh_pairs is set
refresh_data(datadir=tmpdir1, timeframe='1m', pairs=['MEME/BTC'], refresh_data(datadir=tmp_path, timeframe='1m', pairs=['MEME/BTC'],
exchange=exchange, candle_type=CandleType.SPOT exchange=exchange, candle_type=CandleType.SPOT
) )
load_pair_history(datadir=tmpdir1, timeframe='1m', pair='MEME/BTC', candle_type=candle_type) load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type)
assert file.is_file() assert file.is_file()
assert log_has_re( assert log_has_re(
r'\(0/1\) - Download history data for "MEME/BTC", 1m, ' r'\(0/1\) - Download history data for "MEME/BTC", 1m, '
@ -273,27 +272,26 @@ def test_download_pair_history(
ohlcv_history_list, ohlcv_history_list,
mocker, mocker,
default_conf, default_conf,
tmpdir, tmp_path,
candle_type, candle_type,
subdir, subdir,
file_tail file_tail
) -> None: ) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list) mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list)
exchange = get_patched_exchange(mocker, default_conf) exchange = get_patched_exchange(mocker, default_conf)
tmpdir1 = Path(tmpdir) file1_1 = tmp_path / f'{subdir}MEME_BTC-1m{file_tail}.feather'
file1_1 = tmpdir1 / f'{subdir}MEME_BTC-1m{file_tail}.feather' file1_5 = tmp_path / f'{subdir}MEME_BTC-5m{file_tail}.feather'
file1_5 = tmpdir1 / f'{subdir}MEME_BTC-5m{file_tail}.feather' file2_1 = tmp_path / f'{subdir}CFI_BTC-1m{file_tail}.feather'
file2_1 = tmpdir1 / f'{subdir}CFI_BTC-1m{file_tail}.feather' file2_5 = tmp_path / f'{subdir}CFI_BTC-5m{file_tail}.feather'
file2_5 = tmpdir1 / f'{subdir}CFI_BTC-5m{file_tail}.feather'
assert not file1_1.is_file() assert not file1_1.is_file()
assert not file2_1.is_file() assert not file2_1.is_file()
assert _download_pair_history(datadir=tmpdir1, exchange=exchange, assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC', pair='MEME/BTC',
timeframe='1m', timeframe='1m',
candle_type=candle_type) candle_type=candle_type)
assert _download_pair_history(datadir=tmpdir1, exchange=exchange, assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='CFI/BTC', pair='CFI/BTC',
timeframe='1m', timeframe='1m',
candle_type=candle_type) candle_type=candle_type)
@ -308,11 +306,11 @@ def test_download_pair_history(
assert not file1_5.is_file() assert not file1_5.is_file()
assert not file2_5.is_file() assert not file2_5.is_file()
assert _download_pair_history(datadir=tmpdir1, exchange=exchange, assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC', pair='MEME/BTC',
timeframe='5m', timeframe='5m',
candle_type=candle_type) candle_type=candle_type)
assert _download_pair_history(datadir=tmpdir1, exchange=exchange, assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='CFI/BTC', pair='CFI/BTC',
timeframe='5m', timeframe='5m',
candle_type=candle_type) candle_type=candle_type)
@ -340,13 +338,12 @@ def test_download_pair_history2(mocker, default_conf, testdatadir) -> None:
assert json_dump_mock.call_count == 3 assert json_dump_mock.call_count == 3
def test_download_backtesting_data_exception(mocker, caplog, default_conf, tmpdir) -> None: def test_download_backtesting_data_exception(mocker, caplog, default_conf, tmp_path) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv', mocker.patch(f'{EXMS}.get_historic_ohlcv',
side_effect=Exception('File Error')) side_effect=Exception('File Error'))
tmpdir1 = Path(tmpdir)
exchange = get_patched_exchange(mocker, default_conf) exchange = get_patched_exchange(mocker, default_conf)
assert not _download_pair_history(datadir=tmpdir1, exchange=exchange, assert not _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC', pair='MEME/BTC',
timeframe='1m', candle_type='spot') timeframe='1m', candle_type='spot')
assert log_has('Failed to download history data for pair: "MEME/BTC", timeframe: 1m.', caplog) assert log_has('Failed to download history data for pair: "MEME/BTC", timeframe: 1m.', caplog)
@ -570,16 +567,15 @@ def test_refresh_backtest_trades_data(mocker, default_conf, markets, caplog, tes
def test_download_trades_history(trades_history, mocker, default_conf, testdatadir, caplog, def test_download_trades_history(trades_history, mocker, default_conf, testdatadir, caplog,
tmpdir, time_machine) -> None: tmp_path, time_machine) -> None:
start_dt = dt_utc(2023, 1, 1) start_dt = dt_utc(2023, 1, 1)
time_machine.move_to(start_dt, tick=False) time_machine.move_to(start_dt, tick=False)
tmpdir1 = Path(tmpdir)
ght_mock = MagicMock(side_effect=lambda pair, *args, **kwargs: (pair, trades_history)) ght_mock = MagicMock(side_effect=lambda pair, *args, **kwargs: (pair, trades_history))
mocker.patch(f'{EXMS}.get_historic_trades', ght_mock) mocker.patch(f'{EXMS}.get_historic_trades', ght_mock)
exchange = get_patched_exchange(mocker, default_conf) exchange = get_patched_exchange(mocker, default_conf)
file1 = tmpdir1 / 'ETH_BTC-trades.json.gz' file1 = tmp_path / 'ETH_BTC-trades.json.gz'
data_handler = get_datahandler(tmpdir1, data_format='jsongz') data_handler = get_datahandler(tmp_path, data_format='jsongz')
assert not file1.is_file() assert not file1.is_file()
@ -614,7 +610,7 @@ def test_download_trades_history(trades_history, mocker, default_conf, testdatad
pair='ETH/BTC') pair='ETH/BTC')
assert log_has_re('Failed to download historic trades for pair: "ETH/BTC".*', caplog) assert log_has_re('Failed to download historic trades for pair: "ETH/BTC".*', caplog)
file2 = tmpdir1 / 'XRP_ETH-trades.json.gz' file2 = tmp_path / 'XRP_ETH-trades.json.gz'
copyfile(testdatadir / file2.name, file2) copyfile(testdatadir / file2.name, file2)
ght_mock.reset_mock() ght_mock.reset_mock()

View File

@ -1,5 +1,4 @@
from datetime import datetime, timezone from datetime import datetime, timezone
from pathlib import Path
from shutil import copytree from shutil import copytree
from unittest.mock import PropertyMock from unittest.mock import PropertyMock
@ -11,7 +10,7 @@ from freqtrade.exceptions import OperationalException
from tests.conftest import EXMS, log_has, log_has_re, patch_exchange from tests.conftest import EXMS, log_has, log_has_re, patch_exchange
def test_import_kraken_trades_from_csv(testdatadir, tmpdir, caplog, default_conf_usdt, mocker): def test_import_kraken_trades_from_csv(testdatadir, tmp_path, caplog, default_conf_usdt, mocker):
with pytest.raises(OperationalException, match="This function is only for the kraken exchange"): with pytest.raises(OperationalException, match="This function is only for the kraken exchange"):
import_kraken_trades_from_csv(default_conf_usdt, 'feather') import_kraken_trades_from_csv(default_conf_usdt, 'feather')
@ -21,10 +20,9 @@ def test_import_kraken_trades_from_csv(testdatadir, tmpdir, caplog, default_conf
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={ mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={
'BCH/EUR': {'symbol': 'BCH/EUR', 'id': 'BCHEUR', 'altname': 'BCHEUR'}, 'BCH/EUR': {'symbol': 'BCH/EUR', 'id': 'BCHEUR', 'altname': 'BCHEUR'},
})) }))
tmpdir1 = Path(tmpdir) dstfile = tmp_path / 'BCH_EUR-trades.feather'
dstfile = tmpdir1 / 'BCH_EUR-trades.feather'
assert not dstfile.is_file() assert not dstfile.is_file()
default_conf_usdt['datadir'] = tmpdir1 default_conf_usdt['datadir'] = tmp_path
# There's 2 files in this tree, containing a total of 2 days. # There's 2 files in this tree, containing a total of 2 days.
# tests/testdata/kraken/ # tests/testdata/kraken/
# └── trades_csv # └── trades_csv
@ -32,7 +30,7 @@ def test_import_kraken_trades_from_csv(testdatadir, tmpdir, caplog, default_conf
# └── incremental_q2 # └── incremental_q2
# └── BCHEUR.csv <-- 2023-01-02 # └── BCHEUR.csv <-- 2023-01-02
copytree(testdatadir / 'kraken/trades_csv', tmpdir1 / 'trades_csv') copytree(testdatadir / 'kraken/trades_csv', tmp_path / 'trades_csv')
import_kraken_trades_from_csv(default_conf_usdt, 'feather') import_kraken_trades_from_csv(default_conf_usdt, 'feather')
assert log_has("Found csv files for BCHEUR.", caplog) assert log_has("Found csv files for BCHEUR.", caplog)
@ -40,7 +38,7 @@ def test_import_kraken_trades_from_csv(testdatadir, tmpdir, caplog, default_conf
assert dstfile.is_file() assert dstfile.is_file()
dh = get_datahandler(tmpdir1, 'feather') dh = get_datahandler(tmp_path, 'feather')
trades = dh.trades_load('BCH_EUR') trades = dh.trades_load('BCH_EUR')
assert len(trades) == 340 assert len(trades) == 340


@ -1851,7 +1851,7 @@ def test_fetch_bids_asks(default_conf, mocker):
@pytest.mark.parametrize("exchange_name", EXCHANGES) @pytest.mark.parametrize("exchange_name", EXCHANGES)
def test_get_tickers(default_conf, mocker, exchange_name): def test_get_tickers(default_conf, mocker, exchange_name, caplog):
api_mock = MagicMock() api_mock = MagicMock()
tick = {'ETH/BTC': { tick = {'ETH/BTC': {
'symbol': 'ETH/BTC', 'symbol': 'ETH/BTC',
@ -1900,6 +1900,14 @@ def test_get_tickers(default_conf, mocker, exchange_name):
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
exchange.get_tickers() exchange.get_tickers()
caplog.clear()
api_mock.fetch_tickers = MagicMock(side_effect=[ccxt.BadSymbol("SomeSymbol"), []])
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
x = exchange.get_tickers()
assert x == []
assert log_has_re(r'Could not load tickers due to BadSymbol\..*SomeSymbol', caplog)
caplog.clear()
api_mock.fetch_tickers = MagicMock(return_value={}) api_mock.fetch_tickers = MagicMock(return_value={})
exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name) exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
exchange.get_tickers() exchange.get_tickers()
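
The new assertions check that get_tickers() degrades gracefully when ccxt raises BadSymbol: the error is logged and the retried call returns an empty result. The exchange-side implementation is not part of this hunk; the following is a hedged sketch of the pattern the test exercises, not freqtrade's actual code, and the function name is illustrative:

import logging
import ccxt

logger = logging.getLogger(__name__)

def fetch_tickers_tolerant(exchange, retry: bool = True):
    # Return whatever ccxt delivers; on BadSymbol, log and retry once.
    try:
        return exchange.fetch_tickers()
    except ccxt.BadSymbol as e:
        logger.warning("Could not load tickers due to BadSymbol. Message: %s", e)
        if retry:
            return fetch_tickers_tolerant(exchange, retry=False)
        return []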


@ -18,7 +18,7 @@ from tests.conftest import log_has_re
def test_check_exchange(default_conf, caplog) -> None: def test_check_exchange(default_conf, caplog) -> None:
# Test an officially supported by Freqtrade team exchange # Test an officially supported by Freqtrade team exchange
default_conf['runmode'] = RunMode.DRY_RUN default_conf['runmode'] = RunMode.DRY_RUN
default_conf.get('exchange').update({'name': 'BITTREX'}) default_conf.get('exchange').update({'name': 'BINANCE'})
assert check_exchange(default_conf) assert check_exchange(default_conf)
assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.", assert log_has_re(r"Exchange .* is officially supported by the Freqtrade development team\.",
caplog) caplog)
@ -41,14 +41,14 @@ def test_check_exchange(default_conf, caplog) -> None:
caplog.clear() caplog.clear()
# Test an officially supported by Freqtrade team exchange - with remapping # Test an officially supported by Freqtrade team exchange - with remapping
default_conf.get('exchange').update({'name': 'okex'}) default_conf.get('exchange').update({'name': 'okx'})
assert check_exchange(default_conf) assert check_exchange(default_conf)
assert log_has_re( assert log_has_re(
r"Exchange \"okex\" is officially supported by the Freqtrade development team\.", r"Exchange \"okx\" is officially supported by the Freqtrade development team\.",
caplog) caplog)
caplog.clear() caplog.clear()
# Test an available exchange, supported by ccxt # Test an available exchange, supported by ccxt
default_conf.get('exchange').update({'name': 'huobipro'}) default_conf.get('exchange').update({'name': 'huobijp'})
assert check_exchange(default_conf) assert check_exchange(default_conf)
assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, " assert log_has_re(r"Exchange .* is known to the the ccxt library, available for the bot, "
r"but not officially supported " r"but not officially supported "

View File

@ -1,5 +1,4 @@
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, PropertyMock from unittest.mock import AsyncMock, MagicMock, PropertyMock
import ccxt import ccxt
@ -269,9 +268,9 @@ def test_additional_exchange_init_okx(default_conf, mocker):
"additional_exchange_init", "fetch_accounts") "additional_exchange_init", "fetch_accounts")
def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmpdir, caplog, time_machine): def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmp_path, caplog, time_machine):
default_conf['datadir'] = Path(tmpdir) default_conf['datadir'] = tmp_path
# fd_mock = mocker.patch('freqtrade.exchange.exchange.file_dump_json') # fd_mock = mocker.patch('freqtrade.exchange.exchange.file_dump_json')
api_mock = MagicMock() api_mock = MagicMock()
type(api_mock).has = PropertyMock(return_value={ type(api_mock).has = PropertyMock(return_value={

View File

@ -227,6 +227,7 @@ EXCHANGES = {
'timeframe': '1h', 'timeframe': '1h',
'futures_pair': 'BTC/USDT:USDT', 'futures_pair': 'BTC/USDT:USDT',
'futures': True, 'futures': True,
'orderbook_max_entries': 50,
'leverage_tiers_public': True, 'leverage_tiers_public': True,
'leverage_in_spot_market': True, 'leverage_in_spot_market': True,
'sample_order': [ 'sample_order': [
@ -247,6 +248,13 @@ EXCHANGES = {
} }
] ]
}, },
'bitmart': {
'pair': 'BTC/USDT',
'stake_currency': 'USDT',
'hasQuoteVolume': True,
'timeframe': '1h',
'orderbook_max_entries': 50,
},
'huobi': { 'huobi': {
'pair': 'ETH/BTC', 'pair': 'ETH/BTC',
'stake_currency': 'BTC', 'stake_currency': 'BTC',

View File

@ -133,6 +133,7 @@ class TestCCXTExchange:
exch, exchangename = exchange exch, exchangename = exchange
pair = EXCHANGES[exchangename]['pair'] pair = EXCHANGES[exchangename]['pair']
l2 = exch.fetch_l2_order_book(pair) l2 = exch.fetch_l2_order_book(pair)
orderbook_max_entries = EXCHANGES[exchangename].get('orderbook_max_entries')
assert 'asks' in l2 assert 'asks' in l2
assert 'bids' in l2 assert 'bids' in l2
assert len(l2['asks']) >= 1 assert len(l2['asks']) >= 1
@ -143,7 +144,7 @@ class TestCCXTExchange:
# TODO: Gate is unstable here at the moment, ignoring the limit partially. # TODO: Gate is unstable here at the moment, ignoring the limit partially.
return return
for val in [1, 2, 5, 25, 50, 100]: for val in [1, 2, 5, 25, 50, 100]:
if val > 50 and exchangename == 'bybit': if orderbook_max_entries and val > orderbook_max_entries:
continue continue
l2 = exch.fetch_l2_order_book(pair, val) l2 = exch.fetch_l2_order_book(pair, val)
if not l2_limit_range or val in l2_limit_range: if not l2_limit_range or val in l2_limit_range:
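
The hard-coded bybit special case is replaced by an optional per-exchange 'orderbook_max_entries' cap, declared in the EXCHANGES entries above. A stand-alone restatement of the skip logic, with illustrative exchange names rather than the real EXCHANGES dict:

EXCHANGE_CAPS = {
    'capped_exchange': {'orderbook_max_entries': 50},
    'uncapped_exchange': {},
}

def limits_to_test(exchangename, candidates=(1, 2, 5, 25, 50, 100)):
    # Skip any requested limit above the exchange's declared cap, if one exists.
    cap = EXCHANGE_CAPS[exchangename].get('orderbook_max_entries')
    return [val for val in candidates if not (cap and val > cap)]

assert limits_to_test('capped_exchange') == [1, 2, 5, 25, 50]
assert limits_to_test('uncapped_exchange') == [1, 2, 5, 25, 50, 100]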

View File

@ -21,13 +21,13 @@ def is_mac() -> bool:
@pytest.fixture(scope="function") @pytest.fixture(scope="function")
def freqai_conf(default_conf, tmpdir): def freqai_conf(default_conf, tmp_path):
freqaiconf = deepcopy(default_conf) freqaiconf = deepcopy(default_conf)
freqaiconf.update( freqaiconf.update(
{ {
"datadir": Path(default_conf["datadir"]), "datadir": Path(default_conf["datadir"]),
"strategy": "freqai_test_strat", "strategy": "freqai_test_strat",
"user_data_dir": Path(tmpdir), "user_data_dir": tmp_path,
"strategy-path": "freqtrade/tests/strategy/strats", "strategy-path": "freqtrade/tests/strategy/strats",
"freqaimodel": "LightGBMRegressor", "freqaimodel": "LightGBMRegressor",
"freqaimodel_path": "freqai/prediction_models", "freqaimodel_path": "freqai/prediction_models",

View File

@ -500,14 +500,14 @@ def test_get_required_data_timerange(mocker, freqai_conf):
assert (time_range.stopts - time_range.startts) == 177300 assert (time_range.stopts - time_range.startts) == 177300
def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmpdir): def test_download_all_data_for_training(mocker, freqai_conf, caplog, tmp_path):
caplog.set_level(logging.DEBUG) caplog.set_level(logging.DEBUG)
strategy = get_patched_freqai_strategy(mocker, freqai_conf) strategy = get_patched_freqai_strategy(mocker, freqai_conf)
exchange = get_patched_exchange(mocker, freqai_conf) exchange = get_patched_exchange(mocker, freqai_conf)
pairlist = PairListManager(exchange, freqai_conf) pairlist = PairListManager(exchange, freqai_conf)
strategy.dp = DataProvider(freqai_conf, exchange, pairlist) strategy.dp = DataProvider(freqai_conf, exchange, pairlist)
freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist'] freqai_conf['pairs'] = freqai_conf['exchange']['pair_whitelist']
freqai_conf['datadir'] = Path(tmpdir) freqai_conf['datadir'] = tmp_path
download_all_data_for_training(strategy.dp, freqai_conf) download_all_data_for_training(strategy.dp, freqai_conf)
assert log_has_re( assert log_has_re(

View File

@ -193,8 +193,8 @@ def test_start_no_hyperopt_allowed(mocker, hyperopt_conf, caplog) -> None:
start_hyperopt(pargs) start_hyperopt(pargs)
def test_start_no_data(mocker, hyperopt_conf, tmpdir) -> None: def test_start_no_data(mocker, hyperopt_conf, tmp_path) -> None:
hyperopt_conf['user_data_dir'] = Path(tmpdir) hyperopt_conf['user_data_dir'] = tmp_path
patched_configuration_load_config_file(mocker, hyperopt_conf) patched_configuration_load_config_file(mocker, hyperopt_conf)
mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame)) mocker.patch('freqtrade.data.history.load_pair_history', MagicMock(return_value=pd.DataFrame))
mocker.patch( mocker.patch(
@ -310,6 +310,8 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
'freqtrade.optimize.hyperopt.get_timerange', 'freqtrade.optimize.hyperopt.get_timerange',
MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))) MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13)))
) )
# Dummy-reduce points to ensure scikit-learn is forced to generate new values
mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2)
parallel = mocker.patch( parallel = mocker.patch(
'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel', 'freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel',
@ -857,14 +859,16 @@ def test_simplified_interface_failed(mocker, hyperopt_conf, space) -> None:
hyperopt.start() hyperopt.start()
def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmpdir, fee) -> None: def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmp_path, fee) -> None:
patch_exchange(mocker) patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) # Dummy-reduce points to ensure scikit-learn is forced to generate new values
mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2)
(tmp_path / 'hyperopt_results').mkdir(parents=True)
# No hyperopt needed # No hyperopt needed
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['all'], 'spaces': ['all'],
}) })
@ -897,17 +901,19 @@ def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmpdir, fee) -> None:
hyperopt.get_optimizer([], 2) hyperopt.get_optimizer([], 2)
def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmpdir, fee) -> None: def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmp_path, fee) -> None:
mocker.patch(f'{EXMS}.validate_config', MagicMock()) mocker.patch(f'{EXMS}.validate_config', MagicMock())
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
mocker.patch(f'{EXMS}._load_markets') mocker.patch(f'{EXMS}._load_markets')
mocker.patch(f'{EXMS}.markets', mocker.patch(f'{EXMS}.markets',
PropertyMock(return_value=get_markets())) PropertyMock(return_value=get_markets()))
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) (tmp_path / 'hyperopt_results').mkdir(parents=True)
# Dummy-reduce points to ensure scikit-learn is forced to generate new values
mocker.patch('freqtrade.optimize.hyperopt.INITIAL_POINTS', 2)
# No hyperopt needed # No hyperopt needed
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['all'], 'spaces': ['all'],
# Enforce parallelity # Enforce parallelity
@ -938,14 +944,14 @@ def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmpdir,
hyperopt.start() hyperopt.start()
def test_in_strategy_auto_hyperopt_per_epoch(mocker, hyperopt_conf, tmpdir, fee) -> None: def test_in_strategy_auto_hyperopt_per_epoch(mocker, hyperopt_conf, tmp_path, fee) -> None:
patch_exchange(mocker) patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) (tmp_path / 'hyperopt_results').mkdir(parents=True)
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['all'], 'spaces': ['all'],
'epochs': 3, 'epochs': 3,
@ -995,15 +1001,15 @@ def test_SKDecimal():
assert space.transform([1.5, 1.6]) == [150, 160] assert space.transform([1.5, 1.6]) == [150, 160]
def test_stake_amount_unlimited_max_open_trades(mocker, hyperopt_conf, tmpdir, fee) -> None: def test_stake_amount_unlimited_max_open_trades(mocker, hyperopt_conf, tmp_path, fee) -> None:
# This test is to ensure that unlimited max_open_trades are ignored for the backtesting # This test is to ensure that unlimited max_open_trades are ignored for the backtesting
# if we have an unlimited stake amount # if we have an unlimited stake amount
patch_exchange(mocker) patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) (tmp_path / 'hyperopt_results').mkdir(parents=True)
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['trades'], 'spaces': ['trades'],
'stake_amount': 'unlimited' 'stake_amount': 'unlimited'
@ -1023,15 +1029,15 @@ def test_stake_amount_unlimited_max_open_trades(mocker, hyperopt_conf, tmpdir, f
assert hyperopt.backtesting.strategy.max_open_trades == 1 assert hyperopt.backtesting.strategy.max_open_trades == 1
def test_max_open_trades_dump(mocker, hyperopt_conf, tmpdir, fee, capsys) -> None: def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> None:
# This test is to ensure that after hyperopting, max_open_trades is never # This test is to ensure that after hyperopting, max_open_trades is never
# saved as inf in the output json params # saved as inf in the output json params
patch_exchange(mocker) patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) (tmp_path / 'hyperopt_results').mkdir(parents=True)
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['trades'], 'spaces': ['trades'],
}) })
@ -1069,16 +1075,16 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmpdir, fee, capsys) -> Non
assert '"max_open_trades":-1' in out assert '"max_open_trades":-1' in out
def test_max_open_trades_consistency(mocker, hyperopt_conf, tmpdir, fee) -> None: def test_max_open_trades_consistency(mocker, hyperopt_conf, tmp_path, fee) -> None:
# This test is to ensure that max_open_trades is the same across all functions needing it # This test is to ensure that max_open_trades is the same across all functions needing it
# after it has been changed from the hyperopt # after it has been changed from the hyperopt
patch_exchange(mocker) patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_fee', return_value=0) mocker.patch(f'{EXMS}.get_fee', return_value=0)
(Path(tmpdir) / 'hyperopt_results').mkdir(parents=True) (tmp_path / 'hyperopt_results').mkdir(parents=True)
hyperopt_conf.update({ hyperopt_conf.update({
'strategy': 'HyperoptableStrategy', 'strategy': 'HyperoptableStrategy',
'user_data_dir': Path(tmpdir), 'user_data_dir': tmp_path,
'hyperopt_random_state': 42, 'hyperopt_random_state': 42,
'spaces': ['trades'], 'spaces': ['trades'],
'stake_amount': 'unlimited', 'stake_amount': 'unlimited',

View File

@ -19,9 +19,9 @@ def create_results() -> List[Dict]:
return [{'loss': 1, 'result': 'foo', 'params': {}, 'is_best': True}] return [{'loss': 1, 'result': 'foo', 'params': {}, 'is_best': True}]
def test_save_results_saves_epochs(hyperopt, tmpdir, caplog) -> None: def test_save_results_saves_epochs(hyperopt, tmp_path, caplog) -> None:
hyperopt.results_file = Path(tmpdir / 'ut_results.fthypt') hyperopt.results_file = tmp_path / 'ut_results.fthypt'
hyperopt_epochs = HyperoptTools.load_filtered_results(hyperopt.results_file, {}) hyperopt_epochs = HyperoptTools.load_filtered_results(hyperopt.results_file, {})
assert log_has_re("Hyperopt file .* not found.", caplog) assert log_has_re("Hyperopt file .* not found.", caplog)
@ -182,9 +182,9 @@ def test_get_strategy_filename(default_conf):
assert x is None assert x is None
def test_export_params(tmpdir): def test_export_params(tmp_path):
filename = Path(tmpdir) / f"{CURRENT_TEST_STRATEGY}.json" filename = tmp_path / f"{CURRENT_TEST_STRATEGY}.json"
assert not filename.is_file() assert not filename.is_file()
params = { params = {
"params_details": { "params_details": {
@ -231,11 +231,11 @@ def test_export_params(tmpdir):
assert "max_open_trades" in content["params"] assert "max_open_trades" in content["params"]
def test_try_export_params(default_conf, tmpdir, caplog, mocker): def test_try_export_params(default_conf, tmp_path, caplog, mocker):
default_conf['disableparamexport'] = False default_conf['disableparamexport'] = False
export_mock = mocker.patch("freqtrade.optimize.hyperopt_tools.HyperoptTools.export_params") export_mock = mocker.patch("freqtrade.optimize.hyperopt_tools.HyperoptTools.export_params")
filename = Path(tmpdir) / f"{CURRENT_TEST_STRATEGY}.json" filename = tmp_path / f"{CURRENT_TEST_STRATEGY}.json"
assert not filename.is_file() assert not filename.is_file()
params = { params = {
"params_details": { "params_details": {

View File

@ -74,7 +74,7 @@ def test_text_table_bt_results():
assert text_table_bt_results(pair_results, stake_currency='BTC') == result_str assert text_table_bt_results(pair_results, stake_currency='BTC') == result_str
def test_generate_backtest_stats(default_conf, testdatadir, tmpdir): def test_generate_backtest_stats(default_conf, testdatadir, tmp_path):
default_conf.update({'strategy': CURRENT_TEST_STRATEGY}) default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
StrategyResolver.load_strategy(default_conf) StrategyResolver.load_strategy(default_conf)
@ -185,8 +185,8 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
assert strat_stats['pairlist'] == ['UNITTEST/BTC'] assert strat_stats['pairlist'] == ['UNITTEST/BTC']
# Test storing stats # Test storing stats
filename = Path(tmpdir / 'btresult.json') filename = tmp_path / 'btresult.json'
filename_last = Path(tmpdir / LAST_BT_RESULT_FN) filename_last = tmp_path / LAST_BT_RESULT_FN
_backup_file(filename_last, copy_file=True) _backup_file(filename_last, copy_file=True)
assert not filename.is_file() assert not filename.is_file()
@ -196,7 +196,7 @@ def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
last_fn = get_latest_backtest_filename(filename_last.parent) last_fn = get_latest_backtest_filename(filename_last.parent)
assert re.match(r"btresult-.*\.json", last_fn) assert re.match(r"btresult-.*\.json", last_fn)
filename1 = Path(tmpdir / last_fn) filename1 = tmp_path / last_fn
assert filename1.is_file() assert filename1.is_file()
content = filename1.read_text() content = filename1.read_text()
assert 'max_drawdown_account' in content assert 'max_drawdown_account' in content
@ -254,14 +254,14 @@ def test_store_backtest_candles(testdatadir, mocker):
dump_mock.reset_mock() dump_mock.reset_mock()
def test_write_read_backtest_candles(tmpdir): def test_write_read_backtest_candles(tmp_path):
candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}} candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}
# test directory exporting # test directory exporting
sample_date = '2022_01_01_15_05_13' sample_date = '2022_01_01_15_05_13'
store_backtest_analysis_results(Path(tmpdir), candle_dict, {}, sample_date) store_backtest_analysis_results(tmp_path, candle_dict, {}, sample_date)
stored_file = Path(tmpdir / f'backtest-result-{sample_date}_signals.pkl') stored_file = tmp_path / f'backtest-result-{sample_date}_signals.pkl'
with stored_file.open("rb") as scp: with stored_file.open("rb") as scp:
pickled_signal_candles = joblib.load(scp) pickled_signal_candles = joblib.load(scp)
@ -273,9 +273,9 @@ def test_write_read_backtest_candles(tmpdir):
_clean_test_file(stored_file) _clean_test_file(stored_file)
# test file exporting # test file exporting
filename = Path(tmpdir / 'testresult') filename = tmp_path / 'testresult'
store_backtest_analysis_results(filename, candle_dict, {}, sample_date) store_backtest_analysis_results(filename, candle_dict, {}, sample_date)
stored_file = Path(tmpdir / f'testresult-{sample_date}_signals.pkl') stored_file = tmp_path / f'testresult-{sample_date}_signals.pkl'
with stored_file.open("rb") as scp: with stored_file.open("rb") as scp:
pickled_signal_candles = joblib.load(scp) pickled_signal_candles = joblib.load(scp)

View File

@ -29,15 +29,15 @@ def test_init_create_session(default_conf):
assert 'scoped_session' in type(Trade.session).__name__ assert 'scoped_session' in type(Trade.session).__name__
def test_init_custom_db_url(default_conf, tmpdir): def test_init_custom_db_url(default_conf, tmp_path):
# Update path to a value other than default, but still in-memory # Update path to a value other than default, but still in-memory
filename = f"{tmpdir}/freqtrade2_test.sqlite" filename = tmp_path / "freqtrade2_test.sqlite"
assert not Path(filename).is_file() assert not filename.is_file()
default_conf.update({'db_url': f'sqlite:///{filename}'}) default_conf.update({'db_url': f'sqlite:///{filename}'})
init_db(default_conf['db_url']) init_db(default_conf['db_url'])
assert Path(filename).is_file() assert filename.is_file()
r = Trade.session.execute(text("PRAGMA journal_mode")) r = Trade.session.execute(text("PRAGMA journal_mode"))
assert r.first() == ('wal',) assert r.first() == ('wal',)

View File

@ -2302,6 +2302,101 @@ def test_recalc_trade_from_orders(fee):
assert pytest.approx(trade.open_trade_value) == o1_trade_val + o2_trade_val + o3_trade_val assert pytest.approx(trade.open_trade_value) == o1_trade_val + o2_trade_val + o3_trade_val
@pytest.mark.usefixtures("init_persistence")
def test_recalc_trade_from_orders_kucoin():
# Taken from https://github.com/freqtrade/freqtrade/issues/9346
o1_amount = 11511963.8634448908
o2_amount = 11750101.7743937783
o3_amount = 23262065.6378386617 # Exit amount - barely doesn't even out
res = o1_amount + o2_amount - o3_amount
assert res > 0.0
assert res < 0.1
o1_rate = 0.000029901
o2_rate = 0.000029295
o3_rate = 0.000029822
o1_cost = o1_amount * o1_rate
trade = Trade(
pair='FLOKI/USDT',
stake_amount=o1_cost,
open_date=dt_now() - timedelta(hours=2),
amount=o1_amount,
fee_open=0.001,
fee_close=0.001,
exchange='binance',
open_rate=o1_rate,
max_rate=o1_rate,
leverage=1,
)
# Check with 1 order
order1 = Order(
ft_order_side='buy',
ft_pair=trade.pair,
ft_is_open=False,
status="closed",
symbol=trade.pair,
order_type="market",
side="buy",
price=o1_rate,
average=o1_rate,
filled=o1_amount,
remaining=0,
cost=o1_cost,
order_date=trade.open_date,
order_filled_date=trade.open_date,
)
trade.orders.append(order1)
order2 = Order(
ft_order_side='buy',
ft_pair=trade.pair,
ft_is_open=False,
status="closed",
symbol=trade.pair,
order_type="market",
side="buy",
price=o2_rate,
average=o2_rate,
filled=o2_amount,
remaining=0,
cost=o2_amount * o2_rate,
order_date=trade.open_date,
order_filled_date=trade.open_date,
)
trade.orders.append(order2)
trade.recalc_trade_from_orders()
assert trade.amount == o1_amount + o2_amount
profit = trade.calculate_profit(o3_rate)
assert profit.profit_abs == pytest.approx(3.90069871)
assert profit.profit_ratio == pytest.approx(0.00566035)
order3 = Order(
ft_order_side='sell',
ft_pair=trade.pair,
ft_is_open=False,
status="closed",
symbol=trade.pair,
order_type="market",
side="sell",
price=o3_rate,
average=o3_rate,
filled=o3_amount,
remaining=0,
cost=o2_amount * o2_rate,
order_date=trade.open_date,
order_filled_date=trade.open_date,
)
trade.orders.append(order3)
trade.update_trade(order3)
assert trade.is_open is False
# Trade closed correctly - but left a minimal amount.
assert trade.amount == 8e-09
assert pytest.approx(trade.close_profit_abs) == 3.90069871
assert pytest.approx(trade.close_profit) == 0.00566035
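
A quick check of the "barely doesn't even out" comment above, using exact decimal arithmetic (illustrative only, not part of the test): the two entry amounts exceed the exit amount by a sub-satoshi residue of 7.4e-9, and after the float-based recalculation the trade is left holding 8e-09, as the assertion above states.

from decimal import Decimal

o1 = Decimal("11511963.8634448908")   # first entry amount
o2 = Decimal("11750101.7743937783")   # second entry amount
o3 = Decimal("23262065.6378386617")   # exit amount
dust = o1 + o2 - o3
print(dust)                           # 7.4E-9, far below any tradable amount
assert Decimal("0") < dust < Decimal("0.1")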
@pytest.mark.parametrize('is_short', [True, False]) @pytest.mark.parametrize('is_short', [True, False])
def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short): def test_recalc_trade_from_orders_ignores_bad_orders(fee, is_short):
@ -2581,9 +2676,9 @@ def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open):
'orders': [ 'orders': [
(('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
(('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
(('sell', 50, 12), (150.0, 12.5, 1875.0, -25.0, -25.0, -0.04)), (('sell', 50, 12), (150.0, 12.5, 1875.0, -25.0, -25.0, -0.01)),
(('sell', 100, 20), (50.0, 12.5, 625.0, 725.0, 750.0, 0.60)), (('sell', 100, 20), (50.0, 12.5, 625.0, 725.0, 750.0, 0.29)),
(('sell', 50, 5), (50.0, 12.5, 625.0, 350.0, -375.0, -0.60)), (('sell', 50, 5), (50.0, 12.5, 625.0, 350.0, -375.0, 0.14)),
], ],
'end_profit': 350.0, 'end_profit': 350.0,
'end_profit_ratio': 0.14, 'end_profit_ratio': 0.14,
@ -2593,9 +2688,9 @@ def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open):
'orders': [ 'orders': [
(('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
(('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
(('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.044788)), (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)),
(('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.59201995)), (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)),
(('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, -377.1875, -0.60199501)), (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, -377.1875, 0.1343142)),
], ],
'end_profit': 336.625, 'end_profit': 336.625,
'end_profit_ratio': 0.1343142, 'end_profit_ratio': 0.1343142,
@ -2605,10 +2700,10 @@ def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open):
'orders': [ 'orders': [
(('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
(('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
(('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 1.189027)), (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)),
(('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 1.189027)), (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)),
(('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.7186579)), (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)),
(('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 1787.25, 1.08048062)), (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 1787.25, 0.9747170)),
], ],
'end_profit': 3175.75, 'end_profit': 3175.75,
'end_profit_ratio': 0.9747170, 'end_profit_ratio': 0.9747170,
@ -2619,10 +2714,10 @@ def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open):
'orders': [ 'orders': [
(('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
(('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
(('sell', 100, 11), (100.0, 5.0, 500.0, 600.0, 600.0, 1.2)), (('sell', 100, 11), (100.0, 5.0, 500.0, 600.0, 600.0, 0.6)),
(('buy', 150, 15), (250.0, 11.0, 2750.0, 600.0, 600.0, 1.2)), (('buy', 150, 15), (250.0, 11.0, 2750.0, 600.0, 600.0, 0.6)),
(('sell', 100, 19), (150.0, 11.0, 1650.0, 1400.0, 800.0, 0.72727273)), (('sell', 100, 19), (150.0, 11.0, 1650.0, 1400.0, 800.0, 0.43076923)),
(('sell', 150, 23), (150.0, 11.0, 1650.0, 3200.0, 1800.0, 1.09090909)), (('sell', 150, 23), (150.0, 11.0, 1650.0, 3200.0, 1800.0, 0.98461538)),
], ],
'end_profit': 3200.0, 'end_profit': 3200.0,
'end_profit_ratio': 0.98461538, 'end_profit_ratio': 0.98461538,
@ -2632,10 +2727,10 @@ def test_order_to_ccxt(limit_buy_order_open, limit_sell_order_usdt_open):
'orders': [ 'orders': [
(('buy', 100, 8), (100.0, 8.0, 800.0, 0.0, None, None)), (('buy', 100, 8), (100.0, 8.0, 800.0, 0.0, None, None)),
(('buy', 100, 9), (200.0, 8.5, 1700.0, 0.0, None, None)), (('buy', 100, 9), (200.0, 8.5, 1700.0, 0.0, None, None)),
(('sell', 100, 10), (100.0, 8.5, 850.0, 150.0, 150.0, 0.17647059)), (('sell', 100, 10), (100.0, 8.5, 850.0, 150.0, 150.0, 0.08823529)),
(('buy', 150, 11), (250.0, 10, 2500.0, 150.0, 150.0, 0.17647059)), (('buy', 150, 11), (250.0, 10, 2500.0, 150.0, 150.0, 0.08823529)),
(('sell', 100, 12), (150.0, 10.0, 1500.0, 350.0, 200.0, 0.2)), (('sell', 100, 12), (150.0, 10.0, 1500.0, 350.0, 200.0, 0.1044776)),
(('sell', 150, 14), (150.0, 10.0, 1500.0, 950.0, 600.0, 0.40)), (('sell', 150, 14), (150.0, 10.0, 1500.0, 950.0, 600.0, 0.283582)),
], ],
'end_profit': 950.0, 'end_profit': 950.0,
'end_profit_ratio': 0.283582, 'end_profit_ratio': 0.283582,
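
The updated rel_profit expectations in these parametrizations are consistent with computing the ratio against the total stake put into the position across all entries (entry fees included in the fee-aware cases), rather than against the stake of the individual exited chunk as before. A quick check against the first, fee-free case, done outside the test:

entries = [(100, 10), (100, 15)]                                    # (amount, price) of the two buys
total_invested = sum(amount * price for amount, price in entries)   # 2500.0
for cumulative_profit in (-25.0, 725.0, 350.0):
    print(round(cumulative_profit / total_invested, 2))             # -0.01, 0.29, 0.14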

View File

@ -242,7 +242,7 @@ def test_stoploss_guard_perpair(mocker, default_conf, fee, caplog, only_per_pair
# 2nd Trade that counts with correct pair # 2nd Trade that counts with correct pair
generate_mock_trade( generate_mock_trade(
pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value, pair, fee.return_value, False, exit_reason=ExitType.STOP_LOSS.value,
min_ago_open=180, min_ago_close=30, profit_rate=0.9, is_short=is_short min_ago_open=180, min_ago_close=31, profit_rate=0.9, is_short=is_short
) )
freqtrade.protections.stop_per_pair(pair) freqtrade.protections.stop_per_pair(pair)

View File

@ -1063,6 +1063,63 @@ def test_api_performance(botclient, fee):
'profit_ratio': -0.05570419, 'profit_abs': -0.1150375}] 'profit_ratio': -0.05570419, 'profit_abs': -0.1150375}]
def test_api_entries(botclient, fee):
ftbot, client = botclient
patch_get_signal(ftbot)
# Empty
rc = client_get(client, f"{BASE_URI}/entries")
assert_response(rc)
assert len(rc.json()) == 0
create_mock_trades(fee)
rc = client_get(client, f"{BASE_URI}/entries")
assert_response(rc)
response = rc.json()
assert len(response) == 2
resp = response[0]
assert resp['enter_tag'] == 'TEST1'
assert resp['count'] == 1
assert resp['profit_pct'] == 0.5
def test_api_exits(botclient, fee):
ftbot, client = botclient
patch_get_signal(ftbot)
# Empty
rc = client_get(client, f"{BASE_URI}/exits")
assert_response(rc)
assert len(rc.json()) == 0
create_mock_trades(fee)
rc = client_get(client, f"{BASE_URI}/exits")
assert_response(rc)
response = rc.json()
assert len(response) == 2
resp = response[0]
assert resp['exit_reason'] == 'sell_signal'
assert resp['count'] == 1
assert resp['profit_pct'] == 0.5
def test_api_mix_tag(botclient, fee):
ftbot, client = botclient
patch_get_signal(ftbot)
# Empty
rc = client_get(client, f"{BASE_URI}/mix_tags")
assert_response(rc)
assert len(rc.json()) == 0
create_mock_trades(fee)
rc = client_get(client, f"{BASE_URI}/mix_tags")
assert_response(rc)
response = rc.json()
assert len(response) == 2
resp = response[0]
assert resp['mix_tag'] == 'TEST1 sell_signal'
assert resp['count'] == 1
assert resp['profit_pct'] == 0.5
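
The three new endpoints mirror the existing /performance aggregation, keyed by enter tag, exit reason and the combination of both. A hedged sketch of querying them directly, assuming an enabled API server on the default 127.0.0.1:8080, a base path of /api/v1, and HTTP basic auth with the credentials from the test config at the end of this diff:

import requests

BASE = "http://127.0.0.1:8080/api/v1"
AUTH = ("freqtrader", "SuperSecurePassword")   # assumed test credentials

for endpoint in ("entries", "exits", "mix_tags"):
    resp = requests.get(f"{BASE}/{endpoint}", auth=AUTH)
    resp.raise_for_status()
    # Each row aggregates count / profit_pct per tag, as the assertions above check.
    print(endpoint, resp.json())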
@pytest.mark.parametrize( @pytest.mark.parametrize(
'is_short,current_rate,open_trade_value', 'is_short,current_rate,open_trade_value',
[(True, 1.098e-05, 15.0911775), [(True, 1.098e-05, 15.0911775),
@ -1616,9 +1673,9 @@ def test_api_plot_config(botclient, mocker):
assert_response(rc) assert_response(rc)
def test_api_strategies(botclient, tmpdir): def test_api_strategies(botclient, tmp_path):
ftbot, client = botclient ftbot, client = botclient
ftbot.config['user_data_dir'] = Path(tmpdir) ftbot.config['user_data_dir'] = tmp_path
rc = client_get(client, f"{BASE_URI}/strategies") rc = client_get(client, f"{BASE_URI}/strategies")
@ -1701,9 +1758,9 @@ def test_api_exchanges(botclient):
} }
def test_api_freqaimodels(botclient, tmpdir, mocker): def test_api_freqaimodels(botclient, tmp_path, mocker):
ftbot, client = botclient ftbot, client = botclient
ftbot.config['user_data_dir'] = Path(tmpdir) ftbot.config['user_data_dir'] = tmp_path
mocker.patch( mocker.patch(
"freqtrade.resolvers.freqaimodel_resolver.FreqaiModelResolver.search_all_objects", "freqtrade.resolvers.freqaimodel_resolver.FreqaiModelResolver.search_all_objects",
return_value=[ return_value=[
@ -1739,9 +1796,9 @@ def test_api_freqaimodels(botclient, tmpdir, mocker):
]} ]}
def test_api_pairlists_available(botclient, tmpdir): def test_api_pairlists_available(botclient, tmp_path):
ftbot, client = botclient ftbot, client = botclient
ftbot.config['user_data_dir'] = Path(tmpdir) ftbot.config['user_data_dir'] = tmp_path
rc = client_get(client, f"{BASE_URI}/pairlists/available") rc = client_get(client, f"{BASE_URI}/pairlists/available")
@ -1768,9 +1825,9 @@ def test_api_pairlists_available(botclient, tmpdir):
assert len(volumepl['params']) > 2 assert len(volumepl['params']) > 2
def test_api_pairlists_evaluate(botclient, tmpdir, mocker): def test_api_pairlists_evaluate(botclient, tmp_path, mocker):
ftbot, client = botclient ftbot, client = botclient
ftbot.config['user_data_dir'] = Path(tmpdir) ftbot.config['user_data_dir'] = tmp_path
rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/randomJob") rc = client_get(client, f"{BASE_URI}/pairlists/evaluate/randomJob")
@ -1905,7 +1962,7 @@ def test_sysinfo(botclient):
assert 'ram_pct' in result assert 'ram_pct' in result
def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir): def test_api_backtesting(botclient, mocker, fee, caplog, tmp_path):
try: try:
ftbot, client = botclient ftbot, client = botclient
mocker.patch(f'{EXMS}.get_fee', fee) mocker.patch(f'{EXMS}.get_fee', fee)
@ -1935,8 +1992,8 @@ def test_api_backtesting(botclient, mocker, fee, caplog, tmpdir):
assert result['status_msg'] == 'Backtest reset' assert result['status_msg'] == 'Backtest reset'
ftbot.config['export'] = 'trades' ftbot.config['export'] = 'trades'
ftbot.config['backtest_cache'] = 'day' ftbot.config['backtest_cache'] = 'day'
ftbot.config['user_data_dir'] = Path(tmpdir) ftbot.config['user_data_dir'] = tmp_path
ftbot.config['exportfilename'] = Path(tmpdir) / "backtest_results" ftbot.config['exportfilename'] = tmp_path / "backtest_results"
ftbot.config['exportfilename'].mkdir() ftbot.config['exportfilename'].mkdir()
# start backtesting # start backtesting
@ -2194,14 +2251,14 @@ def test_api_ws_subscribe(botclient, mocker):
with client.websocket_connect(ws_url) as ws: with client.websocket_connect(ws_url) as ws:
ws.send_json({'type': 'subscribe', 'data': ['whitelist']}) ws.send_json({'type': 'subscribe', 'data': ['whitelist']})
time.sleep(1) time.sleep(0.2)
# Check call count is now 1 as we sent a valid subscribe request # Check call count is now 1 as we sent a valid subscribe request
assert sub_mock.call_count == 1 assert sub_mock.call_count == 1
with client.websocket_connect(ws_url) as ws: with client.websocket_connect(ws_url) as ws:
ws.send_json({'type': 'subscribe', 'data': 'whitelist'}) ws.send_json({'type': 'subscribe', 'data': 'whitelist'})
time.sleep(1) time.sleep(0.2)
# Call count hasn't changed as the subscribe request was invalid # Call count hasn't changed as the subscribe request was invalid
assert sub_mock.call_count == 1 assert sub_mock.call_count == 1

View File

@ -150,8 +150,8 @@ def test_telegram_init(default_conf, mocker, caplog) -> None:
"['reload_conf', 'reload_config'], ['show_conf', 'show_config'], " "['reload_conf', 'reload_config'], ['show_conf', 'show_config'], "
"['stopbuy', 'stopentry'], ['whitelist'], ['blacklist'], " "['stopbuy', 'stopentry'], ['whitelist'], ['blacklist'], "
"['bl_delete', 'blacklist_delete'], " "['bl_delete', 'blacklist_delete'], "
"['logs'], ['edge'], ['health'], ['help'], ['version'], ['marketdir']" "['logs'], ['edge'], ['health'], ['help'], ['version'], ['marketdir'], "
"]") "['order']]")
assert log_has(message_str, caplog) assert log_has(message_str, caplog)
@ -347,8 +347,6 @@ async def test_telegram_status_multi_entry(default_conf, update, mocker, fee) ->
msg = msg_mock.call_args_list[3][0][0] msg = msg_mock.call_args_list[3][0][0]
assert re.search(r'Number of Entries.*2', msg) assert re.search(r'Number of Entries.*2', msg)
assert re.search(r'Number of Exits.*1', msg) assert re.search(r'Number of Exits.*1', msg)
assert re.search(r'from 1st entry rate', msg)
assert re.search(r'Order Filled', msg)
assert re.search(r'Close Date:', msg) is None assert re.search(r'Close Date:', msg) is None
assert re.search(r'Close Profit:', msg) is None assert re.search(r'Close Profit:', msg) is None
@ -375,6 +373,105 @@ async def test_telegram_status_closed_trade(default_conf, update, mocker, fee) -
assert re.search(r'Close Profit:', msg) assert re.search(r'Close Profit:', msg)
async def test_order_handle(default_conf, update, ticker, fee, mocker) -> None:
default_conf['max_open_trades'] = 3
mocker.patch.multiple(
EXMS,
fetch_ticker=ticker,
get_fee=fee,
_dry_is_price_crossed=MagicMock(return_value=True),
)
status_table = MagicMock()
mocker.patch.multiple(
'freqtrade.rpc.telegram.Telegram',
_status_table=status_table,
)
telegram, freqtradebot, msg_mock = get_telegram_testobject(mocker, default_conf)
patch_get_signal(freqtradebot)
freqtradebot.state = State.RUNNING
msg_mock.reset_mock()
# Create some test data
freqtradebot.enter_positions()
mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 500)
msg_mock.reset_mock()
context = MagicMock()
context.args = ["2"]
await telegram._order(update=update, context=context)
assert msg_mock.call_count == 1
msg1 = msg_mock.call_args_list[0][0][0]
assert 'Order List for Trade #*`2`' in msg1
msg_mock.reset_mock()
mocker.patch('freqtrade.rpc.telegram.MAX_MESSAGE_LENGTH', 50)
context = MagicMock()
context.args = ["2"]
await telegram._order(update=update, context=context)
assert msg_mock.call_count == 2
msg1 = msg_mock.call_args_list[0][0][0]
msg2 = msg_mock.call_args_list[1][0][0]
assert 'Order List for Trade #*`2`' in msg1
assert '*Order List for Trade #*`2` - continued' in msg2
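
The second half of this test patches MAX_MESSAGE_LENGTH down to 50 and expects the order list to arrive as two messages, the second one marked as continued. A stand-alone sketch of that chunking pattern (function and names are illustrative, not the Telegram module's actual code):

def split_message(header: str, lines: list, max_len: int) -> list:
    # Accumulate lines until the next one would exceed max_len, then start a
    # new chunk whose header is suffixed with " - continued".
    chunks, current = [], header
    for line in lines:
        if len(current) + len(line) + 1 > max_len:
            chunks.append(current)
            current = f"{header} - continued"
        current += f"\n{line}"
    chunks.append(current)
    return chunks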
@pytest.mark.usefixtures("init_persistence")
async def test_telegram_order_multi_entry(default_conf, update, mocker, fee) -> None:
default_conf['telegram']['enabled'] = False
default_conf['position_adjustment_enable'] = True
mocker.patch.multiple(
EXMS,
fetch_order=MagicMock(return_value=None),
get_rate=MagicMock(return_value=0.22),
)
telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
create_mock_trades(fee)
trades = Trade.get_open_trades()
trade = trades[3]
# Average may be empty on some exchanges
trade.orders[0].average = 0
trade.orders.append(Order(
order_id='5412vbb',
ft_order_side='buy',
ft_pair=trade.pair,
ft_is_open=False,
ft_amount=trade.amount,
ft_price=trade.open_rate,
status="closed",
symbol=trade.pair,
order_type="market",
side="buy",
price=trade.open_rate * 0.95,
average=0,
filled=trade.amount,
remaining=0,
cost=trade.amount,
order_date=trade.open_date,
order_filled_date=trade.open_date,
)
)
trade.recalc_trade_from_orders()
Trade.commit()
await telegram._order(update=update, context=MagicMock())
assert msg_mock.call_count == 4
msg = msg_mock.call_args_list[3][0][0]
assert re.search(r'from 1st entry rate', msg)
assert re.search(r'Order Filled', msg)
async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None: async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None:
default_conf['max_open_trades'] = 3 default_conf['max_open_trades'] = 3
mocker.patch.multiple( mocker.patch.multiple(
@ -443,14 +540,12 @@ async def test_status_handle(default_conf, update, ticker, fee, mocker) -> None:
context.args = ["2"] context.args = ["2"]
await telegram._status(update=update, context=context) await telegram._status(update=update, context=context)
assert msg_mock.call_count == 2 assert msg_mock.call_count == 1
msg1 = msg_mock.call_args_list[0][0][0] msg1 = msg_mock.call_args_list[0][0][0]
msg2 = msg_mock.call_args_list[1][0][0]
assert 'Close Rate' not in msg1 assert 'Close Rate' not in msg1
assert 'Trade ID:* `2`' in msg1 assert 'Trade ID:* `2`' in msg1
assert 'Trade ID:* `2` - continued' in msg2
async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> None: async def test_status_table_handle(default_conf, update, ticker, fee, mocker) -> None:
@ -1359,10 +1454,19 @@ async def test_force_enter_no_pair(default_conf, update, mocker) -> None:
assert reduce(lambda acc, x: acc + len(x), keyboard, 0) == 5 assert reduce(lambda acc, x: acc + len(x), keyboard, 0) == 5
update = MagicMock() update = MagicMock()
update.callback_query = AsyncMock() update.callback_query = AsyncMock()
update.callback_query.data = 'XRP/USDT_||_long' update.callback_query.data = 'force_enter__XRP/USDT_||_long'
await telegram._force_enter_inline(update, None) await telegram._force_enter_inline(update, None)
assert fbuy_mock.call_count == 1 assert fbuy_mock.call_count == 1
fbuy_mock.reset_mock()
update.callback_query = AsyncMock()
update.callback_query.data = 'force_enter__cancel'
await telegram._force_enter_inline(update, None)
assert fbuy_mock.call_count == 0
query = update.callback_query
assert query.edit_message_text.call_count == 1
assert query.edit_message_text.call_args_list[-1][1]['text'] == "Force enter canceled."
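
The inline-keyboard payload is now namespaced with a force_enter__ prefix, and a force_enter__cancel sentinel aborts the flow instead of triggering an entry. A small illustrative parser for that convention, not freqtrade's actual handler:

def parse_force_enter(data: str):
    payload = data.removeprefix("force_enter__")
    if payload == "cancel":
        return None                     # user aborted, no entry
    pair, _, side = payload.partition("_||_")
    return pair, side

assert parse_force_enter("force_enter__XRP/USDT_||_long") == ("XRP/USDT", "long")
assert parse_force_enter("force_enter__cancel") is None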
async def test_telegram_performance_handle(default_conf_usdt, update, ticker, fee, mocker) -> None: async def test_telegram_performance_handle(default_conf_usdt, update, ticker, fee, mocker) -> None:

View File

@ -47,6 +47,11 @@ class InformativeDecoratorTest(IStrategy):
dataframe['rsi'] = 14 dataframe['rsi'] = 14
return dataframe return dataframe
@informative('1h', '{base}/BTC')
def populate_indicators_base_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe['rsi'] = 14
return dataframe
# Quote currency different from stake currency test. # Quote currency different from stake currency test.
@informative('1h', 'ETH/BTC', candle_type='spot') @informative('1h', 'ETH/BTC', candle_type='spot')
def populate_indicators_eth_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame: def populate_indicators_eth_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:

View File

@ -277,9 +277,11 @@ def test_informative_decorator(mocker, default_conf_usdt, trading_mode):
('XRP/USDT', '5m', candle_def): test_data_5m, ('XRP/USDT', '5m', candle_def): test_data_5m,
('XRP/USDT', '30m', candle_def): test_data_30m, ('XRP/USDT', '30m', candle_def): test_data_30m,
('XRP/USDT', '1h', candle_def): test_data_1h, ('XRP/USDT', '1h', candle_def): test_data_1h,
('XRP/BTC', '1h', candle_def): test_data_1h, # from {base}/BTC
('LTC/USDT', '5m', candle_def): test_data_5m, ('LTC/USDT', '5m', candle_def): test_data_5m,
('LTC/USDT', '30m', candle_def): test_data_30m, ('LTC/USDT', '30m', candle_def): test_data_30m,
('LTC/USDT', '1h', candle_def): test_data_1h, ('LTC/USDT', '1h', candle_def): test_data_1h,
('LTC/BTC', '1h', candle_def): test_data_1h, # from {base}/BTC
('NEO/USDT', '30m', candle_def): test_data_30m, ('NEO/USDT', '30m', candle_def): test_data_30m,
('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '5m', CandleType.SPOT): test_data_5m, # Explicit request with '' as candletype
('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype ('NEO/USDT', '15m', candle_def): test_data_5m, # Explicit request with '' as candletype
@ -296,10 +298,12 @@ def test_informative_decorator(mocker, default_conf_usdt, trading_mode):
'XRP/USDT', 'LTC/USDT', 'NEO/USDT' 'XRP/USDT', 'LTC/USDT', 'NEO/USDT'
]) ])
assert len(strategy._ft_informative) == 6 # Equal to number of decorators used assert len(strategy._ft_informative) == 7 # Equal to number of decorators used
informative_pairs = [ informative_pairs = [
('XRP/USDT', '1h', candle_def), ('XRP/USDT', '1h', candle_def),
('XRP/BTC', '1h', candle_def),
('LTC/USDT', '1h', candle_def), ('LTC/USDT', '1h', candle_def),
('LTC/BTC', '1h', candle_def),
('XRP/USDT', '30m', candle_def), ('XRP/USDT', '30m', candle_def),
('LTC/USDT', '30m', candle_def), ('LTC/USDT', '30m', candle_def),
('NEO/USDT', '1h', candle_def), ('NEO/USDT', '1h', candle_def),

View File

@ -1,7 +1,6 @@
import shutil import shutil
from pathlib import Path
import pytest import pytest
@ -10,7 +9,7 @@ from freqtrade.util.binance_mig import migrate_binance_futures_data, migrate_bin
from tests.conftest import create_mock_trades_usdt, log_has from tests.conftest import create_mock_trades_usdt, log_has
def test_binance_mig_data_conversion(default_conf_usdt, tmpdir, testdatadir): def test_binance_mig_data_conversion(default_conf_usdt, tmp_path, testdatadir):
# call doing nothing (spot mode) # call doing nothing (spot mode)
migrate_binance_futures_data(default_conf_usdt) migrate_binance_futures_data(default_conf_usdt)
@ -18,7 +17,7 @@ def test_binance_mig_data_conversion(default_conf_usdt, tmpdir, testdatadir):
pair_old = 'XRP_USDT' pair_old = 'XRP_USDT'
pair_unified = 'XRP_USDT_USDT' pair_unified = 'XRP_USDT_USDT'
futures_src = testdatadir / 'futures' futures_src = testdatadir / 'futures'
futures_dst = tmpdir / 'futures' futures_dst = tmp_path / 'futures'
futures_dst.mkdir() futures_dst.mkdir()
files = [ files = [
'-1h-mark.feather', '-1h-mark.feather',
@ -32,7 +31,7 @@ def test_binance_mig_data_conversion(default_conf_usdt, tmpdir, testdatadir):
fn_after = futures_dst / f'{pair_old}{file}' fn_after = futures_dst / f'{pair_old}{file}'
shutil.copy(futures_src / f'{pair_unified}{file}', fn_after) shutil.copy(futures_src / f'{pair_unified}{file}', fn_after)
default_conf_usdt['datadir'] = Path(tmpdir) default_conf_usdt['datadir'] = tmp_path
# Migrate files to unified namings # Migrate files to unified namings
migrate_binance_futures_data(default_conf_usdt) migrate_binance_futures_data(default_conf_usdt)

View File

@ -104,8 +104,8 @@ def test_load_config_file_error_range(default_conf, mocker, caplog) -> None:
assert x == '' assert x == ''
def test_load_file_error(tmpdir): def test_load_file_error(tmp_path):
testpath = Path(tmpdir) / 'config.json' testpath = tmp_path / 'config.json'
with pytest.raises(OperationalException, match=r"File .* not found!"): with pytest.raises(OperationalException, match=r"File .* not found!"):
load_file(testpath) load_file(testpath)
@ -601,9 +601,9 @@ def test_cli_verbose_with_params(default_conf, mocker, caplog) -> None:
assert log_has('Verbosity set to 3', caplog) assert log_has('Verbosity set to 3', caplog)
def test_set_logfile(default_conf, mocker, tmpdir): def test_set_logfile(default_conf, mocker, tmp_path):
patched_configuration_load_config_file(mocker, default_conf) patched_configuration_load_config_file(mocker, default_conf)
f = Path(tmpdir / "test_file.log") f = tmp_path / "test_file.log"
assert not f.is_file() assert not f.is_file()
arglist = [ arglist = [
'trade', '--logfile', str(f), 'trade', '--logfile', str(f),
@ -1145,7 +1145,7 @@ def test_pairlist_resolving_with_config_pl_not_exists(mocker, default_conf):
configuration.get_config() configuration.get_config()
def test_pairlist_resolving_fallback(mocker, tmpdir): def test_pairlist_resolving_fallback(mocker, tmp_path):
mocker.patch.object(Path, "exists", MagicMock(return_value=True)) mocker.patch.object(Path, "exists", MagicMock(return_value=True))
mocker.patch.object(Path, "open", MagicMock(return_value=MagicMock())) mocker.patch.object(Path, "open", MagicMock(return_value=MagicMock()))
mocker.patch("freqtrade.configuration.configuration.load_file", mocker.patch("freqtrade.configuration.configuration.load_file",
@ -1164,7 +1164,7 @@ def test_pairlist_resolving_fallback(mocker, tmpdir):
assert config['pairs'] == ['ETH/BTC', 'XRP/BTC'] assert config['pairs'] == ['ETH/BTC', 'XRP/BTC']
assert config['exchange']['name'] == 'binance' assert config['exchange']['name'] == 'binance'
assert config['datadir'] == Path(tmpdir) / "user_data/data/binance" assert config['datadir'] == tmp_path / "user_data/data/binance"
@pytest.mark.parametrize("setting", [ @pytest.mark.parametrize("setting", [

View File

@ -32,9 +32,9 @@ def test_create_userdata_dir(mocker, default_conf, caplog) -> None:
assert str(x) == str(Path("/tmp/bar")) assert str(x) == str(Path("/tmp/bar"))
def test_create_userdata_dir_and_chown(mocker, tmpdir, caplog) -> None: def test_create_userdata_dir_and_chown(mocker, tmp_path, caplog) -> None:
sp_mock = mocker.patch('subprocess.check_output') sp_mock = mocker.patch('subprocess.check_output')
path = Path(tmpdir / 'bar') path = tmp_path / 'bar'
assert not path.is_dir() assert not path.is_dir()
x = create_userdata_dir(str(path), create_dir=True) x = create_userdata_dir(str(path), create_dir=True)

View File

@ -6569,16 +6569,16 @@ def test_position_adjust2(mocker, default_conf_usdt, fee) -> None:
# tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit # tuple 2 - amount, open_rate, stake_amount, cumulative_profit, realized_profit, rel_profit
(('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)), (('buy', 100, 10), (100.0, 10.0, 1000.0, 0.0, None, None)),
(('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)), (('buy', 100, 15), (200.0, 12.5, 2500.0, 0.0, None, None)),
(('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.044788)), (('sell', 50, 12), (150.0, 12.5, 1875.0, -28.0625, -28.0625, -0.011197)),
(('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.59201995)), (('sell', 100, 20), (50.0, 12.5, 625.0, 713.8125, 741.875, 0.2848129)),
(('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, 336.625, 0.1343142)), # final profit (sum) (('sell', 50, 5), (50.0, 12.5, 625.0, 336.625, 336.625, 0.1343142)), # final profit (sum)
), ),
( (
(('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)), (('buy', 100, 3), (100.0, 3.0, 300.0, 0.0, None, None)),
(('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)), (('buy', 100, 7), (200.0, 5.0, 1000.0, 0.0, None, None)),
(('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 1.189027)), (('sell', 100, 11), (100.0, 5.0, 500.0, 596.0, 596.0, 0.5945137)),
(('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 1.189027)), (('buy', 150, 15), (250.0, 11.0, 2750.0, 596.0, 596.0, 0.5945137)),
(('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.7186579)), (('sell', 100, 19), (150.0, 11.0, 1650.0, 1388.5, 792.5, 0.4261653)),
(('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 3175.75, 0.9747170)), # final profit (('sell', 150, 23), (150.0, 11.0, 1650.0, 3175.75, 3175.75, 0.9747170)), # final profit
) )
]) ])

View File

@ -1,3 +1,4 @@
import time
from unittest.mock import MagicMock from unittest.mock import MagicMock
import pytest import pytest
@ -440,6 +441,7 @@ def test_dca_order_adjust(default_conf_usdt, ticker_usdt, leverage, fee, mocker)
assert trade.open_rate == 1.99 assert trade.open_rate == 1.99
assert trade.orders[-1].price == 1.96 assert trade.orders[-1].price == 1.96
assert trade.orders[-1].cost == 120 * leverage assert trade.orders[-1].cost == 120 * leverage
time.sleep(0.1)
# Replace new order with diff. order at a lower price # Replace new order with diff. order at a lower price
freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95) freqtrade.strategy.adjust_entry_price = MagicMock(return_value=1.95)

View File

@ -1,6 +1,5 @@
import logging import logging
import sys import sys
from pathlib import Path
import pytest import pytest
@ -75,11 +74,11 @@ def test_set_loggers_syslog():
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_set_loggers_Filehandler(tmpdir): def test_set_loggers_Filehandler(tmp_path):
logger = logging.getLogger() logger = logging.getLogger()
orig_handlers = logger.handlers orig_handlers = logger.handlers
logger.handlers = [] logger.handlers = []
logfile = Path(tmpdir) / 'ft_logfile.log' logfile = tmp_path / 'ft_logfile.log'
config = {'verbosity': 2, config = {'verbosity': 2,
'logfile': str(logfile), 'logfile': str(logfile),
} }

View File

@ -40,7 +40,7 @@ def test_strategy_updater_start(user_dir, capsys) -> None:
# Backup file exists # Backup file exists
assert Path(user_dir / "strategies_orig_updater" / 'strategy_test_v2.py').exists() assert Path(user_dir / "strategies_orig_updater" / 'strategy_test_v2.py').exists()
# updated file exists # updated file exists
new_file = Path(tmpdirp / 'strategy_test_v2.py') new_file = tmpdirp / 'strategy_test_v2.py'
assert new_file.exists() assert new_file.exists()
new_code = new_file.read_text() new_code = new_file.read_text()
assert 'INTERFACE_VERSION = 3' in new_code assert 'INTERFACE_VERSION = 3' in new_code

tests/testdata/config.tests.json (new file, 74 lines)
View File

@ -0,0 +1,74 @@
{
"max_open_trades": 3,
"stake_currency": "BTC",
"stake_amount": 0.05,
"tradable_balance_ratio": 0.99,
"fiat_display_currency": "USD",
"timeframe": "5m",
"dry_run": true,
"cancel_open_orders_on_exit": false,
"unfilledtimeout": {
"entry": 5,
"exit": 5,
"exit_timeout_count": 0,
"unit": "minutes"
},
"entry_pricing": {
"price_side": "same",
"use_order_book": true,
"order_book_top": 1,
"price_last_balance": 0.0,
"check_depth_of_market": {
"enabled": false,
"bids_to_ask_delta": 1
}
},
"exit_pricing":{
"price_side": "same",
"use_order_book": true,
"order_book_top": 1
},
"exchange": {
"name": "gate",
"key": "your_exchange_key",
"secret": "your_exchange_secret",
"ccxt_config": {},
"ccxt_async_config": {},
"pair_whitelist": [
"ETH/BTC",
"LTC/BTC",
"ETC/BTC",
"XLM/BTC",
"XRP/BTC",
"ADA/BTC",
"DOT/BTC"
],
"pair_blacklist": [
"DOGE/BTC"
]
},
"pairlists": [
{"method": "StaticPairList"}
],
"telegram": {
"enabled": false,
"token": "your_telegram_token",
"chat_id": "your_telegram_chat_id"
},
"api_server": {
"enabled": false,
"listen_ip_address": "127.0.0.1",
"listen_port": 8080,
"verbosity": "error",
"jwt_secret_key": "somethingrandom",
"CORS_origins": [],
"username": "freqtrader",
"password": "SuperSecurePassword"
},
"bot_name": "freqtrade",
"initial_state": "running",
"force_entry_enable": false,
"internals": {
"process_throttle_secs": 5
}
}
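
A quick way to sanity-check the new shared test config from a local checkout (plain stdlib, no freqtrade imports; the assertions only restate values visible above):

import json
from pathlib import Path

cfg = json.loads(Path("tests/testdata/config.tests.json").read_text())
assert cfg["dry_run"] is True
assert cfg["exchange"]["name"] == "gate"
assert {"method": "StaticPairList"} in cfg["pairlists"]
print(f"{len(cfg['exchange']['pair_whitelist'])} whitelisted pairs")   # 7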