resolve conflict, ensure gpu works with transformer

robcaulk 2023-05-19 14:39:16 +00:00
commit dd1a0156b9
104 changed files with 3059 additions and 770 deletions

@@ -14,7 +14,7 @@ on:
     - cron: '0 5 * * 4'
 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  group: "${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}"
   cancel-in-progress: true
 permissions:
   repository-projects: read
@@ -57,7 +57,7 @@ jobs:
       - name: Installation - *nix
         if: runner.os == 'Linux'
         run: |
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
           export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
           export TA_LIBRARY_PATH=${HOME}/dependencies/lib
           export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -77,6 +77,17 @@ jobs:
           # Allow failure for coveralls
           coveralls || true
+
+      - name: Check for repository changes
+        run: |
+          if [ -n "$(git status --porcelain)" ]; then
+            echo "Repository is dirty, changes detected:"
+            git status
+            git diff
+            exit 1
+          else
+            echo "Repository is clean, no changes detected."
+          fi
       - name: Backtesting (multi)
         run: |
           cp config_examples/config_bittrex.example.json config.json
@@ -163,7 +174,7 @@ jobs:
           rm /usr/local/bin/python3.11-config || true
           brew install hdf5 c-blosc
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
           export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
           export TA_LIBRARY_PATH=${HOME}/dependencies/lib
           export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -174,6 +185,17 @@ jobs:
         run: |
           pytest --random-order
+
+      - name: Check for repository changes
+        run: |
+          if [ -n "$(git status --porcelain)" ]; then
+            echo "Repository is dirty, changes detected:"
+            git status
+            git diff
+            exit 1
+          else
+            echo "Repository is clean, no changes detected."
+          fi
       - name: Backtesting
         run: |
           cp config_examples/config_bittrex.example.json config.json
@@ -237,6 +259,18 @@ jobs:
         run: |
           pytest --random-order
+
+      - name: Check for repository changes
+        run: |
+          if (git status --porcelain) {
+            Write-Host "Repository is dirty, changes detected:"
+            git status
+            git diff
+            exit 1
+          }
+          else {
+            Write-Host "Repository is clean, no changes detected."
+          }
       - name: Backtesting
         run: |
           cp config_examples/config_bittrex.example.json config.json
@@ -302,7 +336,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: "3.10"
+          python-version: "3.11"

       - name: Documentation build
         run: |
@@ -352,7 +386,7 @@ jobs:
       - name: Installation - *nix
         if: runner.os == 'Linux'
         run: |
-          python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+          python -m pip install --upgrade pip wheel
           export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
           export TA_LIBRARY_PATH=${HOME}/dependencies/lib
           export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -425,7 +459,7 @@ jobs:
           python setup.py sdist bdist_wheel

       - name: Publish to PyPI (Test)
-        uses: pypa/gh-action-pypi-publish@v1.8.5
+        uses: pypa/gh-action-pypi-publish@v1.8.6
         if: (github.event_name == 'release')
         with:
           user: __token__
@@ -433,7 +467,7 @@ jobs:
           repository_url: https://test.pypi.org/legacy/

       - name: Publish to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.8.5
+        uses: pypa/gh-action-pypi-publish@v1.8.6
         if: (github.event_name == 'release')
         with:
           user: __token__
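
The two "Check for repository changes" steps added above express the same guard twice, once for POSIX shells and once for PowerShell. As an illustrative aside (not part of this commit), the same check can be written portably; the script below is a hypothetical Python equivalent.

```python
# Hypothetical cross-platform equivalent of the dirty-check CI steps above.
import subprocess
import sys


def main() -> int:
    # `git status --porcelain` prints one line per modified or untracked file.
    status = subprocess.run(
        ["git", "status", "--porcelain"],
        capture_output=True, text=True, check=True,
    ).stdout
    if status.strip():
        print("Repository is dirty, changes detected:")
        subprocess.run(["git", "status"])
        subprocess.run(["git", "diff"])
        return 1
    print("Repository is clean, no changes detected.")
    return 0


if __name__ == "__main__":
    sys.exit(main())
```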

@@ -15,10 +15,10 @@ repos:
         additional_dependencies:
           - types-cachetools==5.3.0.5
           - types-filelock==3.2.7
-          - types-requests==2.28.11.17
+          - types-requests==2.30.0.0
           - types-tabulate==0.9.0.2
-          - types-python-dateutil==2.8.19.12
-          - SQLAlchemy==2.0.10
+          - types-python-dateutil==2.8.19.13
+          - SQLAlchemy==2.0.13
         # stages: [push]
   - repo: https://github.com/pycqa/isort
@@ -30,7 +30,7 @@ repos:
   - repo: https://github.com/charliermarsh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.255'
+    rev: 'v0.0.263'
     hooks:
       - id: ruff

@@ -25,7 +25,7 @@ FROM base as python-deps
 RUN apt-get update \
     && apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
     && apt-get clean \
-    && pip install --upgrade pip==23.0.1 wheel==0.38.4
+    && pip install --upgrade pip wheel

 # Install TA-lib
 COPY build_helpers/* /tmp/

@@ -1,7 +1,7 @@
 # Downloads don't work automatically, since the URL is regenerated via javascript.
 # Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib
-python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+python -m pip install --upgrade pip wheel
 $pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"

@@ -29,7 +29,7 @@ If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl`
 `user_data/backtest_results` folder.

 To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
-with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):
+with `--analysis-groups` option provided with space-separated arguments:

 ``` bash
 freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4 5
@@ -39,6 +39,7 @@ This command will read from the last backtesting results. The `--analysis-groups` option is
 used to specify the various tabular outputs showing the profit for each group or trade,
 ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):

+* 0: overall winrate and profit summary by enter_tag
 * 1: profit summaries grouped by enter_tag
 * 2: profit summaries grouped by enter_tag and exit_tag
 * 3: profit summaries grouped by pair and enter_tag
@@ -115,3 +116,38 @@ For example, if your backtest timerange was `20220101-20221231`

 ```bash
 freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
 ```
+
+### Printing out rejected signals
+
+Use the `--rejected-signals` option to print out rejected signals.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --rejected-signals
+```
+
+### Writing tables to CSV
+
+Some of the tabular outputs can become large, so printing them out to the terminal is not preferable.
+Use the `--analysis-to-csv` option to disable printing out of tables to standard out and write them to CSV files.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv
+```
+
+By default this will write one file per output table you specified in the `backtesting-analysis` command, e.g.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --rejected-signals --analysis-groups 0 1
+```
+
+This will write to `user_data/backtest_results`:
+
+* rejected_signals.csv
+* group_0.csv
+* group_1.csv
+
+To override where the files will be written, also specify the `--analysis-csv-path` option.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --analysis-csv-path another/data/path/
+```
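
As a usage sketch (not part of the commit itself): the exported tables are plain CSVs, so they can be loaded back with pandas. The paths below assume the default `user_data/backtest_results` location and the `group_<n>.csv`/`rejected_signals.csv` naming described above.

```python
# Sketch: read the CSVs written by `--analysis-to-csv` back into pandas.
from pathlib import Path

import pandas as pd

out_dir = Path("user_data/backtest_results")  # adjust if --analysis-csv-path was used

group_0 = pd.read_csv(out_dir / "group_0.csv", index_col=0)
rejected = pd.read_csv(out_dir / "rejected_signals.csv", index_col=0)

print(group_0.head())
print(f"{len(rejected)} rejected signals loaded")
```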

@@ -397,3 +397,21 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method.
         return dataframe
 ```

 To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
+
+#### Improving performance with `torch.compile()`
+
+Torch provides a `torch.compile()` method that can be used to improve performance for specific GPU hardware. More details can be found [here](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). In brief, you simply wrap your `model` in `torch.compile()`:
+
+```python
+model = PyTorchMLPModel(
+    input_dim=n_features,
+    output_dim=1,
+    **self.model_kwargs
+)
+model.to(self.device)
+model = torch.compile(model)
+```
+
+Then proceed to use the model as normal. Keep in mind that doing this will remove eager execution, which means errors and tracebacks will not be informative.
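
For readers trying the note above outside of FreqAI, here is a minimal, self-contained sketch: `SimpleMLP` is a placeholder module (not the actual `PyTorchMLPModel`), and the dimensions are arbitrary. The point is that after `torch.compile()` the model is called exactly as before.

```python
# Minimal torch.compile() sketch; SimpleMLP is a placeholder, not FreqAI internals.
import torch
import torch.nn as nn


class SimpleMLP(nn.Module):
    def __init__(self, input_dim: int, output_dim: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, 64), nn.ReLU(), nn.Linear(64, output_dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)


device = "cuda" if torch.cuda.is_available() else "cpu"
model = SimpleMLP(input_dim=10, output_dim=1).to(device)
model = torch.compile(model)  # no other call sites need to change

x = torch.randn(32, 10, device=device)
y = model(x)  # first call triggers compilation; later calls reuse the compiled graph
print(y.shape)
```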

@@ -18,9 +18,10 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `purge_old_models` | Number of models to keep on disk (not relevant to backtesting). Default is 2, which means that dry/live runs will keep the latest 2 models on disk. Setting to 0 keeps all models. This parameter also accepts a boolean to maintain backwards compatibility. <br> **Datatype:** Integer. <br> Default: `2`.
 | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
-| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
+| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the connections here primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market. <br> **Datatype:** Boolean. <br> Default: `False`.
 | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
 | `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If user does not set it (default), FreqAI will use max number of threads - 2 (leaving 1 physical core available for Freqtrade bot and FreqUI) <br> **Datatype:** Positive integer.
+| `activate_tensorboard` | <br> Indicate whether or not to activate tensorboard for the tensorboard enabled modules (currently Reinforcement Learning, XGBoost, Catboost, and PyTorch). Tensorboard needs Torch installed, which means you will need the torch/RL docker image or you need to answer "yes" to the install question about whether or not you wish to install Torch. <br> **Datatype:** Boolean. <br> Default: `True`.

 ### Feature parameters
@@ -114,5 +115,5 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 |------------|-------------|
 | | **Extraneous parameters**
 | `freqai.keras` | If the selected model makes use of Keras (typical for TensorFlow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
-| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
+| `freqai.conv_width` | The width of a neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
 | `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.

@@ -135,14 +135,21 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general
 ## Creating a custom reward function

-As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
+!!! danger "Not for production"
+    Warning!
+    The reward function provided with the Freqtrade source code is a showcase of functionality designed to show/test as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark, it is *not* for live production. Please beware that you will need to create your own custom `calculate_reward()` function or use a template built by other users outside of the Freqtrade source code.
+
+As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but this is *not* designed for production. Users *must* create their own custom reinforcement learning model class or use a pre-built one from outside the Freqtrade source code and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
+
+!!! note "Hint"
+    The best reward functions are ones that are continuously differentiable, and well scaled. In other words, adding a single large negative penalty to a rare event is not a good idea, and the neural net will not be able to learn that function. Instead, it is better to add a small negative penalty to a common event. This will help the agent learn faster. Not only this, but you can help improve the continuity of your rewards/penalties by having them scale with severity according to some linear/exponential functions. In other words, you'd slowly scale the penalty as the duration of the trade increases. This is better than a single large penalty occurring at a single point in time.

 ```python
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions

 class MyCoolRLModel(ReinforcementLearner):
     """
     User created RL prediction model.
@@ -169,6 +176,11 @@ As you begin to modify the strategy and the prediction model, you will quickly
         User made custom environment. This class inherits from BaseEnvironment and gym.env.
         Users can override any functions from those parent classes. Here is an example
         of a user customized `calculate_reward()` function.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many possible
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
         """
         def calculate_reward(self, action: int) -> float:
             # first, penalize if the action is not valid
@@ -220,7 +232,7 @@ As you begin to modify the strategy and the prediction model, you will quickly
             return 0.
 ```

-### Using Tensorboard
+## Using Tensorboard

 Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
@@ -233,15 +245,13 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file.
 ![tensorboard](assets/tensorboard.jpg)

-### Custom logging
+## Custom logging

 FreqAI also provides a built in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.

 `self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called:

-```py
+```python
 class MyRLEnv(Base5ActionRLEnv):
     """
     User made custom environment. This class inherits from BaseEnvironment and gym.env.
@@ -258,7 +268,7 @@ FreqAI also provides a built in episodic summary logger called `self.tensorboard_log`
 !!! Note
     The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)`. In this case the metric values are not incremented.

-### Choosing a base environment
+## Choosing a base environment

 FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:
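
Tying the `self.tensorboard_log` helper described above back to the custom reward function: a minimal sketch, assuming the `Base5ActionRLEnv` API shown in this document, that counts how often each reward branch fires (the reward values are illustrative only):

```python
# Sketch: counting reward branches with self.tensorboard_log (illustrative values).
from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


class MyRLEnv(Base5ActionRLEnv):
    """Custom environment that records how often each reward branch fires."""

    def calculate_reward(self, action: int) -> float:
        if not self._is_valid(action):
            self.tensorboard_log("invalid_action")
            return -2.0
        if action == Actions.Long_enter.value and self._position == Positions.Neutral:
            self.tensorboard_log("long_entry")
            return 25.0
        self.tensorboard_log("no_action")
        return 0.0
```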

@@ -131,6 +131,9 @@ You can choose to adopt a continual learning scheme by setting
 ???+ danger "Continual learning enforces a constant parameter space"
     Since `continual_learning` means that the model parameter space *cannot* change between trainings, `principal_component_analysis` is automatically disabled when `continual_learning` is enabled. Hint: PCA changes the parameter space and the number of features, learn more about PCA [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis).

+???+ danger "Experimental functionality"
+    Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the mechanics available in FreqAI primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market.
+
 ## Hyperopt

 You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
@@ -158,7 +161,14 @@ This specific hyperopt would help you understand the appropriate `DI_values` for
 ## Using Tensorboard

-CatBoost models benefit from tracking training metrics via Tensorboard. You can take advantage of the FreqAI integration to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
+!!! note "Availability"
+    FreqAI includes tensorboard for a variety of models, including XGBoost, all PyTorch models, Reinforcement Learning, and Catboost. If you would like to see Tensorboard integrated into another model type, please open an issue on the [Freqtrade GitHub](https://github.com/freqtrade/freqtrade/issues)
+
+!!! danger "Requirements"
+    Tensorboard logging requires the FreqAI torch installation/docker image.
+
+The easiest way to use tensorboard is to ensure `freqai.activate_tensorboard` is set to `True` (default setting) in your configuration file, run FreqAI, then open a separate shell and run:

 ```bash
 cd freqtrade
@@ -168,3 +178,7 @@ tensorboard --logdir user_data/models/unique-id
 where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if you wish to view the output in your browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard).

 ![tensorboard](assets/tensorboard.jpg)
+
+!!! note "Deactivate for improved performance"
+    Tensorboard logging can slow down training and should be deactivated for production use.

@@ -34,6 +34,9 @@ freqtrade trade --config config_examples/config_freqai.example.json --strategy
 You will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.

+!!! danger "Not for production"
+    The example strategy provided with the Freqtrade source code is designed for showcasing/testing a wide variety of FreqAI features. It is also designed to run on small computers so that it can be used as a benchmark between developers and users. It is *not* designed to be run in production.
+
 An example strategy, prediction model, and config to use as starting points can be found in
 `freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
 `config_examples/config_freqai.example.json`, respectively.
@@ -69,11 +72,7 @@ pip install -r requirements-freqai.txt
 ```

 !!! Note
-    Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since it does not provide wheels for this platform.
+    Catboost will not be installed on low-powered arm devices (raspberry), since it does not provide wheels for this platform.

-!!! Note "python 3.11"
-    Some dependencies (Catboost, Torch) currently don't support python 3.11. Freqtrade therefore only supports python 3.10 for these models/dependencies.
-    Tests involving these dependencies are skipped on 3.11.

 ### Usage with docker

@@ -30,12 +30,6 @@ The easiest way to install and run Freqtrade is to clone the bot Github repository
 !!! Warning "Up-to-date clock"
     The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.

-!!! Error "Running setup.py install for gym did not run successfully."
-    If you get an error related with gym, we suggest you downgrade setuptools to version 65.5.0 with the following command:
-    ```bash
-    pip install setuptools==65.5.0
-    ```

 ------

 ## Requirements
@@ -242,6 +236,7 @@ source .env/bin/activate

 ```bash
 python3 -m pip install --upgrade pip
+python3 -m pip install -r requirements.txt
 python3 -m pip install -e .
 ```

@@ -1,6 +1,6 @@
 markdown==3.3.7
-mkdocs==1.4.2
-mkdocs-material==9.1.7
+mkdocs==1.4.3
+mkdocs-material==9.1.12
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.11
+pymdown-extensions==10.0.1
 jinja2==3.1.2

@@ -134,7 +134,9 @@ python3 scripts/rest_client.py --config rest_config.json <command>
 | `reload_config` | Reloads the configuration file.
 | `trades` | List last trades. Limited to 500 trades per call.
 | `trade/<tradeid>` | Get specific trade.
-| `delete_trade <trade_id>` | Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `trade/<tradeid>` | DELETE - Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `trade/<tradeid>/open-order` | DELETE - Cancel open order for this trade.
+| `trade/<tradeid>/reload` | GET - Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
 | `show_config` | Shows part of the current configuration with relevant settings to operation.
 | `logs` | Shows last log messages.
 | `status` | Lists all open trades.
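
As an illustration only: the new rows map onto plain HTTP verbs, so they can also be called directly with `requests`. The host, port, credentials and exact URL prefix below are assumptions based on a default local API-server setup, not part of this commit.

```python
# Sketch: calling the new trade endpoints over HTTP (paths/credentials assumed).
import requests

BASE = "http://127.0.0.1:8080/api/v1"
AUTH = ("freqtrader", "password")  # placeholder credentials

trade_id = 1

# DELETE trade/<tradeid>/open-order - cancel the open order for this trade
requests.delete(f"{BASE}/trades/{trade_id}/open-order", auth=AUTH).raise_for_status()

# GET trade/<tradeid>/reload - reload the trade from the exchange (live only)
resp = requests.get(f"{BASE}/trades/{trade_id}/reload", auth=AUTH)
print(resp.json())
```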

@@ -227,8 +227,8 @@ for val in self.buy_ema_short.range:
         f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
     }))

-# Append columns to existing dataframe
-merged_frame = pd.concat(frames, axis=1)
+# Combine all dataframes, and reassign the original dataframe column
+dataframe = pd.concat(frames, axis=1)
 ```

 Freqtrade does however also counter this by running `dataframe.copy()` on the dataframe right after the `populate_indicators()` method - so performance implications of this should be low to non-existent.
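
A self-contained sketch of the pattern above, with a rolling mean standing in for `ta.EMA` so it runs without TA-Lib (periods and data are illustrative):

```python
# Sketch: build new columns as separate frames, then concat once.
import numpy as np
import pandas as pd

dataframe = pd.DataFrame({"close": np.random.rand(100)})

frames = [dataframe]
for val in (5, 10, 20):  # illustrative periods
    frames.append(pd.DataFrame({
        # rolling mean as a stand-in for ta.EMA(dataframe, timeperiod=val)
        f"ema_short_{val}": dataframe["close"].rolling(val).mean()
    }))

# Combine all dataframes, and reassign the original dataframe column set
dataframe = pd.concat(frames, axis=1)
print(dataframe.columns.tolist())
```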

@@ -187,6 +187,7 @@ official commands. You can ask at any moment for help with `/help`.
 | `/forcelong <pair> [rate]` | Instantly buys the given pair. Rate is optional and only applies to limit orders. (`force_entry_enable` must be set to True)
 | `/forceshort <pair> [rate]` | Instantly shorts the given pair. Rate is optional and only applies to limit orders. This will only work on non-spot markets. (`force_entry_enable` must be set to True)
 | `/delete <trade_id>` | Delete a specific trade from the Database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `/reload_trade <trade_id>` | Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
 | `/cancel_open_order <trade_id> | /coo <trade_id>` | Cancel an open order for a trade.
 | **Metrics** |
 | `/profit [<n>]` | Display a summary of your profit/loss from close trades and some stats about your performance, over the last n days (all trades by default)

@@ -723,6 +723,9 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
                                       [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                       [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
                                       [--timerange YYYYMMDD-[YYYYMMDD]]
+                                      [--rejected-signals]
+                                      [--analysis-to-csv]
+                                      [--analysis-csv-path PATH]

 optional arguments:
   -h, --help            show this help message and exit
@@ -736,19 +739,27 @@ optional arguments:
                         pair and enter_tag, 4: by pair, enter_ and exit_tag
                         (this can get quite large)
   --enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]
-                        Comma separated list of entry signals to analyse.
-                        Default: all. e.g. 'entry_tag_a,entry_tag_b'
+                        Space separated list of entry signals to analyse.
+                        Default: all. e.g. 'entry_tag_a entry_tag_b'
   --exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]
-                        Comma separated list of exit signals to analyse.
-                        Default: all. e.g.
-                        'exit_tag_a,roi,stop_loss,trailing_stop_loss'
+                        Space separated list of exit signals to analyse.
+                        Default: all. e.g.
+                        'exit_tag_a roi stop_loss trailing_stop_loss'
   --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
-                        Comma separated list of indicators to analyse. e.g.
-                        'close,rsi,bb_lowerband,profit_abs'
+                        Space separated list of indicators to analyse. e.g.
+                        'close rsi bb_lowerband profit_abs'
   --timerange YYYYMMDD-[YYYYMMDD]
                         Timerange to filter trades for analysis,
                         start inclusive, end exclusive. e.g.
                         20220101-20220201
+  --rejected-signals    Print out rejected trades table
+  --analysis-to-csv     Write out tables to individual CSVs, by default to
+                        'user_data/backtest_results' unless
+                        '--analysis-csv-path' is given.
+  --analysis-csv-path [PATH]
+                        Optional path where individual CSVs will be written.
+                        If not used, CSVs will be written to
+                        'user_data/backtest_results'.

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).

@@ -106,7 +106,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable",
                       "disableparamexport", "backtest_breakdown"]

 ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list", "timerange"]
+                              "exit_reason_list", "indicator_list", "timerange",
+                              "analysis_rejected", "analysis_to_csv", "analysis_csv_path"]

 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",

@@ -636,30 +636,45 @@ AVAILABLE_CLI_OPTIONS = {
               "4: by pair, enter_ and exit_tag (this can get quite large), "
               "5: by exit_tag"),
         nargs='+',
-        default=['0', '1', '2'],
+        default=[],
         choices=['0', '1', '2', '3', '4', '5'],
     ),
     "enter_reason_list": Arg(
         "--enter-reason-list",
-        help=("Comma separated list of entry signals to analyse. Default: all. "
-              "e.g. 'entry_tag_a,entry_tag_b'"),
+        help=("Space separated list of entry signals to analyse. Default: all. "
+              "e.g. 'entry_tag_a entry_tag_b'"),
         nargs='+',
         default=['all'],
     ),
     "exit_reason_list": Arg(
         "--exit-reason-list",
-        help=("Comma separated list of exit signals to analyse. Default: all. "
-              "e.g. 'exit_tag_a,roi,stop_loss,trailing_stop_loss'"),
+        help=("Space separated list of exit signals to analyse. Default: all. "
+              "e.g. 'exit_tag_a roi stop_loss trailing_stop_loss'"),
         nargs='+',
         default=['all'],
     ),
     "indicator_list": Arg(
         "--indicator-list",
-        help=("Comma separated list of indicators to analyse. "
-              "e.g. 'close,rsi,bb_lowerband,profit_abs'"),
+        help=("Space separated list of indicators to analyse. "
+              "e.g. 'close rsi bb_lowerband profit_abs'"),
         nargs='+',
         default=[],
     ),
+    "analysis_rejected": Arg(
+        '--rejected-signals',
+        help='Analyse rejected signals',
+        action='store_true',
+    ),
+    "analysis_to_csv": Arg(
+        '--analysis-to-csv',
+        help='Save selected analysis tables to individual CSVs',
+        action='store_true',
+    ),
+    "analysis_csv_path": Arg(
+        '--analysis-csv-path',
+        help=("Specify a path to save the analysis CSVs "
+              "if --analysis-to-csv is enabled. Default: user_data/backtest_results/"),
+    ),
     "freqaimodel": Arg(
         '--freqaimodel',
         help='Specify a custom freqaimodels.',

@@ -52,7 +52,7 @@ def start_download_data(args: Dict[str, Any]) -> None:
     pairs_not_available: List[str] = []

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)
     markets = [p for p, m in exchange.markets.items() if market_is_active(m)
                or config.get('include_inactive')]
@@ -125,7 +125,7 @@ def start_convert_trades(args: Dict[str, Any]) -> None:
                 "Please check the documentation on how to configure this.")

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)
     # Manual validations of relevant settings
     if not config['exchange'].get('skip_pair_validation', False):
         exchange.validate_pairs(config['pairs'])

@@ -114,7 +114,7 @@ def start_list_timeframes(args: Dict[str, Any]) -> None:
     config['timeframe'] = None

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     if args['print_one_column']:
         print('\n'.join(exchange.timeframes))
@@ -133,7 +133,7 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
     config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     # By default only active pairs/markets are to be shown
     active_only = not args.get('list_pairs_all', False)

@@ -18,7 +18,7 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
     from freqtrade.plugins.pairlistmanager import PairListManager

     config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     quote_currencies = args.get('quote_currencies')
     if not quote_currencies:

@@ -465,6 +465,15 @@ class Configuration:
         self._args_to_config(config, argname='timerange',
                              logstring='Filter trades by timerange: {}')

+        self._args_to_config(config, argname='analysis_rejected',
+                             logstring='Analyse rejected signals: {}')
+
+        self._args_to_config(config, argname='analysis_to_csv',
+                             logstring='Store analysis tables to CSV: {}')
+
+        self._args_to_config(config, argname='analysis_csv_path',
+                             logstring='Path to store analysis CSVs: {}')
+
     def _process_runmode(self, config: Config) -> None:

         self._args_to_config(config, argname='dry_run',

@@ -690,4 +690,6 @@ BidAsk = Literal['bid', 'ask']
 OBLiteral = Literal['asks', 'bids']

 Config = Dict[str, Any]
+# Exchange part of the configuration.
+ExchangeConfig = Dict[str, Any]
 IntOrInf = float

@@ -1,5 +1,6 @@
 import logging
 from pathlib import Path
+from typing import List

 import joblib
 import pandas as pd
@@ -15,22 +16,31 @@ from freqtrade.exceptions import OperationalException
 logger = logging.getLogger(__name__)


-def _load_signal_candles(backtest_dir: Path):
+def _load_backtest_analysis_data(backtest_dir: Path, name: str):
     if backtest_dir.is_dir():
         scpf = Path(backtest_dir,
-                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_signals.pkl"
+                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl"
                     )
     else:
-        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
+        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")

     try:
         with scpf.open("rb") as scp:
-            signal_candles = joblib.load(scp)
-            logger.info(f"Loaded signal candles: {str(scpf)}")
+            loaded_data = joblib.load(scp)
+            logger.info(f"Loaded {name} candles: {str(scpf)}")
     except Exception as e:
-        logger.error("Cannot load signal candles from pickled results: ", e)
+        logger.error(f"Cannot load {name} data from pickled results: ", e)
+        return None

-    return signal_candles
+    return loaded_data
+
+
+def _load_rejected_signals(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "rejected")
+
+
+def _load_signal_candles(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "signals")


 def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
@@ -43,9 +53,7 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
         for pair in pairlist:
             if pair in signal_candles[strategy_name]:
                 analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
-                    pair,
-                    trades,
-                    signal_candles[strategy_name][pair])
+                    pair, trades, signal_candles[strategy_name][pair])
     except Exception as e:
         print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)
@@ -85,7 +93,7 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles: pd.DataFrame):
     return pd.DataFrame()


-def _do_group_table_output(bigdf, glist):
+def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False):
     for g in glist:
         # 0: summary wins/losses grouped by enter tag
         if g == "0":
@@ -116,7 +124,8 @@ def _do_group_table_output(bigdf, glist):
             sortcols = ['total_num_buys']

-            _print_table(new, sortcols, show_index=True)
+            _print_table(new, sortcols, show_index=True, name="Group 0:",
+                         to_csv=to_csv, csv_path=csv_path)

         else:
             agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],
@@ -154,11 +163,24 @@ def _do_group_table_output(bigdf, glist):
                 new['mean_profit_pct'] = new['mean_profit_pct'] * 100
                 new['total_profit_pct'] = new['total_profit_pct'] * 100

-                _print_table(new, sortcols)
+                _print_table(new, sortcols, name=f"Group {g}:",
+                             to_csv=to_csv, csv_path=csv_path)
             else:
                 logger.warning("Invalid group mask specified.")


+def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame,
+                                to_csv: bool = False, csv_path=None) -> None:
+    cols = ['pair', 'date', 'enter_tag']
+    sortcols = ['date', 'pair', 'enter_tag']
+    _print_table(rejected_signals_df[cols],
+                 sortcols,
+                 show_index=False,
+                 name="Rejected Signals:",
+                 to_csv=to_csv,
+                 csv_path=csv_path)
+
+
 def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
     if timerange:
         if timerange.starttype == 'date':
@@ -192,30 +214,56 @@ def prepare_results(analysed_trades, stratname,
     return res_df


-def print_results(res_df, analysis_groups, indicator_list):
+def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str],
+                  csv_path: Path, rejected_signals=None, to_csv=False):
     if res_df.shape[0] > 0:
         if analysis_groups:
-            _do_group_table_output(res_df, analysis_groups)
+            _do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path)

+        if rejected_signals is not None:
+            if rejected_signals.empty:
+                print("There were no rejected signals.")
+            else:
+                _do_rejected_signals_output(rejected_signals, to_csv=to_csv, csv_path=csv_path)
+
+        # NB this can be large for big dataframes!
         if "all" in indicator_list:
-            print(res_df)
-        elif indicator_list is not None:
+            _print_table(res_df,
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
+        elif indicator_list is not None and indicator_list:
             available_inds = []
             for ind in indicator_list:
                 if ind in res_df:
                     available_inds.append(ind)
             ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist],
+                         sortcols=['exit_reason'],
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
     else:
         print("\\No trades to show")


-def _print_table(df, sortcols=None, show_index=False):
+def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None,
+                 to_csv=False, csv_path: Path):
     if (sortcols is not None):
         data = df.sort_values(sortcols)
     else:
         data = df

+    if to_csv:
+        safe_name = Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")
+        data.to_csv(safe_name)
+        print(f"Saved {name} to {safe_name}")
+    else:
+        if name is not None:
+            print(name)
+
         print(
             tabulate(
                 data,
@@ -232,6 +280,11 @@ def process_entry_exit_reasons(config: Config):
     enter_reason_list = config.get('enter_reason_list', ["all"])
     exit_reason_list = config.get('exit_reason_list', ["all"])
     indicator_list = config.get('indicator_list', [])
+    do_rejected = config.get('analysis_rejected', False)
+    to_csv = config.get('analysis_to_csv', False)
+    csv_path = Path(config.get('analysis_csv_path', config['exportfilename']))
+    if to_csv and not csv_path.is_dir():
+        raise OperationalException(f"Specified directory {csv_path} does not exist.")

     timerange = TimeRange.parse_timerange(None if config.get(
         'timerange') is None else str(config.get('timerange')))
@@ -241,8 +294,16 @@ def process_entry_exit_reasons(config: Config):
         for strategy_name, results in backtest_stats['strategy'].items():
             trades = load_backtest_data(config['exportfilename'], strategy_name)

-            if not trades.empty:
+            if trades is not None and not trades.empty:
                 signal_candles = _load_signal_candles(config['exportfilename'])
+
+                rej_df = None
+                if do_rejected:
+                    rejected_signals_dict = _load_rejected_signals(config['exportfilename'])
+                    rej_df = prepare_results(rejected_signals_dict, strategy_name,
+                                             enter_reason_list, exit_reason_list,
+                                             timerange=timerange)
+
                 analysed_trades_dict = _process_candles_and_indicators(
                     config['exchange']['pair_whitelist'], strategy_name,
                     trades, signal_candles)
@@ -253,7 +314,10 @@ def process_entry_exit_reasons(config: Config):
                 print_results(res_df,
                               analysis_groups,
-                              indicator_list)
+                              indicator_list,
+                              rejected_signals=rej_df,
+                              to_csv=to_csv,
+                              csv_path=csv_path)
     except ValueError as e:
         raise OperationalException(e) from e
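
Worth noting: the CSV file names are derived from the table titles by the `name.lower().replace(...)` chain in `_print_table` above. A quick standalone sketch of that mapping, mirroring the expression in the diff:

```python
# Sketch: how _print_table derives CSV file names from table titles.
from pathlib import Path


def csv_name(name: str, csv_path: str = "user_data/backtest_results") -> Path:
    return Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")


print(csv_name("Group 0:"))           # user_data/backtest_results/group_0.csv
print(csv_name("Rejected Signals:"))  # user_data/backtest_results/rejected_signals.csv
print(csv_name("Indicators:"))        # user_data/backtest_results/indicators.csv
```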

@@ -15,6 +15,7 @@ class ExitType(Enum):
     EMERGENCY_EXIT = "emergency_exit"
     CUSTOM_EXIT = "custom_exit"
     PARTIAL_EXIT = "partial_exit"
+    SOLD_ON_EXCHANGE = "sold_on_exchange"
     NONE = ""

     def __str__(self):

@@ -1,6 +1,6 @@
 # flake8: noqa: F401
 # isort: off
-from freqtrade.exchange.common import remove_credentials, MAP_EXCHANGE_CHILDCLASS
+from freqtrade.exchange.common import remove_exchange_credentials, MAP_EXCHANGE_CHILDCLASS
 from freqtrade.exchange.exchange import Exchange
 # isort: on
 from freqtrade.exchange.binance import Binance
File diff suppressed because it is too large

View File
@@ -4,6 +4,7 @@ import time
 from functools import wraps
 from typing import Any, Callable, Optional, TypeVar, cast, overload

+from freqtrade.constants import ExchangeConfig
 from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
 from freqtrade.mixins import LoggingMixin

@@ -84,20 +85,22 @@ EXCHANGE_HAS_OPTIONAL = [
     # 'fetchPositions', # Futures trading
     # 'fetchLeverageTiers', # Futures initialization
     # 'fetchMarketLeverageTiers', # Futures initialization
+    # 'fetchOpenOrders', 'fetchClosedOrders',
+    # 'fetchOrders', # Refinding balance...
 ]


-def remove_credentials(config) -> None:
+def remove_exchange_credentials(exchange_config: ExchangeConfig, dry_run: bool) -> None:
     """
     Removes exchange keys from the configuration and specifies dry-run
     Used for backtesting / hyperopt / edge and utils.
     Modifies the input dict!
     """
-    if config.get('dry_run', False):
-        config['exchange']['key'] = ''
-        config['exchange']['secret'] = ''
-        config['exchange']['password'] = ''
-        config['exchange']['uid'] = ''
+    if dry_run:
+        exchange_config['key'] = ''
+        exchange_config['apiKey'] = ''
+        exchange_config['secret'] = ''
+        exchange_config['password'] = ''
+        exchange_config['uid'] = ''


 def calculate_backoff(retrycount, max_retries):
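The rename also changes the calling convention: callers now pass the exchange sub-dict and the dry-run flag explicitly instead of the whole config. A hedged before/after sketch (config contents illustrative):

config = {'dry_run': True, 'exchange': {'key': 'k', 'secret': 's', 'password': '', 'uid': ''}}

# before: remove_credentials(config) read config['dry_run'] itself
# after: the caller decides, and only the exchange sub-dict is touched
remove_exchange_credentials(config['exchange'], config.get('dry_run', False))
assert config['exchange']['key'] == ''  # blanked, along with the new 'apiKey' field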
View File
@@ -20,16 +20,16 @@ from dateutil import parser
 from pandas import DataFrame, concat

 from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk,
-                                 BuySell, Config, EntryExit, ListPairsWithTimeframes, MakerTaker,
-                                 OBLiteral, PairWithTimeframe)
+                                 BuySell, Config, EntryExit, ExchangeConfig,
+                                 ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe)
 from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list
 from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, TradingMode
 from freqtrade.enums.pricetype import PriceType
 from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError,
                                   InvalidOrderException, OperationalException, PricingError,
                                   RetryableOrderError, TemporaryError)
-from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_credentials, retrier,
-                                       retrier_async)
+from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials,
+                                       retrier, retrier_async)
 from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType,
                                                amount_to_contract_precision, amount_to_contracts,
                                                amount_to_precision, contracts_to_amount,
@@ -92,8 +92,8 @@ class Exchange:
         # TradingMode.SPOT always supported and not required in this list
     ]

-    def __init__(self, config: Config, validate: bool = True,
-                 load_leverage_tiers: bool = False) -> None:
+    def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None,
+                 validate: bool = True, load_leverage_tiers: bool = False) -> None:
         """
         Initializes this module with the given config,
         it does basic validation whether the specified exchange and pairs are valid.

@@ -131,13 +131,13 @@ class Exchange:
         # Holds all open sell orders for dry_run
         self._dry_run_open_orders: Dict[str, Any] = {}
-        remove_credentials(config)

         if config['dry_run']:
             logger.info('Instance is running with dry_run enabled')
         logger.info(f"Using CCXT {ccxt.__version__}")
-        exchange_config = config['exchange']
-        self.log_responses = exchange_config.get('log_responses', False)
+        exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange']
+        remove_exchange_credentials(exchange_conf, config.get('dry_run', False))
+        self.log_responses = exchange_conf.get('log_responses', False)

         # Leverage properties
         self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT)
@@ -152,8 +152,8 @@ class Exchange:
             self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default))
         if self.trading_mode == TradingMode.FUTURES:
             self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has)
-        if exchange_config.get('_ft_has_params'):
-            self._ft_has = deep_merge_dicts(exchange_config.get('_ft_has_params'),
+        if exchange_conf.get('_ft_has_params'):
+            self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'),
                                             self._ft_has)
             logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has)

@@ -165,18 +165,18 @@ class Exchange:

         # Initialize ccxt objects
         ccxt_config = self._ccxt_config
-        ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}), ccxt_config)
-        ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_sync_config', {}), ccxt_config)
+        ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config)
+        ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config)

-        self._api = self._init_ccxt(exchange_config, ccxt_kwargs=ccxt_config)
+        self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config)

         ccxt_async_config = self._ccxt_config
-        ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}),
+        ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}),
                                              ccxt_async_config)
-        ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_async_config', {}),
+        ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}),
                                              ccxt_async_config)
-        self._api_async = self._init_ccxt(
-            exchange_config, ccxt_async, ccxt_kwargs=ccxt_async_config)
+        self._api_async = self._init_ccxt(
+            exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config)

         logger.info(f'Using Exchange "{self.name}"')
         self.required_candle_call_count = 1

@@ -189,7 +189,7 @@ class Exchange:
             self._startup_candle_count, config.get('timeframe', ''))

         # Converts the interval provided in minutes in config to seconds
-        self.markets_refresh_interval: int = exchange_config.get(
+        self.markets_refresh_interval: int = exchange_conf.get(
             "markets_refresh_interval", 60) * 60

         if self.trading_mode != TradingMode.SPOT and load_leverage_tiers:
@@ -1432,6 +1432,47 @@ class Exchange:
         except ccxt.BaseError as e:
             raise OperationalException(e) from e

+    @retrier(retries=0)
+    def fetch_orders(self, pair: str, since: datetime) -> List[Dict]:
+        """
+        Fetch all orders for a pair "since"
+        :param pair: Pair for the query
+        :param since: Starting time for the query
+        """
+        if self._config['dry_run']:
+            return []
+
+        def fetch_orders_emulate() -> List[Dict]:
+            orders = []
+            if self.exchange_has('fetchClosedOrders'):
+                orders = self._api.fetch_closed_orders(pair, since=since_ms)
+            if self.exchange_has('fetchOpenOrders'):
+                orders_open = self._api.fetch_open_orders(pair, since=since_ms)
+                orders.extend(orders_open)
+            return orders
+
+        try:
+            since_ms = int((since.timestamp() - 10) * 1000)
+            if self.exchange_has('fetchOrders'):
+                try:
+                    orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms)
+                except ccxt.NotSupported:
+                    # Some exchanges don't support fetchOrders
+                    # attempt to fetch open and closed orders separately
+                    orders = fetch_orders_emulate()
+            else:
+                orders = fetch_orders_emulate()
+            self._log_exchange_response('fetch_orders', orders)
+            orders = [self._order_contracts_to_amount(o) for o in orders]
+            return orders
+        except ccxt.DDoSProtection as e:
+            raise DDosProtection(e) from e
+        except (ccxt.NetworkError, ccxt.ExchangeError) as e:
+            raise TemporaryError(
+                f'Could not fetch orders due to {e.__class__.__name__}. Message: {e}') from e
+        except ccxt.BaseError as e:
+            raise OperationalException(e) from e
+
     @retrier
     def fetch_trading_fees(self) -> Dict[str, Any]:
         """
@@ -2900,8 +2941,8 @@ class Exchange:
                 if nominal_value >= tier['minNotional']:
                     return (tier['maintenanceMarginRate'], tier['maintAmt'])

-            raise OperationalException("nominal value can not be lower than 0")
+            raise ExchangeError("nominal value can not be lower than 0")
             # The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it
             # describes the min amt for a tier, and the lowest tier will always go down to 0
         else:
-            raise OperationalException(f"Cannot get maintenance ratio using {self.name}")
+            raise ExchangeError(f"Cannot get maintenance ratio using {self.name}")
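A usage sketch for the new fetch_orders() (the exchange instance and pair are assumptions; in dry-run mode the method short-circuits to an empty list):

from datetime import datetime, timedelta, timezone

# 'exchange' is assumed to be an initialized Exchange instance in live mode.
since = datetime.now(timezone.utc) - timedelta(days=1)
orders = exchange.fetch_orders('BTC/USDT', since)
# Exchanges lacking fetchOrders fall back to fetchOpenOrders + fetchClosedOrders.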
View File
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -94,9 +94,12 @@ class Base3ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
View File
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -96,9 +96,12 @@ class Base4ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
View File
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -101,10 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment):
             )

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
View File
@@ -4,11 +4,11 @@ from abc import abstractmethod
 from enum import Enum
 from typing import Optional, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import pandas as pd
-from gym import spaces
-from gym.utils import seeding
+from gymnasium import spaces
+from gymnasium.utils import seeding
 from pandas import DataFrame

@@ -127,6 +127,14 @@ class BaseEnvironment(gym.Env):
         self.history: dict = {}
         self.trade_history: list = []

+    def get_attr(self, attr: str):
+        """
+        Returns the attribute of the environment
+        :param attr: attribute to return
+        :return: attribute
+        """
+        return getattr(self, attr)
+
     @abstractmethod
     def set_action_space(self):
         """

@@ -203,7 +211,7 @@ class BaseEnvironment(gym.Env):
         self.close_trade_profit = []
         self._total_unrealized_profit = 1

-        return self._get_observation()
+        return self._get_observation(), self.history

     @abstractmethod
     def step(self, action: int):

@@ -298,6 +306,12 @@ class BaseEnvironment(gym.Env):
         """
         An example reward function. This is the one function that users will likely
         wish to inject their own creativity into.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many possible
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
+
         :param action: int = The action made by the agent for the current candle.
         :return:
         float = the reward to give to the agent for current step (used for optimization
View File
@@ -6,7 +6,7 @@ from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import numpy.typing as npt
 import pandas as pd

@@ -16,14 +16,14 @@ from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
 from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.utils import set_random_seed
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.exceptions import OperationalException
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.freqai_interface import IFreqaiModel
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
-from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment, Positions
+from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback
 from freqtrade.persistence import Trade

@@ -46,8 +46,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
             'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
         th.set_num_threads(self.max_threads)
         self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
-        self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
-        self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
+        self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
+        self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
         self.eval_callback: Optional[EvalCallback] = None
         self.model_type = self.freqai_info['rl_config']['model_type']
         self.rl_config = self.freqai_info['rl_config']

@@ -371,6 +371,12 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         """
         An example reward function. This is the one function that users will likely
         wish to inject their own creativity into.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many possible
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
+
         :param action: int = The action made by the agent for the current candle.
         :return:
         float = the reward to give to the agent for current step (used for optimization

@@ -431,9 +437,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         return 0.


-def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
+def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,
              seed: int, train_df: DataFrame, price: DataFrame,
-             monitor: bool = False,
              env_info: Dict[str, Any] = {}) -> Callable:
     """
     Utility function for multiprocessed env.

@@ -450,8 +455,7 @@ def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,
         env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank,
                       **env_info)
-        if monitor:
-            env = Monitor(env)
+
         return env
     set_random_seed(seed)
     return _init
View File
@@ -45,6 +45,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
     ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
+        :param dk: The datakitchen object
         :param unfiltered_df: Full dataframe for the current backtest period.
         :return:
         :pred_df: dataframe containing the predictions

@@ -74,11 +75,14 @@ class BasePyTorchClassifier(BasePyTorchModel):
             dk.data_dictionary["prediction_features"],
             device=self.device
         )
+        self.model.model.eval()
         logits = self.model.model(x)
         probs = F.softmax(logits, dim=-1)
         predicted_classes = torch.argmax(probs, dim=-1)
         predicted_classes_str = self.decode_class_names(predicted_classes)
-        pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
+        # used .tolist to convert probs into an iterable, in this way Tensors
+        # are automatically moved to the CPU first if necessary.
+        pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names)
         pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
         pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
         return (pred_df, dk.do_predict)
View File
@@ -27,6 +27,7 @@ class BasePyTorchModel(IFreqaiModel, ABC):
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size')
         self.splits = ["train", "test"] if test_size != 0 else ["train"]
+        self.window_size = self.freqai_info.get("conv_width", 1)

     def train(
         self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
View File
@@ -44,8 +44,8 @@ class BasePyTorchRegressor(BasePyTorchModel):
             dk.data_dictionary["prediction_features"],
             device=self.device
         )
+        self.model.model.eval()
         y = self.model.model(x)
-        y = y.cpu()
-        pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
+        pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]])
         pred_df = dk.denormalize_labels_from_metadata(pred_df)
         return (pred_df, dk.do_predict)
View File
@@ -21,7 +21,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
 from freqtrade.freqai.data_drawer import FreqaiDataDrawer
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
-from freqtrade.freqai.utils import plot_feature_importance, record_params
+from freqtrade.freqai.utils import get_tb_logger, plot_feature_importance, record_params
 from freqtrade.strategy.interface import IStrategy

@@ -80,6 +80,7 @@ class IFreqaiModel(ABC):
         if self.keras and self.ft_params.get("DI_threshold", 0):
             self.ft_params["DI_threshold"] = 0
             logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")
+
         self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
         if self.ft_params.get("inlier_metric_window", 0):
             self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2

@@ -109,6 +110,7 @@ class IFreqaiModel(ABC):
         if self.ft_params.get('principal_component_analysis', False) and self.continual_learning:
             self.ft_params.update({'principal_component_analysis': False})
             logger.warning('User tried to use PCA with continual learning. Deactivating PCA.')
+        self.activate_tensorboard: bool = self.freqai_info.get('activate_tensorboard', True)

         record_params(config, self.full_path)

@@ -242,7 +244,7 @@ class IFreqaiModel(ABC):
                     new_trained_timerange, pair, strategy, dk, data_load_timerange
                 )
             except Exception as msg:
-                logger.warning(f"Training {pair} raised exception {msg.__class__.__name__}. "
-                               f"Message: {msg}, skipping.")
+                logger.exception(f"Training {pair} raised exception {msg.__class__.__name__}. "
+                                 f"Message: {msg}, skipping.")

             self.train_timer('stop', pair)

@@ -306,10 +308,11 @@ class IFreqaiModel(ABC):
             if dk.check_if_backtest_prediction_is_valid(len_backtest_df):
                 if check_features:
                     self.dd.load_metadata(dk)
-                    dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
+                    df_fts = self.dk.use_strategy_to_populate_indicators(
                         strategy, prediction_dataframe=dataframe.tail(1), pair=pair
                     )
-                    dk.find_features(dataframe_dummy_features)
+                    df_fts = dk.remove_special_chars_from_feature_names(df_fts)
+                    dk.find_features(df_fts)
                     self.check_if_feature_list_matches_strategy(dk)
                     check_features = False
                 append_df = dk.get_backtesting_prediction()

@@ -342,7 +345,10 @@ class IFreqaiModel(ABC):
         dk.find_labels(dataframe_train)

         try:
+            self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
+                                           self.activate_tensorboard)
             self.model = self.train(dataframe_train, pair, dk)
+            self.tb_logger.close()
         except Exception as msg:
             logger.warning(
                 f"Training {pair} raised exception {msg.__class__.__name__}. "

@@ -620,18 +626,23 @@ class IFreqaiModel(ABC):
             strategy, corr_dataframes, base_dataframes, pair
         )

-        new_trained_timerange = dk.buffer_timerange(new_trained_timerange)
-
-        unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
+        trained_timestamp = new_trained_timerange.stopts
+        buffered_timerange = dk.buffer_timerange(new_trained_timerange)
+        unfiltered_dataframe = dk.slice_dataframe(buffered_timerange, unfiltered_dataframe)

         # find the features indicated by strategy and store in datakitchen
         dk.find_features(unfiltered_dataframe)
         dk.find_labels(unfiltered_dataframe)

+        self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
+                                       self.activate_tensorboard)
         model = self.train(unfiltered_dataframe, pair, dk)
+        self.tb_logger.close()

-        self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
-        dk.set_new_model_names(pair, new_trained_timerange.stopts)
+        self.dd.pair_dict[pair]["trained_timestamp"] = trained_timestamp
+        dk.set_new_model_names(pair, trained_timestamp)
         self.dd.save_data(model, pair, dk)

         if self.plot_features:
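The new flag defaults to on; a minimal, illustrative freqai config fragment that disables it (surrounding keys omitted):

config = {
    "freqai": {
        "activate_tensorboard": False,  # skip TensorBoard logging during training
    },
}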
View File
@@ -74,15 +74,17 @@ class PyTorchMLPClassifier(BasePyTorchClassifier):
         model.to(self.device)
         optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
         criterion = torch.nn.CrossEntropyLoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            model_meta_data={"class_names": class_names},
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
+        # check if continual_learning is activated, and retrieve the model to continue training
+        trainer = self.get_init_model(dk.pair)
+        if trainer is None:
+            trainer = PyTorchModelTrainer(
+                model=model,
+                optimizer=optimizer,
+                criterion=criterion,
+                model_meta_data={"class_names": class_names},
+                device=self.device,
+                data_convertor=self.data_convertor,
+                tb_logger=self.tb_logger,
+                **self.trainer_kwargs,
+            )
         trainer.fit(data_dictionary, self.splits)
View File
@@ -69,14 +69,16 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
         model.to(self.device)
         optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
         criterion = torch.nn.MSELoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
+        # check if continual_learning is activated, and retrieve the model to continue training
+        trainer = self.get_init_model(dk.pair)
+        if trainer is None:
+            trainer = PyTorchModelTrainer(
+                model=model,
+                optimizer=optimizer,
+                criterion=criterion,
+                device=self.device,
+                data_convertor=self.data_convertor,
+                tb_logger=self.tb_logger,
+                **self.trainer_kwargs,
+            )
         trainer.fit(data_dictionary, self.splits)
View File
@@ -0,0 +1,140 @@
from typing import Any, Dict, Tuple
import numpy as np
import numpy.typing as npt
import pandas as pd
import torch
from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
PyTorchDataConvertor)
from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchTransformerTrainer
from freqtrade.freqai.torch.PyTorchTransformerModel import PyTorchTransformerModel
class PyTorchTransformerRegressor(BasePyTorchRegressor):
"""
This class implements the fit method of IFreqaiModel.
In the fit method we initialize the model and trainer objects.
The only requirement from the model is to be aligned to the PyTorchRegressor
predict method, which expects the model to predict a tensor of type float.
The trainer defines the training loop.
Parameters are passed via `model_training_parameters` under the freqai
section in the config file. e.g:
{
...
"freqai": {
...
"model_training_parameters" : {
"learning_rate": 3e-4,
"trainer_kwargs": {
"max_iters": 5000,
"batch_size": 64,
"max_n_eval_batches": null
},
"model_kwargs": {
"hidden_dim": 512,
"dropout_percent": 0.2,
"n_layer": 1,
},
}
}
}
"""
@property
def data_convertor(self) -> PyTorchDataConvertor:
return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
config = self.freqai_info.get("model_training_parameters", {})
self.learning_rate: float = config.get("learning_rate", 3e-4)
self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
User sets up the training and test data to fit their desired model here
:param data_dictionary: the dictionary holding all data for train, test,
labels, weights
:param dk: The datakitchen object for the current coin/model
"""
n_features = data_dictionary["train_features"].shape[-1]
n_labels = data_dictionary["train_labels"].shape[-1]
model = PyTorchTransformerModel(
input_dim=n_features,
output_dim=n_labels,
time_window=self.window_size,
**self.model_kwargs
)
model.to(self.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
criterion = torch.nn.MSELoss()
# check if continual_learning is activated, and retrieve the model to continue training
trainer = self.get_init_model(dk.pair)
if trainer is None:
trainer = PyTorchTransformerTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
device=self.device,
data_convertor=self.data_convertor,
window_size=self.window_size,
tb_logger=self.tb_logger,
**self.trainer_kwargs,
)
trainer.fit(data_dictionary, self.splits)
return trainer
def predict(
self, unfiltered_df: pd.DataFrame, dk: FreqaiDataKitchen, **kwargs
) -> Tuple[pd.DataFrame, npt.NDArray[np.int_]]:
"""
Filter the prediction features data and predict with it.
:param unfiltered_df: Full dataframe for the current backtest period.
:return:
:pred_df: dataframe containing the predictions
:do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
data (NaNs) or felt uncertain about data (PCA and DI index)
"""
dk.find_features(unfiltered_df)
filtered_df, _ = dk.filter_features(
unfiltered_df, dk.training_features_list, training_filter=False
)
filtered_df = dk.normalize_data_from_metadata(filtered_df)
dk.data_dictionary["prediction_features"] = filtered_df
self.data_cleaning_predict(dk)
x = self.data_convertor.convert_x(
dk.data_dictionary["prediction_features"],
device=self.device
)
# if user is asking for multiple predictions, slide the window
# along the tensor
x = x.unsqueeze(0)
# create empty torch tensor
self.model.model.eval()
yb = torch.empty(0).to(self.device)
if x.shape[1] > 1:
ws = self.window_size
for i in range(0, x.shape[1] - ws):
xb = x[:, i:i + ws, :].to(self.device)
y = self.model.model(xb)
yb = torch.cat((yb, y), dim=0)
else:
yb = self.model.model(x)
yb = yb.cpu().squeeze()
pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list)
pred_df = dk.denormalize_labels_from_metadata(pred_df)
if x.shape[1] > 1:
zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
columns=pred_df.columns)
pred_df = pd.concat([zeros_df, pred_df], axis=0, ignore_index=True)
return (pred_df, dk.do_predict)
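The sliding-window loop above produces one prediction per window position, so the padding arithmetic is: for T candles and window size ws, the loop yields T - ws rows and ws zero rows are prepended to restore length T. Worked with illustrative numbers:

T, ws = 100, 10       # prediction candles, conv_width window size
n_preds = T - ws      # 90 rows emerge from the sliding-window loop
n_pad = T - n_preds   # 10 zero rows are prepended so len(pred_df) == T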
View File
@@ -1,11 +1,12 @@
 import logging
 from pathlib import Path
-from typing import Any, Dict
+from typing import Any, Dict, Type

 import torch as th

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
+from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel

@@ -57,10 +58,14 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
                              net_arch=self.net_arch)

+        if self.activate_tensorboard:
+            tb_path = Path(dk.full_path / "tensorboard" / dk.pair.split('/')[0])
+        else:
+            tb_path = None
+
         if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
             model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
-                                    tensorboard_log=Path(
-                                        dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
+                                    tensorboard_log=tb_path,
                                     **self.freqai_info.get('model_training_parameters', {})
                                     )
         else:

@@ -84,7 +89,9 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             return model

-    class MyRLEnv(Base5ActionRLEnv):
+    MyRLEnv: Type[BaseEnvironment]
+
+    class MyRLEnv(Base5ActionRLEnv):  # type: ignore[no-redef]
         """
         User can override any function in BaseRLEnv and gym.Env. Here the user
         sets a custom reward based on profit and trade duration.

@@ -94,6 +101,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             """
             An example reward function. This is the one function that users will likely
             wish to inject their own creativity into.
+
+            Warning!
+            This function is a showcase of functionality designed to show as many possible
+            environment control features as possible. It is also designed to run quickly
+            on small computers. This is a benchmark, it is *not* for live production.
+
             :param action: int = The action made by the agent for the current candle.
             :return:
             float = the reward to give to the agent for current step (used for optimization
View File
@@ -3,12 +3,12 @@ from typing import Any, Dict

 from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
-from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
+from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback

 logger = logging.getLogger(__name__)

@@ -41,22 +41,25 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):
         env_info = self.pack_env_dict(dk.pair)

+        eval_freq = len(train_df) // self.max_threads
+
         env_id = "train_env"
-        self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
-                                                 train_df, prices_train,
-                                                 monitor=True,
-                                                 env_info=env_info) for i
-                                        in range(self.max_threads)])
+        self.train_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
+                                                            train_df, prices_train,
+                                                            env_info=env_info) for i
+                                                   in range(self.max_threads)]))

         eval_env_id = 'eval_env'
-        self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
-                                                test_df, prices_test,
-                                                monitor=True,
-                                                env_info=env_info) for i
-                                       in range(self.max_threads)])
+        self.eval_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
+                                                           test_df, prices_test,
+                                                           env_info=env_info) for i
+                                                  in range(self.max_threads)]))

         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=len(train_df),
+                                          render=False, eval_freq=eval_freq,
                                           best_model_save_path=str(dk.data_path))

+        # THE TENSORBOARD CALLBACK IS NOT RECOMMENDED WITH MULTIPLE ENVS:
+        # IT WILL RETURN FALSE INFORMATION, AND IT IS NOT THREAD SAFE WITH SB3!!!
         actions = self.train_env.env_method("get_actions")[0]
         self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
View File
@@ -5,6 +5,7 @@ from xgboost import XGBRegressor

 from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.tensorboard import TBCallback

 logger = logging.getLogger(__name__)

@@ -44,7 +45,10 @@ class XGBoostRegressor(BaseRegressionModel):
         model = XGBRegressor(**self.model_training_parameters)

+        model.set_params(callbacks=[TBCallback(dk.data_path, activate=self.activate_tensorboard)])
         model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
                   sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
+        # set the callbacks to empty so that we can serialize to disk later
+        model.set_params(callbacks=[])

         return model
View File
@@ -3,8 +3,9 @@ from typing import Any, Dict, Type, Union

 from stable_baselines3.common.callbacks import BaseCallback
 from stable_baselines3.common.logger import HParam
+from stable_baselines3.common.vec_env import VecEnv

-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions


 class TensorboardCallback(BaseCallback):

@@ -12,11 +13,13 @@ class TensorboardCallback(BaseCallback):
     Custom callback for plotting additional values in tensorboard and
     episodic summary reports.
     """
+    # Override training_env type to fix type errors
+    training_env: Union[VecEnv, None] = None
+
     def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
         super().__init__(verbose)
         self.model: Any = None
-        self.logger = None  # type: Any
-        self.training_env: BaseEnvironment = None  # type: ignore
+        self.logger: Any = None
         self.actions: Type[Enum] = actions

     def _on_training_start(self) -> None:

@@ -44,6 +47,8 @@ class TensorboardCallback(BaseCallback):
     def _on_step(self) -> bool:

         local_info = self.locals["infos"][0]
+        if self.training_env is None:
+            return True
         tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]

         for metric in local_info:
View File
@@ -0,0 +1,15 @@
# ensure users can still use a non-torch freqai version
try:
from freqtrade.freqai.tensorboard.tensorboard import TensorBoardCallback, TensorboardLogger
TBLogger = TensorboardLogger
TBCallback = TensorBoardCallback
except ModuleNotFoundError:
from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
BaseTensorboardLogger)
TBLogger = BaseTensorboardLogger # type: ignore
TBCallback = BaseTensorBoardCallback # type: ignore
__all__ = (
"TBLogger",
"TBCallback"
)
View File
@@ -0,0 +1,35 @@
import logging
from pathlib import Path
from typing import Any
from xgboost.callback import TrainingCallback
logger = logging.getLogger(__name__)
class BaseTensorboardLogger:
def __init__(self, logdir: Path, activate: bool = True):
logger.warning("Tensorboard is not installed, no logs will be written."
"Ensure torch is installed, or use the torch/RL docker images")
def log_scalar(self, tag: str, scalar_value: Any, step: int):
return
def close(self):
return
class BaseTensorBoardCallback(TrainingCallback):
def __init__(self, logdir: Path, activate: bool = True):
logger.warning("Tensorboard is not installed, no logs will be written."
"Ensure torch is installed, or use the torch/RL docker images")
def after_iteration(
self, model, epoch: int, evals_log: TrainingCallback.EvalsLog
) -> bool:
return False
def after_training(self, model):
return model
View File
@@ -0,0 +1,62 @@
import logging
from pathlib import Path
from typing import Any
from torch.utils.tensorboard import SummaryWriter
from xgboost import callback
from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
BaseTensorboardLogger)
logger = logging.getLogger(__name__)
class TensorboardLogger(BaseTensorboardLogger):
def __init__(self, logdir: Path, activate: bool = True):
self.activate = activate
if self.activate:
self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
def log_scalar(self, tag: str, scalar_value: Any, step: int):
if self.activate:
self.writer.add_scalar(tag, scalar_value, step)
def close(self):
if self.activate:
self.writer.flush()
self.writer.close()
class TensorBoardCallback(BaseTensorBoardCallback):
def __init__(self, logdir: Path, activate: bool = True):
self.activate = activate
if self.activate:
self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
def after_iteration(
self, model, epoch: int, evals_log: callback.TrainingCallback.EvalsLog
) -> bool:
if not self.activate:
return False
if not evals_log:
return False
for data, metric in evals_log.items():
for metric_name, log in metric.items():
score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
if data == "train":
self.writer.add_scalar("train_loss", score, epoch)
else:
self.writer.add_scalar("valid_loss", score, epoch)
return False
def after_training(self, model):
if not self.activate:
return model
self.writer.flush()
self.writer.close()
return model
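A usage sketch for the logger half of this module (the path is illustrative):

from pathlib import Path

tb = TensorboardLogger(Path("user_data/models/example"), activate=True)
tb.log_scalar("train_loss", 0.42, 1)
tb.close()
# events are written under user_data/models/example/tensorboard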
View File
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import List, Optional
+from typing import Optional

 import pandas as pd
 import torch

@@ -12,14 +12,14 @@ class PyTorchDataConvertor(ABC):
     """

     @abstractmethod
-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         """
         :param df: "*_features" dataframe.
         :param device: The device to use for training (e.g. 'cpu', 'cuda').
         """

     @abstractmethod
-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         """
         :param df: "*_labels" dataframe.
         :param device: The device to use for training (e.g. 'cpu', 'cuda').

@@ -45,14 +45,14 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
         self._target_tensor_type = target_tensor_type
         self._squeeze_target_tensor = squeeze_target_tensor

-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         x = torch.from_numpy(df.values).float()
         if device:
             x = x.to(device)
-        return [x]
+        return x

-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         y = torch.from_numpy(df.values)
         if self._target_tensor_type:

@@ -64,4 +64,4 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
         if device:
             y = y.to(device)
-        return [y]
+        return y
View File
@@ -1,5 +1,4 @@
 import logging
-from typing import List

 import torch
 from torch import nn

@@ -47,8 +46,8 @@ class PyTorchMLPModel(nn.Module):
         self.relu = nn.ReLU()
         self.dropout = nn.Dropout(p=dropout_percent)

-    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
-        x: torch.Tensor = tensors[0]
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        # x: torch.Tensor = tensors[0]
         x = self.relu(self.input_layer(x))
         x = self.dropout(x)
         x = self.blocks(x)
View File
@@ -12,6 +12,8 @@ from torch.utils.data import DataLoader, TensorDataset
 from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
 from freqtrade.freqai.torch.PyTorchTrainerInterface import PyTorchTrainerInterface

+from .datasets import WindowDataset
+
 logger = logging.getLogger(__name__)

@@ -23,9 +25,10 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         optimizer: Optimizer,
         criterion: nn.Module,
         device: str,
-        init_model: Dict,
         data_convertor: PyTorchDataConvertor,
         model_meta_data: Dict[str, Any] = {},
+        window_size: int = 1,
+        tb_logger: Any = None,
         **kwargs
     ):
         """

@@ -52,8 +55,8 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         self.batch_size: int = kwargs.get("batch_size", 64)
         self.max_n_eval_batches: Optional[int] = kwargs.get("max_n_eval_batches", None)
         self.data_convertor = data_convertor
-        if init_model:
-            self.load_from_checkpoint(init_model)
+        self.window_size: int = window_size
+        self.tb_logger = tb_logger

     def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]):
         """

@@ -75,36 +78,28 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
             batch_size=self.batch_size,
             n_iters=self.max_iters
         )
+        self.model.train()
         for epoch in range(1, epochs + 1):
-            # training
-            losses = []
             for i, batch_data in enumerate(data_loaders_dictionary["train"]):
-                for tensor in batch_data:
-                    tensor.to(self.device)
-
-                xb = batch_data[:-1]
-                yb = batch_data[-1]
+                xb, yb = batch_data
+                xb.to(self.device)
+                yb.to(self.device)

                 yb_pred = self.model(xb)
                 loss = self.criterion(yb_pred, yb)

                 self.optimizer.zero_grad(set_to_none=True)
                 loss.backward()
                 self.optimizer.step()
-                losses.append(loss.item())
-            train_loss = sum(losses) / len(losses)
-            log_message = f"epoch {epoch}/{epochs}: train loss {train_loss:.4f}"
+                self.tb_logger.log_scalar("train_loss", loss.item(), i)

             # evaluation
             if "test" in splits:
-                test_loss = self.estimate_loss(
+                self.estimate_loss(
                     data_loaders_dictionary,
                     self.max_n_eval_batches,
                     "test"
                 )
-                log_message += f" ; test loss {test_loss:.4f}"
-
-            logger.info(log_message)

     @torch.no_grad()
     def estimate_loss(

@@ -112,26 +107,22 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         data_loader_dictionary: Dict[str, DataLoader],
         max_n_eval_batches: Optional[int],
         split: str,
-    ) -> float:
+    ) -> None:
         self.model.eval()
         n_batches = 0
-        losses = []
         for i, batch_data in enumerate(data_loader_dictionary[split]):
             if max_n_eval_batches and i > max_n_eval_batches:
                 n_batches += 1
                 break
+            xb, yb = batch_data
+            xb.to(self.device)
+            yb.to(self.device)

-            for tensor in batch_data:
-                tensor.to(self.device)
-
-            xb = batch_data[:-1]
-            yb = batch_data[-1]
             yb_pred = self.model(xb)
             loss = self.criterion(yb_pred, yb)
-            losses.append(loss.item())
+            self.tb_logger.log_scalar(f"{split}_loss", loss.item(), i)

         self.model.train()
-        return sum(losses) / len(losses)

     def create_data_loaders_dictionary(
         self,

@@ -145,7 +136,7 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         for split in splits:
             x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
             y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
-            dataset = TensorDataset(*x, *y)
+            dataset = TensorDataset(x, y)
             data_loader = DataLoader(
                 dataset,
                 batch_size=self.batch_size,

@@ -206,3 +197,33 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
         self.model_meta_data = checkpoint["model_meta_data"]
         return self
+
+
+class PyTorchTransformerTrainer(PyTorchModelTrainer):
+    """
+    Creating a trainer for the Transformer model.
+    """
+    def create_data_loaders_dictionary(
+        self,
+        data_dictionary: Dict[str, pd.DataFrame],
+        splits: List[str]
+    ) -> Dict[str, DataLoader]:
+        """
+        Converts the input data to PyTorch tensors using a data loader.
+        """
+        data_loader_dictionary = {}
+        for split in splits:
+            x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
+            y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
+            dataset = WindowDataset(x, y, self.window_size)
+            data_loader = DataLoader(
+                dataset,
+                batch_size=self.batch_size,
+                shuffle=False,
+                drop_last=True,
+                num_workers=0,
+            )
+            data_loader_dictionary[split] = data_loader
+        return data_loader_dictionary
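The only behavioural difference from the base trainer is the dataset: WindowDataset (added in the datasets module below) emits (window, label) pairs, so every batch gains a time dimension. A shape sketch under assumed sizes:

import torch
from torch.utils.data import DataLoader

x = torch.randn(500, 32)  # 500 candles, 32 features
y = torch.randn(500, 1)   # one label per candle
loader = DataLoader(WindowDataset(x, y, 10), batch_size=64, drop_last=True)
xb, yb = next(iter(loader))
# xb.shape == (64, 10, 32) and yb.shape == (64, 1, 1)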
View File
@@ -0,0 +1,93 @@
import math
import torch
import torch.nn as nn
"""
The architecture is based on the paper Attention Is All You Need.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017.
"""
class PyTorchTransformerModel(nn.Module):
"""
A transformer approach to time series modeling using positional encoding.
The architecture is based on the paper Attention Is All You Need.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
Lukasz Kaiser, and Illia Polosukhin. 2017.
"""
def __init__(self, input_dim: int = 7, output_dim: int = 7, hidden_dim=1024,
n_layer=2, dropout_percent=0.1, time_window=10, nhead=8):
super().__init__()
self.time_window = time_window
# ensure the input dimension to the transformer is divisible by nhead
self.dim_val = input_dim - (input_dim % nhead)
self.input_net = nn.Sequential(
nn.Dropout(dropout_percent), nn.Linear(input_dim, self.dim_val)
)
# Encode the timeseries with Positional encoding
self.positional_encoding = PositionalEncoding(d_model=self.dim_val, max_len=self.dim_val)
# Define the encoder block of the Transformer
self.encoder_layer = nn.TransformerEncoderLayer(
d_model=self.dim_val, nhead=nhead, dropout=dropout_percent, batch_first=True)
self.transformer = nn.TransformerEncoder(self.encoder_layer, num_layers=n_layer)
# the pseudo decoding FC
self.output_net = nn.Sequential(
nn.Linear(self.dim_val * time_window, int(hidden_dim)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim), int(hidden_dim / 2)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim / 2), int(hidden_dim / 4)),
nn.ReLU(),
nn.Dropout(dropout_percent),
nn.Linear(int(hidden_dim / 4), output_dim)
)
def forward(self, x, mask=None, add_positional_encoding=True):
"""
Args:
x: Input features of shape [Batch, SeqLen, input_dim]
mask: Mask to apply on the attention outputs (optional)
add_positional_encoding: If True, we add the positional encoding to the input.
Might not be desired for some tasks.
"""
x = self.input_net(x)
if add_positional_encoding:
x = self.positional_encoding(x)
x = self.transformer(x, mask=mask)
x = x.reshape(-1, 1, self.time_window * x.shape[-1])
x = self.output_net(x)
return x
class PositionalEncoding(torch.nn.Module):
def __init__(self, d_model, max_len=5000):
"""
Args:
d_model: Hidden dimensionality of the input.
max_len: Maximum length of a sequence to expect.
"""
super().__init__()
# Create matrix of [SeqLen, HiddenDim] representing the positional encoding
# for max_len inputs
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe, persistent=False)
def forward(self, x):
x = x + self.pe[:, : x.size(1)]
return x
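A shape check for the model above, with illustrative dimensions (input_dim=16 is already divisible by nhead=8, so dim_val equals input_dim here):

import torch

model = PyTorchTransformerModel(input_dim=16, output_dim=3, hidden_dim=64,
                                n_layer=1, time_window=10, nhead=8)
x = torch.randn(4, 10, 16)   # [Batch, SeqLen == time_window, input_dim]
out = model(x)               # positional encoding is added by default
assert out.shape == (4, 1, 3)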
View File
@ -0,0 +1,19 @@
import torch
class WindowDataset(torch.utils.data.Dataset):
def __init__(self, xs, ys, window_size):
self.xs = xs
self.ys = ys
self.window_size = window_size
def __len__(self):
return len(self.xs) - self.window_size
def __getitem__(self, index):
idx_rev = len(self.xs) - self.window_size - index - 1
window_x = self.xs[idx_rev:idx_rev + self.window_size, :]
# Beware of indexing: window_x and window_y are aimed at the same row;
# the ':' slice above is end-exclusive, so the label row below is the
# last row of the feature window.
window_y = self.ys[idx_rev + self.window_size - 1, :].unsqueeze(0)
return window_x, window_y
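A small check of the reversed indexing, with illustrative values: index 0 yields the most recent complete window, and the label is the last row of that window:

import torch

xs = torch.arange(12, dtype=torch.float32).reshape(6, 2)
ys = torch.arange(6, dtype=torch.float32).reshape(6, 1)
ds = WindowDataset(xs, ys, window_size=3)

wx, wy = ds[0]                              # idx_rev = 6 - 3 - 0 - 1 = 2
assert torch.equal(wx, xs[2:5])             # feature rows 2, 3, 4
assert torch.equal(wy, ys[4].unsqueeze(0))  # label row 4, last row of the window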
View File
@ -92,55 +92,6 @@ def get_required_data_timerange(config: Config) -> TimeRange:
return data_load_timerange return data_load_timerange
# Keep below for when we wish to download heterogeneously lengthed data for FreqAI.
# def download_all_data_for_training(dp: DataProvider, config: Config) -> None:
# """
# Called only once upon start of bot to download the necessary data for
# populating indicators and training a FreqAI model.
# :param timerange: TimeRange = The full data timerange for populating the indicators
# and training the model.
# :param dp: DataProvider instance attached to the strategy
# """
# if dp._exchange is not None:
# markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
# or config.get('include_inactive')]
# else:
# # This should not occur:
# raise OperationalException('No exchange object found.')
# all_pairs = dynamic_expand_pairlist(config, markets)
# if not dp._exchange:
# # Not realistic - this is only called in live mode.
# raise OperationalException("Dataprovider did not have an exchange attached.")
# time = datetime.now(tz=timezone.utc).timestamp()
# for tf in config["freqai"]["feature_parameters"].get("include_timeframes"):
# timerange = TimeRange()
# timerange.startts = int(time)
# timerange.stopts = int(time)
# startup_candles = dp.get_required_startup(str(tf))
# tf_seconds = timeframe_to_seconds(str(tf))
# timerange.subtract_start(tf_seconds * startup_candles)
# new_pairs_days = int((timerange.stopts - timerange.startts) / 86400)
# # FIXME: now that we are looping on `refresh_backtest_ohlcv_data`, the function
# # redownloads the funding rate for each pair.
# refresh_backtest_ohlcv_data(
# dp._exchange,
# pairs=all_pairs,
# timeframes=[tf],
# datadir=config["datadir"],
# timerange=timerange,
# new_pairs_days=new_pairs_days,
# erase=False,
# data_format=config.get("dataformat_ohlcv", "json"),
# trading_mode=config.get("trading_mode", "spot"),
# prepend=config.get("prepend_data", False),
# )
def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen, def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen,
count_max: int = 25) -> None: count_max: int = 25) -> None:
""" """
@ -233,3 +184,13 @@ def get_timerange_backtest_live_models(config: Config) -> str:
dd = FreqaiDataDrawer(models_path, config) dd = FreqaiDataDrawer(models_path, config)
timerange = dd.get_timerange_from_live_historic_predictions() timerange = dd.get_timerange_from_live_historic_predictions()
return timerange.timerange_str return timerange.timerange_str
def get_tb_logger(model_type: str, path: Path, activate: bool) -> Any:
if model_type == "pytorch" and activate:
from freqtrade.freqai.tensorboard import TBLogger
return TBLogger(path, activate)
else:
from freqtrade.freqai.tensorboard.base_tensorboard import BaseTensorboardLogger
return BaseTensorboardLogger(path, activate)
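A hedged usage sketch; the path is a placeholder, log_scalar mirrors the call seen in the trainer above, and close() is assumed from the logger interface:

from pathlib import Path

tb_logger = get_tb_logger("pytorch", Path("user_data/models/run1"), activate=True)
tb_logger.log_scalar("train_loss", 0.42, 1)
tb_logger.close()  # assumed to exist on both logger implementations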
View File
@ -1,9 +1,9 @@
""" """
Freqtrade is the main module of this bot. It contains the class Freqtrade() Freqtrade is the main module of this bot. It contains the class Freqtrade()
""" """
import copy
import logging import logging
import traceback import traceback
from copy import deepcopy
from datetime import datetime, time, timedelta, timezone from datetime import datetime, time, timedelta, timezone
from math import isclose from math import isclose
from threading import Lock from threading import Lock
@ -13,7 +13,7 @@ from schedule import Scheduler
from freqtrade import constants from freqtrade import constants
from freqtrade.configuration import validate_config_consistency from freqtrade.configuration import validate_config_consistency
from freqtrade.constants import BuySell, Config, LongShort from freqtrade.constants import BuySell, Config, ExchangeConfig, LongShort
from freqtrade.data.converter import order_book_to_dataframe from freqtrade.data.converter import order_book_to_dataframe
from freqtrade.data.dataprovider import DataProvider from freqtrade.data.dataprovider import DataProvider
from freqtrade.edge import Edge from freqtrade.edge import Edge
@ -23,6 +23,7 @@ from freqtrade.exceptions import (DependencyException, ExchangeError, Insufficie
InvalidOrderException, PricingError) InvalidOrderException, PricingError)
from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, timeframe_to_minutes, timeframe_to_next_date, from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, timeframe_to_minutes, timeframe_to_next_date,
timeframe_to_seconds) timeframe_to_seconds)
from freqtrade.exchange.common import remove_exchange_credentials
from freqtrade.misc import safe_value_fallback, safe_value_fallback2 from freqtrade.misc import safe_value_fallback, safe_value_fallback2
from freqtrade.mixins import LoggingMixin from freqtrade.mixins import LoggingMixin
from freqtrade.persistence import Order, PairLocks, Trade, init_db from freqtrade.persistence import Order, PairLocks, Trade, init_db
@ -63,6 +64,9 @@ class FreqtradeBot(LoggingMixin):
# Init objects # Init objects
self.config = config self.config = config
exchange_config: ExchangeConfig = deepcopy(config['exchange'])
# Remove credentials from original exchange config to avoid accidental credential exposure
remove_exchange_credentials(config['exchange'], True)
self.strategy: IStrategy = StrategyResolver.load_strategy(self.config) self.strategy: IStrategy = StrategyResolver.load_strategy(self.config)
@ -70,7 +74,7 @@ class FreqtradeBot(LoggingMixin):
validate_config_consistency(config) validate_config_consistency(config)
self.exchange = ExchangeResolver.load_exchange( self.exchange = ExchangeResolver.load_exchange(
self.config['exchange']['name'], self.config, load_leverage_tiers=True) self.config, exchange_config=exchange_config, load_leverage_tiers=True)
init_db(self.config['db_url']) init_db(self.config['db_url'])
@ -451,6 +455,42 @@ class FreqtradeBot(LoggingMixin):
except ExchangeError: except ExchangeError:
logger.warning(f"Error updating {order.order_id}.") logger.warning(f"Error updating {order.order_id}.")
def handle_onexchange_order(self, trade: Trade):
"""
Try to refind an order that is not in the database.
Only used when the balance disappeared, which would make exiting impossible.
"""
try:
orders = self.exchange.fetch_orders(trade.pair, trade.open_date_utc)
for order in orders:
trade_order = [o for o in trade.orders if o.order_id == order['id']]
if trade_order:
continue
logger.info(f"Found previously unknown order {order['id']} for {trade.pair}.")
order_obj = Order.parse_from_ccxt_object(order, trade.pair, order['side'])
order_obj.order_filled_date = datetime.fromtimestamp(
safe_value_fallback(order, 'lastTradeTimestamp', 'timestamp') // 1000,
tz=timezone.utc)
trade.orders.append(order_obj)
# TODO: how do we handle open_order_id ...
Trade.commit()
prev_exit_reason = trade.exit_reason
trade.exit_reason = ExitType.SOLD_ON_EXCHANGE.value
self.update_trade_state(trade, order['id'], order)
logger.info(f"handled order {order['id']}")
if not trade.is_open:
# Trade was just closed
trade.close_date = order_obj.order_filled_date
Trade.commit()
break
else:
trade.exit_reason = prev_exit_reason
Trade.commit()
except ExchangeError:
logger.warning("Error finding onexchange order")
# #
# BUY / enter positions / open trades logic and methods # BUY / enter positions / open trades logic and methods
# #
@ -461,7 +501,7 @@ class FreqtradeBot(LoggingMixin):
""" """
trades_created = 0 trades_created = 0
whitelist = copy.deepcopy(self.active_pair_whitelist) whitelist = deepcopy(self.active_pair_whitelist)
if not whitelist: if not whitelist:
self.log_once("Active pair whitelist is empty.", logger.info) self.log_once("Active pair whitelist is empty.", logger.info)
return trades_created return trades_created
@ -982,7 +1022,7 @@ class FreqtradeBot(LoggingMixin):
'base_currency': self.exchange.get_pair_base_currency(trade.pair), 'base_currency': self.exchange.get_pair_base_currency(trade.pair),
'fiat_currency': self.config.get('fiat_display_currency', None), 'fiat_currency': self.config.get('fiat_display_currency', None),
'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount), 'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount),
'open_date': trade.open_date or datetime.utcnow(), 'open_date': trade.open_date_utc or datetime.now(timezone.utc),
'current_rate': current_rate, 'current_rate': current_rate,
'sub_trade': sub_trade, 'sub_trade': sub_trade,
} }
@ -1034,6 +1074,13 @@ class FreqtradeBot(LoggingMixin):
""" """
trades_closed = 0 trades_closed = 0
for trade in trades: for trade in trades:
if not self.wallets.check_exit_amount(trade):
logger.warning(
f'Not enough {trade.safe_base_currency} in wallet to exit {trade}. '
'Trying to recover.')
self.handle_onexchange_order(trade)
try: try:
try: try:
if (self.strategy.order_types.get('stoploss_on_exchange') and if (self.strategy.order_types.get('stoploss_on_exchange') and
@ -1536,13 +1583,13 @@ class FreqtradeBot(LoggingMixin):
# Update wallets to ensure amounts tied up in a stoploss is now free! # Update wallets to ensure amounts tied up in a stoploss is now free!
self.wallets.update() self.wallets.update()
if self.trading_mode == TradingMode.FUTURES: if self.trading_mode == TradingMode.FUTURES:
# A safe exit amount isn't needed for futures, you can just exit/close the position
return amount return amount
trade_base_currency = self.exchange.get_pair_base_currency(pair) trade_base_currency = self.exchange.get_pair_base_currency(pair)
wallet_amount = self.wallets.get_free(trade_base_currency) wallet_amount = self.wallets.get_free(trade_base_currency)
logger.debug(f"{pair} - Wallet: {wallet_amount} - Trade-amount: {amount}") logger.debug(f"{pair} - Wallet: {wallet_amount} - Trade-amount: {amount}")
if wallet_amount >= amount: if wallet_amount >= amount:
# A safe exit amount isn't needed for futures, you can just exit/close the position
return amount return amount
elif wallet_amount > amount * 0.98: elif wallet_amount > amount * 0.98:
logger.info(f"{pair} - Falling back to wallet-amount {wallet_amount} -> {amount}.") logger.info(f"{pair} - Falling back to wallet-amount {wallet_amount} -> {amount}.")
@ -1698,8 +1745,8 @@ class FreqtradeBot(LoggingMixin):
'enter_tag': trade.enter_tag, 'enter_tag': trade.enter_tag,
'sell_reason': trade.exit_reason, # Deprecated 'sell_reason': trade.exit_reason, # Deprecated
'exit_reason': trade.exit_reason, 'exit_reason': trade.exit_reason,
'open_date': trade.open_date, 'open_date': trade.open_date_utc,
'close_date': trade.close_date or datetime.utcnow(), 'close_date': trade.close_date_utc or datetime.now(timezone.utc),
'stake_amount': trade.stake_amount, 'stake_amount': trade.stake_amount,
'stake_currency': self.config['stake_currency'], 'stake_currency': self.config['stake_currency'],
'base_currency': self.exchange.get_pair_base_currency(trade.pair), 'base_currency': self.exchange.get_pair_base_currency(trade.pair),
@ -1721,10 +1768,8 @@ class FreqtradeBot(LoggingMixin):
else: else:
trade.exit_order_status = reason trade.exit_order_status = reason
order = trade.select_order_by_order_id(order_id) order_or_none = trade.select_order_by_order_id(order_id)
if not order: order = self.order_obj_or_raise(order_id, order_or_none)
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
profit_rate: float = trade.safe_close_rate profit_rate: float = trade.safe_close_rate
profit_trade = trade.calc_profit(rate=profit_rate) profit_trade = trade.calc_profit(rate=profit_rate)
@ -1765,6 +1810,12 @@ class FreqtradeBot(LoggingMixin):
# Send the message # Send the message
self.rpc.send_msg(msg) self.rpc.send_msg(msg)
def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
if not order_obj:
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
return order_obj
# #
# Common update trade state methods # Common update trade state methods
# #
@ -1803,10 +1854,8 @@ class FreqtradeBot(LoggingMixin):
# Handling of this will happen in check_handle_timedout. # Handling of this will happen in check_handle_timedout.
return True return True
order_obj = trade.select_order_by_order_id(order_id) order_obj_or_none = trade.select_order_by_order_id(order_id)
if not order_obj: order_obj = self.order_obj_or_raise(order_id, order_obj_or_none)
raise DependencyException(
f"Order_obj not found for {order_id}. This should not have happened.")
self.handle_order_fee(trade, order_obj, order) self.handle_order_fee(trade, order_obj, order)
@ -1824,6 +1873,7 @@ class FreqtradeBot(LoggingMixin):
# Must also run for partial exits # Must also run for partial exits
# TODO: Margin will need to use interest_rate as well. # TODO: Margin will need to use interest_rate as well.
# interest_rate = self.exchange.get_interest_rate() # interest_rate = self.exchange.get_interest_rate()
try:
trade.set_liquidation_price(self.exchange.get_liquidation_price( trade.set_liquidation_price(self.exchange.get_liquidation_price(
pair=trade.pair, pair=trade.pair,
open_rate=trade.open_rate, open_rate=trade.open_rate,
@ -1833,7 +1883,8 @@ class FreqtradeBot(LoggingMixin):
leverage=trade.leverage, leverage=trade.leverage,
wallet_balance=trade.stake_amount, wallet_balance=trade.stake_amount,
)) ))
except DependencyException:
logger.warning('Unable to calculate liquidation price')
# Updating wallets when order is closed # Updating wallets when order is closed
self.wallets.update() self.wallets.update()
Trade.commit() Trade.commit()
View File
@ -32,6 +32,7 @@ def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
logging.INFO if verbosity <= 2 else logging.DEBUG logging.INFO if verbosity <= 2 else logging.DEBUG
) )
logging.getLogger('telegram').setLevel(logging.INFO) logging.getLogger('telegram').setLevel(logging.INFO)
logging.getLogger('httpx').setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel( logging.getLogger('werkzeug').setLevel(
logging.ERROR if api_verbosity == 'error' else logging.INFO logging.ERROR if api_verbosity == 'error' else logging.INFO
View File
@ -9,7 +9,6 @@ from copy import deepcopy
from datetime import datetime, timedelta, timezone from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Tuple from typing import Any, Dict, List, Optional, Tuple
import pandas as pd
from numpy import nan from numpy import nan
from pandas import DataFrame from pandas import DataFrame
@ -28,8 +27,10 @@ from freqtrade.exchange import (amount_to_contract_precision, price_to_precision
from freqtrade.mixins import LoggingMixin from freqtrade.mixins import LoggingMixin
from freqtrade.optimize.backtest_caching import get_strategy_run_id from freqtrade.optimize.backtest_caching import get_strategy_run_id
from freqtrade.optimize.bt_progress import BTProgress from freqtrade.optimize.bt_progress import BTProgress
from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results, from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_rejected_signals,
store_backtest_signal_candles, generate_trade_signal_candles,
show_backtest_results,
store_backtest_analysis_results,
store_backtest_stats) store_backtest_stats)
from freqtrade.persistence import LocalTrade, Order, PairLocks, Trade from freqtrade.persistence import LocalTrade, Order, PairLocks, Trade
from freqtrade.plugins.pairlistmanager import PairListManager from freqtrade.plugins.pairlistmanager import PairListManager
@ -84,10 +85,11 @@ class Backtesting:
self.strategylist: List[IStrategy] = [] self.strategylist: List[IStrategy] = []
self.all_results: Dict[str, Dict] = {} self.all_results: Dict[str, Dict] = {}
self.processed_dfs: Dict[str, Dict] = {} self.processed_dfs: Dict[str, Dict] = {}
self.rejected_dict: Dict[str, List] = {}
self.rejected_df: Dict[str, Dict] = {}
self._exchange_name = self.config['exchange']['name'] self._exchange_name = self.config['exchange']['name']
self.exchange = ExchangeResolver.load_exchange( self.exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True)
self._exchange_name, self.config, load_leverage_tiers=True)
self.dataprovider = DataProvider(self.config, self.exchange) self.dataprovider = DataProvider(self.config, self.exchange)
if self.config.get('strategy_list'): if self.config.get('strategy_list'):
@ -1056,6 +1058,18 @@ class Backtesting:
return None return None
return row return row
def _collate_rejected(self, pair, row):
"""
Temporarily store rejected signal information for downstream use in backtesting_analysis
"""
# It could be fun to enable hyperopt mode to write
# a loss function to reduce rejected signals
if (self.config.get('export', 'none') == 'signals' and
self.dataprovider.runmode == RunMode.BACKTEST):
if pair not in self.rejected_dict:
self.rejected_dict[pair] = []
self.rejected_dict[pair].append([row[DATE_IDX], row[ENTER_TAG_IDX]])
def backtest_loop( def backtest_loop(
self, row: Tuple, pair: str, current_time: datetime, end_date: datetime, self, row: Tuple, pair: str, current_time: datetime, end_date: datetime,
open_trade_count_start: int, trade_dir: Optional[LongShort], open_trade_count_start: int, trade_dir: Optional[LongShort],
@ -1081,11 +1095,11 @@ class Backtesting:
if ( if (
(self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0) (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
and is_first and is_first
and self.trade_slot_available(open_trade_count_start)
and current_time != end_date and current_time != end_date
and trade_dir is not None and trade_dir is not None
and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir) and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir)
): ):
if (self.trade_slot_available(open_trade_count_start)):
trade = self._enter_trade(pair, row, trade_dir) trade = self._enter_trade(pair, row, trade_dir)
if trade: if trade:
# TODO: hacky workaround to avoid opening > max_open_trades # TODO: hacky workaround to avoid opening > max_open_trades
@ -1095,6 +1109,8 @@ class Backtesting:
# logger.debug(f"{pair} - Emulate creation of new trade: {trade}.") # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.")
LocalTrade.add_bt_trade(trade) LocalTrade.add_bt_trade(trade)
self.wallets.update() self.wallets.update()
else:
self._collate_rejected(pair, row)
for trade in list(LocalTrade.bt_trades_open_pp[pair]): for trade in list(LocalTrade.bt_trades_open_pp[pair]):
# 3. Process entry orders. # 3. Process entry orders.
@ -1236,8 +1252,8 @@ class Backtesting:
def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame], def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame],
timerange: TimeRange): timerange: TimeRange):
self.progress.init_step(BacktestState.ANALYZE, 0) self.progress.init_step(BacktestState.ANALYZE, 0)
strategy_name = strat.get_strategy_name()
logger.info(f"Running backtesting for Strategy {strat.get_strategy_name()}") logger.info(f"Running backtesting for Strategy {strategy_name}")
backtest_start_time = datetime.now(timezone.utc) backtest_start_time = datetime.now(timezone.utc)
self._set_strategy(strat) self._set_strategy(strat)
@ -1272,37 +1288,21 @@ class Backtesting:
) )
backtest_end_time = datetime.now(timezone.utc) backtest_end_time = datetime.now(timezone.utc)
results.update({ results.update({
'run_id': self.run_ids.get(strat.get_strategy_name(), ''), 'run_id': self.run_ids.get(strategy_name, ''),
'backtest_start_time': int(backtest_start_time.timestamp()), 'backtest_start_time': int(backtest_start_time.timestamp()),
'backtest_end_time': int(backtest_end_time.timestamp()), 'backtest_end_time': int(backtest_end_time.timestamp()),
}) })
self.all_results[self.strategy.get_strategy_name()] = results self.all_results[strategy_name] = results
if (self.config.get('export', 'none') == 'signals' and if (self.config.get('export', 'none') == 'signals' and
self.dataprovider.runmode == RunMode.BACKTEST): self.dataprovider.runmode == RunMode.BACKTEST):
self._generate_trade_signal_candles(preprocessed_tmp, results) self.processed_dfs[strategy_name] = generate_trade_signal_candles(
preprocessed_tmp, results)
self.rejected_df[strategy_name] = generate_rejected_signals(
preprocessed_tmp, self.rejected_dict)
return min_date, max_date return min_date, max_date
def _generate_trade_signal_candles(self, preprocessed_df, bt_results):
signal_candles_only = {}
for pair in preprocessed_df.keys():
signal_candles_only_df = DataFrame()
pairdf = preprocessed_df[pair]
resdf = bt_results['results']
pairresults = resdf.loc[(resdf["pair"] == pair)]
if pairdf.shape[0] > 0:
for t, v in pairresults.open_date.items():
allinds = pairdf.loc[(pairdf['date'] < v)]
signal_inds = allinds.iloc[[-1]]
signal_candles_only_df = pd.concat([signal_candles_only_df, signal_inds])
signal_candles_only[pair] = signal_candles_only_df
self.processed_dfs[self.strategy.get_strategy_name()] = signal_candles_only
def _get_min_cached_backtest_date(self): def _get_min_cached_backtest_date(self):
min_backtest_date = None min_backtest_date = None
backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT) backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT)
@ -1365,8 +1365,9 @@ class Backtesting:
if (self.config.get('export', 'none') == 'signals' and if (self.config.get('export', 'none') == 'signals' and
self.dataprovider.runmode == RunMode.BACKTEST): self.dataprovider.runmode == RunMode.BACKTEST):
store_backtest_signal_candles( store_backtest_analysis_results(
self.config['exportfilename'], self.processed_dfs, dt_appendix) self.config['exportfilename'], self.processed_dfs, self.rejected_df,
dt_appendix)
# Results may be mixed up now. Sort them so they follow --strategy-list order. # Results may be mixed up now. Sort them so they follow --strategy-list order.
if 'strategy_list' in self.config and len(self.results) > 0: if 'strategy_list' in self.config and len(self.results) > 0:
View File
@ -32,7 +32,7 @@ class EdgeCli:
# Ensure using dry-run # Ensure using dry-run
self.config['dry_run'] = True self.config['dry_run'] = True
self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT
self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config) self.exchange = ExchangeResolver.load_exchange(self.config)
self.strategy = StrategyResolver.load_strategy(self.config) self.strategy = StrategyResolver.load_strategy(self.config)
self.strategy.dp = DataProvider(config, self.exchange) self.strategy.dp = DataProvider(config, self.exchange)
View File
@ -4,7 +4,7 @@ from datetime import datetime, timedelta, timezone
from pathlib import Path from pathlib import Path
from typing import Any, Dict, List, Union from typing import Any, Dict, List, Union
from pandas import DataFrame, to_datetime from pandas import DataFrame, concat, to_datetime
from tabulate import tabulate from tabulate import tabulate
from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN, from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
@ -46,29 +46,80 @@ def store_backtest_stats(
file_dump_json(latest_filename, {'latest_backtest': str(filename.name)}) file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
def store_backtest_signal_candles( def _store_backtest_analysis_data(
recordfilename: Path, candles: Dict[str, Dict], dtappendix: str) -> Path: recordfilename: Path, data: Dict[str, Dict],
dtappendix: str, name: str) -> Path:
""" """
Stores backtest trade signal candles Stores backtest trade candles for analysis
:param recordfilename: Path object, which can either be a filename or a directory. :param recordfilename: Path object, which can either be a filename or a directory.
Filenames will be appended with a timestamp right before the suffix Filenames will be appended with a timestamp right before the suffix
while for directories, <directory>/backtest-result-<datetime>_signals.pkl will be used while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
as filename as filename
:param stats: Dict containing the backtesting signal candles :param candles: Dict containing the backtesting data for analysis
:param dtappendix: Datetime to use for the filename :param dtappendix: Datetime to use for the filename
:param name: Name to use for the file, e.g. signals, rejected
""" """
if recordfilename.is_dir(): if recordfilename.is_dir():
filename = (recordfilename / f'backtest-result-{dtappendix}_signals.pkl') filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
else: else:
filename = Path.joinpath( filename = Path.joinpath(
recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_signals.pkl' recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
) )
file_dump_joblib(filename, candles) file_dump_joblib(filename, data)
return filename return filename
def store_backtest_analysis_results(
recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
dtappendix: str) -> None:
_store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
_store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
bt_results: Dict[str, Any]) -> DataFrame:
signal_candles_only = {}
for pair in preprocessed_df.keys():
signal_candles_only_df = DataFrame()
pairdf = preprocessed_df[pair]
resdf = bt_results['results']
pairresults = resdf.loc[(resdf["pair"] == pair)]
if pairdf.shape[0] > 0:
for t, v in pairresults.open_date.items():
allinds = pairdf.loc[(pairdf['date'] < v)]
signal_inds = allinds.iloc[[-1]]
signal_candles_only_df = concat([
signal_candles_only_df.infer_objects(),
signal_inds.infer_objects()])
signal_candles_only[pair] = signal_candles_only_df
return signal_candles_only
def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
rejected_dict: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
rejected_candles_only = {}
for pair, signals in rejected_dict.items():
rejected_signals_only_df = DataFrame()
pairdf = preprocessed_df[pair]
for t in signals:
data_df_row = pairdf.loc[(pairdf['date'] == t[0])].copy()
data_df_row['pair'] = pair
data_df_row['enter_tag'] = t[1]
rejected_signals_only_df = concat([
rejected_signals_only_df.infer_objects(),
data_df_row.infer_objects()])
rejected_candles_only[pair] = rejected_signals_only_df
return rejected_candles_only
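An illustrative sketch of the expected shapes: rejected_dict maps pair -> [[date, enter_tag], ...] as collected by _collate_rejected, and the result keeps only the candle rows at those rejected entry dates (all values below are made up):

from pandas import DataFrame, Timestamp

pairdf = DataFrame({'date': [Timestamp('2018-01-29'), Timestamp('2018-01-30')],
                    'close': [1.0, 2.0]})
rejected = {'XRP/USDT': [[Timestamp('2018-01-30'), 'long_a']]}
out = generate_rejected_signals({'XRP/USDT': pairdf}, rejected)
assert list(out['XRP/USDT'][['pair', 'enter_tag']].iloc[0]) == ['XRP/USDT', 'long_a']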
def _get_line_floatfmt(stake_currency: str) -> List[str]: def _get_line_floatfmt(stake_currency: str) -> List[str]:
""" """
Generate floatformat (goes in line with _generate_result_line()) Generate floatformat (goes in line with _generate_result_line())
View File
@ -425,7 +425,7 @@ class LocalTrade():
@property @property
def close_date_utc(self): def close_date_utc(self):
return self.close_date.replace(tzinfo=timezone.utc) return self.close_date.replace(tzinfo=timezone.utc) if self.close_date else None
@property @property
def entry_side(self) -> str: def entry_side(self) -> str:
View File
@ -633,7 +633,7 @@ def load_and_plot_trades(config: Config):
""" """
strategy = StrategyResolver.load_strategy(config) strategy = StrategyResolver.load_strategy(config)
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config) exchange = ExchangeResolver.load_exchange(config)
IStrategy.dp = DataProvider(config, exchange) IStrategy.dp = DataProvider(config, exchange)
strategy.ft_bot_start() strategy.ft_bot_start()
strategy.bot_loop_start(datetime.now(timezone.utc)) strategy.bot_loop_start(datetime.now(timezone.utc))
@ -678,7 +678,7 @@ def plot_profit(config: Config) -> None:
if 'timeframe' not in config: if 'timeframe' not in config:
raise OperationalException('Timeframe must be set in either config or via --timeframe.') raise OperationalException('Timeframe must be set in either config or via --timeframe.')
exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config) exchange = ExchangeResolver.load_exchange(config)
plot_elements = init_plotscript(config, list(exchange.markets)) plot_elements = init_plotscript(config, list(exchange.markets))
trades = plot_elements['trades'] trades = plot_elements['trades']
# Filter trades to relevant pairs # Filter trades to relevant pairs
View File
@ -2,9 +2,10 @@
This module loads custom exchanges This module loads custom exchanges
""" """
import logging import logging
from typing import Optional
import freqtrade.exchange as exchanges import freqtrade.exchange as exchanges
from freqtrade.constants import Config from freqtrade.constants import Config, ExchangeConfig
from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, Exchange from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, Exchange
from freqtrade.resolvers import IResolver from freqtrade.resolvers import IResolver
@ -19,13 +20,14 @@ class ExchangeResolver(IResolver):
object_type = Exchange object_type = Exchange
@staticmethod @staticmethod
def load_exchange(exchange_name: str, config: Config, validate: bool = True, def load_exchange(config: Config, *, exchange_config: Optional[ExchangeConfig] = None,
load_leverage_tiers: bool = False) -> Exchange: validate: bool = True, load_leverage_tiers: bool = False) -> Exchange:
""" """
Load the custom class from config parameter Load the custom class from config parameter
:param exchange_name: name of the Exchange to load :param exchange_name: name of the Exchange to load
:param config: configuration dictionary :param config: configuration dictionary
""" """
exchange_name: str = config['exchange']['name']
# Map exchange name to avoid duplicate classes for identical exchanges # Map exchange name to avoid duplicate classes for identical exchanges
exchange_name = MAP_EXCHANGE_CHILDCLASS.get(exchange_name, exchange_name) exchange_name = MAP_EXCHANGE_CHILDCLASS.get(exchange_name, exchange_name)
exchange_name = exchange_name.title() exchange_name = exchange_name.title()
@ -36,13 +38,14 @@ class ExchangeResolver(IResolver):
kwargs={ kwargs={
'config': config, 'config': config,
'validate': validate, 'validate': validate,
'exchange_config': exchange_config,
'load_leverage_tiers': load_leverage_tiers} 'load_leverage_tiers': load_leverage_tiers}
) )
except ImportError: except ImportError:
logger.info( logger.info(
f"No {exchange_name} specific subclass found. Using the generic class instead.") f"No {exchange_name} specific subclass found. Using the generic class instead.")
if not exchange: if not exchange:
exchange = Exchange(config, validate=validate) exchange = Exchange(config, validate=validate, exchange_config=exchange_config,)
return exchange return exchange
@staticmethod @staticmethod
View File
@ -11,6 +11,7 @@ from freqtrade.configuration.config_validation import validate_config_consistenc
from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result
from freqtrade.enums import BacktestState from freqtrade.enums import BacktestState
from freqtrade.exceptions import DependencyException, OperationalException from freqtrade.exceptions import DependencyException, OperationalException
from freqtrade.exchange.common import remove_exchange_credentials
from freqtrade.misc import deep_merge_dicts from freqtrade.misc import deep_merge_dicts
from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest, from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
BacktestResponse) BacktestResponse)
@ -38,6 +39,7 @@ async def api_start_backtest( # noqa: C901
raise HTTPException(status_code=500, detail="base64 encoded strategies are not allowed.") raise HTTPException(status_code=500, detail="base64 encoded strategies are not allowed.")
btconfig = deepcopy(config) btconfig = deepcopy(config)
remove_exchange_credentials(btconfig['exchange'], True)
settings = dict(bt_settings) settings = dict(bt_settings)
if settings.get('freqai', None) is not None: if settings.get('freqai', None) is not None:
settings['freqai'] = dict(settings['freqai']) settings['freqai'] = dict(settings['freqai'])
View File
@ -100,8 +100,10 @@ class Profit(BaseModel):
trade_count: int trade_count: int
closed_trade_count: int closed_trade_count: int
first_trade_date: str first_trade_date: str
first_trade_humanized: str
first_trade_timestamp: int first_trade_timestamp: int
latest_trade_date: str latest_trade_date: str
latest_trade_humanized: str
latest_trade_timestamp: int latest_trade_timestamp: int
avg_duration: str avg_duration: str
best_pair: str best_pair: str
View File
@ -44,7 +44,9 @@ logger = logging.getLogger(__name__)
# 2.24: Add cancel_open_order endpoint # 2.24: Add cancel_open_order endpoint
# 2.25: Add several profit values to /status endpoint # 2.25: Add several profit values to /status endpoint
# 2.26: increase /balance output # 2.26: increase /balance output
API_VERSION = 2.26 # 2.27: Add /trades/<id>/reload endpoint
# 2.28: Switch reload endpoint to Post
API_VERSION = 2.28
# Public API, requires no auth. # Public API, requires no auth.
router_public = APIRouter() router_public = APIRouter()
@ -127,11 +129,17 @@ def trades_delete(tradeid: int, rpc: RPC = Depends(get_rpc)):
@router.delete('/trades/{tradeid}/open-order', response_model=OpenTradeSchema, tags=['trading']) @router.delete('/trades/{tradeid}/open-order', response_model=OpenTradeSchema, tags=['trading'])
def cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)): def trade_cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)):
rpc._rpc_cancel_open_order(tradeid) rpc._rpc_cancel_open_order(tradeid)
return rpc._rpc_trade_status([tradeid])[0] return rpc._rpc_trade_status([tradeid])[0]
@router.post('/trades/{tradeid}/reload', response_model=OpenTradeSchema, tags=['trading'])
def trade_reload(tradeid: int, rpc: RPC = Depends(get_rpc)):
rpc._rpc_reload_trade_from_exchange(tradeid)
return rpc._rpc_trade_status([tradeid])[0]
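A hedged example of calling the new endpoint; the /api/v1 base path and basic-auth credentials are assumptions following the project's REST client conventions:

import requests

resp = requests.post("http://127.0.0.1:8080/api/v1/trades/1/reload",
                     auth=("freqtrader", "password"))
print(resp.json())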
# TODO: Missing response model # TODO: Missing response model
@router.get('/edge', tags=['info']) @router.get('/edge', tags=['info'])
def edge(rpc: RPC = Depends(get_rpc)): def edge(rpc: RPC = Depends(get_rpc)):
@ -247,14 +255,17 @@ def pair_candles(
@router.get('/pair_history', response_model=PairHistory, tags=['candle data']) @router.get('/pair_history', response_model=PairHistory, tags=['candle data'])
def pair_history(pair: str, timeframe: str, timerange: str, strategy: str, def pair_history(pair: str, timeframe: str, timerange: str, strategy: str,
freqaimodel: Optional[str] = None,
config=Depends(get_config), exchange=Depends(get_exchange)): config=Depends(get_config), exchange=Depends(get_exchange)):
# The initial call to this endpoint can be slow, as it may need to initialize # The initial call to this endpoint can be slow, as it may need to initialize
# the exchange class. # the exchange class.
config = deepcopy(config) config = deepcopy(config)
config.update({ config.update({
'strategy': strategy, 'strategy': strategy,
'timerange': timerange,
'freqaimodel': freqaimodel if freqaimodel else config.get('freqaimodel'),
}) })
return RPC._rpc_analysed_history_full(config, pair, timeframe, timerange, exchange) return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange)
@router.get('/plot_config', response_model=PlotConfig, tags=['candle data']) @router.get('/plot_config', response_model=PlotConfig, tags=['candle data'])
View File
@ -46,7 +46,7 @@ def get_exchange(config=Depends(get_config)):
if not ApiServer._exchange: if not ApiServer._exchange:
from freqtrade.resolvers import ExchangeResolver from freqtrade.resolvers import ExchangeResolver
ApiServer._exchange = ExchangeResolver.load_exchange( ApiServer._exchange = ExchangeResolver.load_exchange(
config['exchange']['name'], config, load_leverage_tiers=False) config, load_leverage_tiers=False)
return ApiServer._exchange return ApiServer._exchange
View File
@ -420,16 +420,15 @@ class RPC:
else: else:
return 'draws' return 'draws'
trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False) trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False)
# Sell reason # Duration
dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
# Exit reason
exit_reasons = {} exit_reasons = {}
for trade in trades: for trade in trades:
if trade.exit_reason not in exit_reasons: if trade.exit_reason not in exit_reasons:
exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0} exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0}
exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1 exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1
# Duration
dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
for trade in trades:
if trade.close_date is not None and trade.open_date is not None: if trade.close_date is not None and trade.open_date is not None:
trade_dur = (trade.close_date - trade.open_date).total_seconds() trade_dur = (trade.close_date - trade.open_date).total_seconds()
dur[trade_win_loss(trade)].append(trade_dur) dur[trade_win_loss(trade)].append(trade_dur)
@ -541,8 +540,8 @@ class RPC:
fiat_display_currency fiat_display_currency
) if self._fiat_converter else 0 ) if self._fiat_converter else 0
first_date = trades[0].open_date if trades else None first_date = trades[0].open_date_utc if trades else None
last_date = trades[-1].open_date if trades else None last_date = trades[-1].open_date_utc if trades else None
num = float(len(durations) or 1) num = float(len(durations) or 1)
bot_start = KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME) bot_start = KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME)
return { return {
@ -564,9 +563,11 @@ class RPC:
'profit_all_fiat': profit_all_fiat, 'profit_all_fiat': profit_all_fiat,
'trade_count': len(trades), 'trade_count': len(trades),
'closed_trade_count': len([t for t in trades if not t.is_open]), 'closed_trade_count': len([t for t in trades if not t.is_open]),
'first_trade_date': arrow.get(first_date).humanize() if first_date else '', 'first_trade_date': first_date.strftime(DATETIME_PRINT_FORMAT) if first_date else '',
'first_trade_humanized': arrow.get(first_date).humanize() if first_date else '',
'first_trade_timestamp': int(first_date.timestamp() * 1000) if first_date else 0, 'first_trade_timestamp': int(first_date.timestamp() * 1000) if first_date else 0,
'latest_trade_date': arrow.get(last_date).humanize() if last_date else '', 'latest_trade_date': last_date.strftime(DATETIME_PRINT_FORMAT) if last_date else '',
'latest_trade_humanized': arrow.get(last_date).humanize() if last_date else '',
'latest_trade_timestamp': int(last_date.timestamp() * 1000) if last_date else 0, 'latest_trade_timestamp': int(last_date.timestamp() * 1000) if last_date else 0,
'avg_duration': str(timedelta(seconds=sum(durations) / num)).split('.')[0], 'avg_duration': str(timedelta(seconds=sum(durations) / num)).split('.')[0],
'best_pair': best_pair[0] if best_pair else '', 'best_pair': best_pair[0] if best_pair else '',
@ -741,6 +742,18 @@ class RPC:
return {'status': 'No more entries will occur from now. Run /reload_config to reset.'} return {'status': 'No more entries will occur from now. Run /reload_config to reset.'}
def _rpc_reload_trade_from_exchange(self, trade_id: int) -> Dict[str, str]:
"""
Handler for reload_trade_from_exchange.
Reloads a trade from its orders, should manual interaction have happened.
"""
trade = Trade.get_trades(trade_filter=[Trade.id == trade_id]).first()
if not trade:
raise RPCException(f"Could not find trade with id {trade_id}.")
self._freqtrade.handle_onexchange_order(trade)
return {'status': 'Reloaded from orders from exchange'}
def __exec_force_exit(self, trade: Trade, ordertype: Optional[str], def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],
amount: Optional[float] = None) -> None: amount: Optional[float] = None) -> None:
# Check if there is an open order # Check if there is an open order
@ -1216,8 +1229,8 @@ class RPC:
@staticmethod @staticmethod
def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str, def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str,
timerange: str, exchange) -> Dict[str, Any]: exchange) -> Dict[str, Any]:
timerange_parsed = TimeRange.parse_timerange(timerange) timerange_parsed = TimeRange.parse_timerange(config.get('timerange'))
_data = load_data( _data = load_data(
datadir=config["datadir"], datadir=config["datadir"],
@ -1228,7 +1241,8 @@ class RPC:
candle_type=config.get('candle_type_def', CandleType.SPOT) candle_type=config.get('candle_type_def', CandleType.SPOT)
) )
if pair not in _data: if pair not in _data:
raise RPCException(f"No data for {pair}, {timeframe} in {timerange} found.") raise RPCException(
f"No data for {pair}, {timeframe} in {config.get('timerange')} found.")
from freqtrade.data.dataprovider import DataProvider from freqtrade.data.dataprovider import DataProvider
from freqtrade.resolvers.strategy_resolver import StrategyResolver from freqtrade.resolvers.strategy_resolver import StrategyResolver
strategy = StrategyResolver.load_strategy(config) strategy = StrategyResolver.load_strategy(config)
View File
@ -196,6 +196,7 @@ class Telegram(RPCHandler):
self._force_enter, order_side=SignalDirection.LONG)), self._force_enter, order_side=SignalDirection.LONG)),
CommandHandler('forceshort', partial( CommandHandler('forceshort', partial(
self._force_enter, order_side=SignalDirection.SHORT)), self._force_enter, order_side=SignalDirection.SHORT)),
CommandHandler('reload_trade', self._reload_trade_from_exchange),
CommandHandler('trades', self._trades), CommandHandler('trades', self._trades),
CommandHandler('delete', self._delete_trade), CommandHandler('delete', self._delete_trade),
CommandHandler(['coo', 'cancel_open_order'], self._cancel_open_order), CommandHandler(['coo', 'cancel_open_order'], self._cancel_open_order),
@ -852,8 +853,8 @@ class Telegram(RPCHandler):
profit_all_percent = stats['profit_all_percent'] profit_all_percent = stats['profit_all_percent']
profit_all_fiat = stats['profit_all_fiat'] profit_all_fiat = stats['profit_all_fiat']
trade_count = stats['trade_count'] trade_count = stats['trade_count']
first_trade_date = stats['first_trade_date'] first_trade_date = f"{stats['first_trade_humanized']} ({stats['first_trade_date']})"
latest_trade_date = stats['latest_trade_date'] latest_trade_date = f"{stats['latest_trade_humanized']} ({stats['latest_trade_date']})"
avg_duration = stats['avg_duration'] avg_duration = stats['avg_duration']
best_pair = stats['best_pair'] best_pair = stats['best_pair']
best_pair_profit_ratio = stats['best_pair_profit_ratio'] best_pair_profit_ratio = stats['best_pair_profit_ratio']
@ -1074,6 +1075,17 @@ class Telegram(RPCHandler):
msg = self._rpc._rpc_stopentry() msg = self._rpc._rpc_stopentry()
await self._send_msg(f"Status: `{msg['status']}`") await self._send_msg(f"Status: `{msg['status']}`")
@authorized_only
async def _reload_trade_from_exchange(self, update: Update, context: CallbackContext) -> None:
"""
Handler for /reload_trade <tradeid>.
"""
if not context.args or len(context.args) == 0:
raise RPCException("Trade-id not set.")
trade_id = int(context.args[0])
msg = self._rpc._rpc_reload_trade_from_exchange(trade_id)
await self._send_msg(f"Status: `{msg['status']}`")
@authorized_only @authorized_only
async def _force_exit(self, update: Update, context: CallbackContext) -> None: async def _force_exit(self, update: Update, context: CallbackContext) -> None:
""" """
@ -1561,6 +1573,7 @@ class Telegram(RPCHandler):
"*/fx <trade_id>|all:* `Alias to /forceexit`\n" "*/fx <trade_id>|all:* `Alias to /forceexit`\n"
f"{force_enter_text if self._config.get('force_entry_enable', False) else ''}" f"{force_enter_text if self._config.get('force_entry_enable', False) else ''}"
"*/delete <trade_id>:* `Instantly delete the given trade in the database`\n" "*/delete <trade_id>:* `Instantly delete the given trade in the database`\n"
"*/reload_trade <trade_id>:* `Relade trade from exchange Orders`\n"
"*/cancel_open_order <trade_id>:* `Cancels open orders for trade. " "*/cancel_open_order <trade_id>:* `Cancels open orders for trade. "
"Only valid when the trade has open orders.`\n" "Only valid when the trade has open orders.`\n"
"*/coo <trade_id>|all:* `Alias to /cancel_open_order`\n" "*/coo <trade_id>|all:* `Alias to /cancel_open_order`\n"
View File
@ -15,12 +15,15 @@ logger = logging.getLogger(__name__)
class FreqaiExampleStrategy(IStrategy): class FreqaiExampleStrategy(IStrategy):
""" """
Example strategy showing how the user connects their own Example strategy showing how the user connects their own
IFreqaiModel to the strategy. Namely, the user uses: IFreqaiModel to the strategy.
self.freqai.start(dataframe, metadata)
to make predictions on their data. feature_engineering_*() automatically Warning! This is a showcase of functionality,
generate the variety of features indicated by the user in the which means that it is designed to show various functions of FreqAI
canonical freqtrade configuration file under config['freqai']. and it runs on all computers. We use this showcase to help users
understand how to build a strategy, and we use it as a benchmark
to help debug possible problems.
This means this is *not* meant to be run live in production.
""" """
minimal_roi = {"0": 0.1, "240": -1} minimal_roi = {"0": 0.1, "240": -1}
View File
@ -181,6 +181,35 @@ class Wallets:
def get_all_positions(self) -> Dict[str, PositionWallet]: def get_all_positions(self) -> Dict[str, PositionWallet]:
return self._positions return self._positions
def _check_exit_amount(self, trade: Trade) -> bool:
if trade.trading_mode != TradingMode.FUTURES:
# Slightly higher offset than in safe_exit_amount.
wallet_amount: float = self.get_total(trade.safe_base_currency) * (2 - 0.981)
else:
# wallet_amount: float = self.wallets.get_free(trade.safe_base_currency)
position = self._positions.get(trade.pair)
if position is None:
# We don't own anything :O
return False
wallet_amount = position.position
if wallet_amount >= trade.amount:
return True
return False
def check_exit_amount(self, trade: Trade) -> bool:
"""
Checks if the exit amount is available in the wallet.
:param trade: Trade to check
:return: True if the exit amount is available, False otherwise
"""
if not self._check_exit_amount(trade):
# Update wallets just to make sure
self.update()
return self._check_exit_amount(trade)
return True
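A worked example of the spot-branch tolerance above, with illustrative numbers: the factor (2 - 0.981) = 1.019 lets the check pass when the wallet total is up to roughly 1.9% short of the trade amount:

amount = 1.0
total = 0.982                          # ~1.8% short of the trade amount
assert total * (2 - 0.981) >= amount   # 1.00066... >= 1.0, check passes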
def get_starting_balance(self) -> float: def get_starting_balance(self) -> float:
""" """
Retrieves starting balance - based on either available capital, Retrieves starting balance - based on either available capital,
View File
@ -1,5 +1,5 @@
[build-system] [build-system]
requires = ["setuptools >= 46.4.0", "wheel"] requires = ["setuptools >= 64.0.0", "wheel"]
build-backend = "setuptools.build_meta" build-backend = "setuptools.build_meta"
[tool.black] [tool.black]
View File
@ -7,9 +7,9 @@
-r docs/requirements-docs.txt -r docs/requirements-docs.txt
coveralls==3.3.1 coveralls==3.3.1
ruff==0.0.262 ruff==0.0.267
mypy==1.2.0 mypy==1.3.0
pre-commit==3.2.2 pre-commit==3.3.1
pytest==7.3.1 pytest==7.3.1
pytest-asyncio==0.21.0 pytest-asyncio==0.21.0
pytest-cov==4.0.0 pytest-cov==4.0.0
@ -20,11 +20,11 @@ isort==5.12.0
time-machine==2.9.0 time-machine==2.9.0
# Convert jupyter notebooks to markdown documents # Convert jupyter notebooks to markdown documents
nbconvert==7.3.1 nbconvert==7.4.0
# mypy types # mypy types
types-cachetools==5.3.0.5 types-cachetools==5.3.0.5
types-filelock==3.2.7 types-filelock==3.2.7
types-requests==2.28.11.17 types-requests==2.30.0.0
types-tabulate==0.9.0.2 types-tabulate==0.9.0.2
types-python-dateutil==2.8.19.12 types-python-dateutil==2.8.19.13
View File
@ -2,11 +2,10 @@
-r requirements-freqai.txt -r requirements-freqai.txt
# Required for freqai-rl # Required for freqai-rl
torch==1.13.1; python_version < '3.11' torch==2.0.1
stable-baselines3==1.7.0; python_version < '3.11' # until these branches are released we can use this
sb3-contrib==1.7.0; python_version < '3.11' gymnasium==0.28.1
# Gym is forced to this version by stable-baselines3. stable_baselines3==2.0.0a5
setuptools==65.5.1 # Should be removed when gym is fixed. sb3_contrib>=2.0.0a4
gym==0.21; python_version < '3.11'
# Progress bar for stable-baselines3 and sb3-contrib # Progress bar for stable-baselines3 and sb3-contrib
tqdm==4.65.0; python_version < '3.11' tqdm==4.65.0
View File
@ -5,7 +5,8 @@
# Required for freqai # Required for freqai
scikit-learn==1.1.3 scikit-learn==1.1.3
joblib==1.2.0 joblib==1.2.0
catboost==1.1.1; platform_machine != 'aarch64' and 'arm' not in platform_machine and python_version < '3.11' catboost==1.1.1; sys_platform == 'darwin' and python_version < '3.9'
catboost==1.2; 'arm' not in platform_machine and (sys_platform != 'darwin' or python_version >= '3.9')
lightgbm==3.3.5 lightgbm==3.3.5
xgboost==1.7.5 xgboost==1.7.5
tensorboard==2.12.2 tensorboard==2.13.0
View File
@ -2,17 +2,18 @@ numpy==1.24.3
pandas==2.0.1 pandas==2.0.1
pandas-ta==0.3.14b pandas-ta==0.3.14b
ccxt==3.0.75 ccxt==3.0.103
cryptography==40.0.2 cryptography==40.0.2; platform_machine != 'armv7l'
cryptography==40.0.1; platform_machine == 'armv7l'
aiohttp==3.8.4 aiohttp==3.8.4
SQLAlchemy==2.0.10 SQLAlchemy==2.0.13
python-telegram-bot==20.2 python-telegram-bot==20.3
# can't be hard-pinned due to telegram-bot pinning httpx with ~ # can't be hard-pinned due to telegram-bot pinning httpx with ~
httpx>=0.23.3 httpx>=0.23.3
arrow==1.2.3 arrow==1.2.3
cachetools==4.2.2 cachetools==4.2.2
requests==2.28.2 requests==2.30.0
urllib3==1.26.15 urllib3==2.0.2
jsonschema==4.17.3 jsonschema==4.17.3
TA-Lib==0.4.26 TA-Lib==0.4.26
technical==1.4.0 technical==1.4.0
@ -22,8 +23,8 @@ jinja2==3.1.2
tables==3.8.0 tables==3.8.0
blosc==1.11.1 blosc==1.11.1
joblib==1.2.0 joblib==1.2.0
rich==13.3.4 rich==13.3.5
pyarrow==11.0.0; platform_machine != 'armv7l' pyarrow==12.0.0; platform_machine != 'armv7l'
# find first, C search in arrays # find first, C search in arrays
py_find_1st==1.1.5 py_find_1st==1.1.5
@ -31,7 +32,7 @@ py_find_1st==1.1.5
# Load ticker files 30% faster # Load ticker files 30% faster
python-rapidjson==1.10 python-rapidjson==1.10
# Properly format api responses # Properly format api responses
orjson==3.8.10 orjson==3.8.12
# Notify systemd # Notify systemd
sdnotify==0.3.2 sdnotify==0.3.2
@ -39,8 +40,8 @@ sdnotify==0.3.2
# API Server # API Server
fastapi==0.95.1 fastapi==0.95.1
pydantic==1.10.7 pydantic==1.10.7
uvicorn==0.21.1 uvicorn==0.22.0
pyjwt==2.6.0 pyjwt==2.7.0
aiofiles==23.1.0 aiofiles==23.1.0
psutil==5.9.5 psutil==5.9.5
@ -56,7 +57,8 @@ python-dateutil==2.8.2
schedule==1.2.0 schedule==1.2.0
#WS Messages #WS Messages
websockets==11.0.2 websockets==11.0.3
janus==1.0.0 janus==1.0.0
ast-comments==1.0.1 ast-comments==1.0.1
packaging==23.1
View File
@ -348,12 +348,13 @@ class FtRestClient():
params['limit'] = limit params['limit'] = limit
return self._get("pair_candles", params=params) return self._get("pair_candles", params=params)
def pair_history(self, pair, timeframe, strategy, timerange=None): def pair_history(self, pair, timeframe, strategy, timerange=None, freqaimodel=None):
"""Return historic, analyzed dataframe """Return historic, analyzed dataframe
:param pair: Pair to get data for :param pair: Pair to get data for
:param timeframe: Only pairs with this timeframe available. :param timeframe: Only pairs with this timeframe available.
:param strategy: Strategy to analyze and get values for :param strategy: Strategy to analyze and get values for
:param freqaimodel: FreqAI model to use for analysis
:param timerange: Timerange to get data for (same format than --timerange endpoints) :param timerange: Timerange to get data for (same format than --timerange endpoints)
:return: json object :return: json object
""" """
@ -361,6 +362,7 @@ class FtRestClient():
"pair": pair, "pair": pair,
"timeframe": timeframe, "timeframe": timeframe,
"strategy": strategy, "strategy": strategy,
"freqaimodel": freqaimodel,
"timerange": timerange if timerange else '', "timerange": timerange if timerange else '',
}) })
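An example call using the updated signature (server URL, credentials, and model name are placeholders):

client = FtRestClient("http://127.0.0.1:8080", "freqtrader", "password")
res = client.pair_history("BTC/USDT", "5m", "SampleStrategy",
                          timerange="20230101-20230201",
                          freqaimodel="XGBoostRegressor")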
View File
@ -12,16 +12,19 @@ hyperopt = [
freqai = [ freqai = [
'scikit-learn', 'scikit-learn',
'joblib',
'catboost; platform_machine != "aarch64"', 'catboost; platform_machine != "aarch64"',
'lightgbm', 'lightgbm',
'xgboost' 'xgboost',
'tensorboard'
] ]
freqai_rl = [ freqai_rl = [
'torch', 'torch',
'gymnasium',
'stable-baselines3', 'stable-baselines3',
'gym==0.21', 'sb3-contrib',
'sb3-contrib' 'tqdm'
] ]
hdf5 = [ hdf5 = [
@ -32,11 +35,20 @@ hdf5 = [
develop = [ develop = [
'coveralls', 'coveralls',
'mypy', 'mypy',
'ruff',
'pre-commit',
'pytest', 'pytest',
'pytest-asyncio', 'pytest-asyncio',
'pytest-cov', 'pytest-cov',
'pytest-mock', 'pytest-mock',
'pytest-random-order', 'pytest-random-order',
'isort',
'time-machine',
'types-cachetools',
'types-filelock',
'types-requests',
'types-tabulate',
'types-python-dateutil'
] ]
jupyter = [ jupyter = [
@ -57,9 +69,9 @@ setup(
], ],
install_requires=[ install_requires=[
# from requirements.txt # from requirements.txt
'ccxt>=2.6.26', 'ccxt>=3.0.0',
'SQLAlchemy>=2.0.6', 'SQLAlchemy>=2.0.6',
'python-telegram-bot>=13.4', 'python-telegram-bot>=20.1',
'arrow>=0.17.0', 'arrow>=0.17.0',
'cachetools', 'cachetools',
'requests', 'requests',
@ -91,7 +103,13 @@ setup(
'aiofiles', 'aiofiles',
'schedule', 'schedule',
'websockets', 'websockets',
'janus' 'janus',
'ast-comments',
'aiohttp',
'cryptography',
'httpx',
'python-dateutil',
'packaging',
], ],
extras_require={ extras_require={
'dev': all_extra, 'dev': all_extra,
View File
@@ -25,7 +25,7 @@ function check_installed_python() {
         exit 2
     fi
-    for v in 10 9 8
+    for v in 11 10 9 8
     do
         PYTHON="python3.${v}"
         which $PYTHON
@@ -49,8 +49,7 @@ function updateenv() {
     source .env/bin/activate
     SYS_ARCH=$(uname -m)
     echo "pip install in-progress. Please wait..."
-    # Setuptools 65.5.0 is the last version that can install gym==0.21.0
-    ${PYTHON} -m pip install --upgrade pip==23.0.1 wheel==0.38.4 setuptools==65.5.1
+    ${PYTHON} -m pip install --upgrade pip wheel setuptools
     REQUIREMENTS_HYPEROPT=""
     REQUIREMENTS_PLOT=""
     REQUIREMENTS_FREQAI=""
@@ -259,7 +258,7 @@ function install() {
         install_redhat
     else
         echo "This script does not support your OS."
-        echo "If you have Python version 3.8 - 3.10, pip, virtualenv, ta-lib you can continue."
+        echo "If you have Python version 3.8 - 3.11, pip, virtualenv, ta-lib you can continue."
         echo "Wait 10 seconds to continue the next install steps or use ctrl+c to interrupt this shell."
         sleep 10
     fi

View File

@@ -181,7 +181,7 @@ def get_patched_exchange(mocker, config, api_mock=None, id='binance',
     patch_exchange(mocker, api_mock, id, mock_markets, mock_supported_modes)
     config['exchange']['name'] = id
     try:
-        exchange = ExchangeResolver.load_exchange(id, config, load_leverage_tiers=True)
+        exchange = ExchangeResolver.load_exchange(config, load_leverage_tiers=True)
     except ImportError:
         exchange = Exchange(config)
     return exchange
@@ -411,6 +411,14 @@ def patch_gc(mocker) -> None:
     mocker.patch("freqtrade.main.gc_set_threshold")


+@pytest.fixture(autouse=True)
+def user_dir(mocker, tmpdir) -> Path:
+    user_dir = Path(tmpdir) / "user_data"
+    mocker.patch('freqtrade.configuration.configuration.create_userdata_dir',
+                 return_value=user_dir)
+    return user_dir
+
+
 @pytest.fixture(autouse=True)
 def patch_coingekko(mocker) -> None:
     """
@@ -485,7 +493,6 @@ def get_default_conf(testdatadir):
     },
     "exchange": {
         "name": "binance",
-        "enabled": True,
         "key": "key",
         "secret": "secret",
         "pair_whitelist": [

View File

@@ -18,8 +18,9 @@ def entryexitanalysis_cleanup() -> None:
     Backtesting.cleanup()


-def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmpdir, capsys):
+def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, user_dir, capsys):
     caplog.set_level(logging.INFO)
+    (user_dir / 'backtest_results').mkdir(parents=True, exist_ok=True)
     default_conf.update({
         "use_exit_signal": True,
@@ -80,7 +81,7 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
         'backtesting',
         '--config', 'config.json',
         '--datadir', str(testdatadir),
-        '--user-data-dir', str(tmpdir),
+        '--user-data-dir', str(user_dir),
         '--timeframe', '5m',
         '--timerange', '1515560100-1517287800',
         '--export', 'signals',
@@ -98,7 +99,7 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
         'backtesting-analysis',
         '--config', 'config.json',
         '--datadir', str(testdatadir),
-        '--user-data-dir', str(tmpdir),
+        '--user-data-dir', str(user_dir),
     ]

     # test group 0 and indicator list
@@ -200,8 +201,17 @@ def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, tmp
     assert 'trailing_stop_loss' in captured.out

     # test date filtering
-    args = get_args(base_args + ['--timerange', "20180129-20180130"])
+    args = get_args(base_args +
+                    ['--analysis-groups', "0", "1", "2",
+                     '--timerange', "20180129-20180130"]
+                    )
     start_analysis_entries_exits(args)
     captured = capsys.readouterr()
     assert 'enter_tag_long_a' in captured.out
     assert 'enter_tag_long_b' not in captured.out
+
+    # Due to the backtest mock, there's no rejected signals generated.
+    args = get_args(base_args + ['--rejected-signals'])
+    start_analysis_entries_exits(args)
+    captured = capsys.readouterr()
+    assert 'no rejected signals' in captured.out

View File

@@ -302,7 +302,7 @@ def exchange(request, exchange_conf):
         exchange_conf, EXCHANGES[request.param].get('use_ci_proxy', False))
     exchange_conf['exchange']['name'] = request.param
     exchange_conf['stake_currency'] = EXCHANGES[request.param]['stake_currency']
-    exchange = ExchangeResolver.load_exchange(request.param, exchange_conf, validate=True)
+    exchange = ExchangeResolver.load_exchange(exchange_conf, validate=True)
     yield exchange, request.param
@@ -330,7 +330,7 @@ def exchange_futures(request, exchange_conf, class_mocker):
     class_mocker.patch(f'{EXMS}.cache_leverage_tiers')

     exchange = ExchangeResolver.load_exchange(
-        request.param, exchange_conf, validate=True, load_leverage_tiers=True)
+        exchange_conf, validate=True, load_leverage_tiers=True)
     yield exchange, request.param

View File

@@ -20,7 +20,7 @@ from freqtrade.exchange import (Binance, Bittrex, Exchange, Kraken, amount_to_pr
                                 timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date,
                                 timeframe_to_prev_date, timeframe_to_seconds)
 from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, API_RETRY_COUNT,
-                                       calculate_backoff, remove_credentials)
+                                       calculate_backoff, remove_exchange_credentials)
 from freqtrade.exchange.exchange import amount_to_contract_precision
 from freqtrade.resolvers.exchange_resolver import ExchangeResolver
 from tests.conftest import (EXMS, generate_test_data_raw, get_mock_coro, get_patched_exchange,
@@ -137,16 +137,14 @@ def test_init(default_conf, mocker, caplog):
     assert log_has('Instance is running with dry_run enabled', caplog)


-def test_remove_credentials(default_conf, caplog) -> None:
+def test_remove_exchange_credentials(default_conf) -> None:
     conf = deepcopy(default_conf)
-    conf['dry_run'] = False
-    remove_credentials(conf)
+    remove_exchange_credentials(conf['exchange'], False)

     assert conf['exchange']['key'] != ''
     assert conf['exchange']['secret'] != ''

-    conf['dry_run'] = True
-    remove_credentials(conf)
+    remove_exchange_credentials(conf['exchange'], True)
     assert conf['exchange']['key'] == ''
     assert conf['exchange']['secret'] == ''
+    assert conf['exchange']['password'] == ''
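
The renamed helper now operates on the exchange sub-config and takes the dry-run flag explicitly instead of reading it from the full config. A sketch of the behaviour the test asserts (inferred from the assertions, not copied from the source):

```python
def remove_exchange_credentials(exchange_config: dict, dry_run: bool) -> None:
    # In dry-run mode no real credentials are needed, so they are blanked out
    if dry_run:
        exchange_config['key'] = ''
        exchange_config['secret'] = ''
        exchange_config['password'] = ''
```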
@@ -228,27 +226,30 @@ def test_exchange_resolver(default_conf, mocker, caplog):
     mocker.patch(f'{EXMS}.validate_timeframes')
     mocker.patch(f'{EXMS}.validate_stakecurrency')
     mocker.patch(f'{EXMS}.validate_pricing')
-    exchange = ExchangeResolver.load_exchange('zaif', default_conf)
+    default_conf['exchange']['name'] = 'zaif'
+    exchange = ExchangeResolver.load_exchange(default_conf)
     assert isinstance(exchange, Exchange)
     assert log_has_re(r"No .* specific subclass found. Using the generic class instead.", caplog)
     caplog.clear()

-    exchange = ExchangeResolver.load_exchange('Bittrex', default_conf)
+    default_conf['exchange']['name'] = 'Bittrex'
+    exchange = ExchangeResolver.load_exchange(default_conf)
     assert isinstance(exchange, Exchange)
     assert isinstance(exchange, Bittrex)
     assert not log_has_re(r"No .* specific subclass found. Using the generic class instead.",
                           caplog)
     caplog.clear()

-    exchange = ExchangeResolver.load_exchange('kraken', default_conf)
+    default_conf['exchange']['name'] = 'kraken'
+    exchange = ExchangeResolver.load_exchange(default_conf)
     assert isinstance(exchange, Exchange)
     assert isinstance(exchange, Kraken)
     assert not isinstance(exchange, Binance)
     assert not log_has_re(r"No .* specific subclass found. Using the generic class instead.",
                           caplog)

-    exchange = ExchangeResolver.load_exchange('binance', default_conf)
+    default_conf['exchange']['name'] = 'binance'
+    exchange = ExchangeResolver.load_exchange(default_conf)
     assert isinstance(exchange, Exchange)
     assert isinstance(exchange, Binance)
     assert not isinstance(exchange, Kraken)
@@ -257,7 +258,8 @@ def test_exchange_resolver(default_conf, mocker, caplog):
                           caplog)

     # Test mapping
-    exchange = ExchangeResolver.load_exchange('binanceus', default_conf)
+    default_conf['exchange']['name'] = 'binanceus'
+    exchange = ExchangeResolver.load_exchange(default_conf)
     assert isinstance(exchange, Exchange)
     assert isinstance(exchange, Binance)
     assert not isinstance(exchange, Kraken)
@@ -990,19 +992,20 @@ def test_validate_pricing(default_conf, mocker):
     mocker.patch(f'{EXMS}.validate_timeframes')
     mocker.patch(f'{EXMS}.validate_stakecurrency')
     mocker.patch(f'{EXMS}.name', 'Binance')
-    ExchangeResolver.load_exchange('binance', default_conf)
+    default_conf['exchange']['name'] = 'binance'
+    ExchangeResolver.load_exchange(default_conf)
     has.update({'fetchTicker': False})
     with pytest.raises(OperationalException, match="Ticker pricing not available for .*"):
-        ExchangeResolver.load_exchange('binance', default_conf)
+        ExchangeResolver.load_exchange(default_conf)

     has.update({'fetchTicker': True})
     default_conf['exit_pricing']['use_order_book'] = True
-    ExchangeResolver.load_exchange('binance', default_conf)
+    ExchangeResolver.load_exchange(default_conf)
     has.update({'fetchL2OrderBook': False})
     with pytest.raises(OperationalException, match="Orderbook not available for .*"):
-        ExchangeResolver.load_exchange('binance', default_conf)
+        ExchangeResolver.load_exchange(default_conf)

     has.update({'fetchL2OrderBook': True})
@@ -1011,7 +1014,7 @@ def test_validate_pricing(default_conf, mocker):
     default_conf['margin_mode'] = MarginMode.ISOLATED
     with pytest.raises(OperationalException, match="Ticker pricing not available for .*"):
-        ExchangeResolver.load_exchange('binance', default_conf)
+        ExchangeResolver.load_exchange(default_conf)


 def test_validate_ordertypes(default_conf, mocker):
@@ -1091,12 +1094,13 @@ def test_validate_ordertypes_stop_advanced(default_conf, mocker, exchange_name,
         'stoploss_on_exchange': True,
         'stoploss_price_type': stopadv,
     }
+    default_conf['exchange']['name'] = exchange_name
     if expected:
-        ExchangeResolver.load_exchange(exchange_name, default_conf)
+        ExchangeResolver.load_exchange(default_conf)
     else:
         with pytest.raises(OperationalException,
                            match=r'On exchange stoploss price type is not supported for .*'):
-            ExchangeResolver.load_exchange(exchange_name, default_conf)
+            ExchangeResolver.load_exchange(default_conf)


 def test_validate_order_types_not_in_config(default_conf, mocker):
@@ -1773,6 +1777,71 @@ def test_fetch_positions(default_conf, mocker, exchange_name):
                            "fetch_positions", "fetch_positions")


+@pytest.mark.parametrize("exchange_name", EXCHANGES)
+def test_fetch_orders(default_conf, mocker, exchange_name, limit_order):
+    api_mock = MagicMock()
+    api_mock.fetch_orders = MagicMock(return_value=[
+        limit_order['buy'],
+        limit_order['sell'],
+    ])
+    api_mock.fetch_open_orders = MagicMock(return_value=[limit_order['buy']])
+    api_mock.fetch_closed_orders = MagicMock(return_value=[limit_order['buy']])
+    mocker.patch(f'{EXMS}.exchange_has', return_value=True)
+    start_time = datetime.now(timezone.utc) - timedelta(days=5)
+
+    exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
+    # Not available in dry-run
+    assert exchange.fetch_orders('mocked', start_time) == []
+    assert api_mock.fetch_orders.call_count == 0
+    default_conf['dry_run'] = False
+
+    exchange = get_patched_exchange(mocker, default_conf, api_mock, id=exchange_name)
+    res = exchange.fetch_orders('mocked', start_time)
+
+    assert api_mock.fetch_orders.call_count == 1
+    assert api_mock.fetch_open_orders.call_count == 0
+    assert api_mock.fetch_closed_orders.call_count == 0
+    assert len(res) == 2
+
+    res = exchange.fetch_orders('mocked', start_time)
+
+    api_mock.fetch_orders.reset_mock()
+
+    def has_resp(_, endpoint):
+        if endpoint == 'fetchOrders':
+            return False
+        if endpoint == 'fetchClosedOrders':
+            return True
+        if endpoint == 'fetchOpenOrders':
+            return True
+
+    mocker.patch(f'{EXMS}.exchange_has', has_resp)
+
+    # happy path without fetchOrders
+    res = exchange.fetch_orders('mocked', start_time)
+
+    assert api_mock.fetch_orders.call_count == 0
+    assert api_mock.fetch_open_orders.call_count == 1
+    assert api_mock.fetch_closed_orders.call_count == 1
+
+    mocker.patch(f'{EXMS}.exchange_has', return_value=True)
+
+    ccxt_exceptionhandlers(mocker, default_conf, api_mock, exchange_name,
+                           "fetch_orders", "fetch_orders", retries=1,
+                           pair='mocked', since=start_time)
+
+    # Unhappy path - first fetch-orders call fails.
+    api_mock.fetch_orders = MagicMock(side_effect=ccxt.NotSupported())
+    api_mock.fetch_open_orders.reset_mock()
+    api_mock.fetch_closed_orders.reset_mock()
+
+    res = exchange.fetch_orders('mocked', start_time)
+
+    assert api_mock.fetch_orders.call_count == 1
+    assert api_mock.fetch_open_orders.call_count == 1
+    assert api_mock.fetch_closed_orders.call_count == 1
+
+
 def test_fetch_trading_fees(default_conf, mocker):
     api_mock = MagicMock()
     tick = {
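
The new test above pins down a fallback chain: prefer the exchange's combined fetchOrders endpoint, and if it is unavailable (or raises NotSupported at runtime) stitch the result together from open and closed orders. A rough sketch of that behaviour under simplified names, not the actual implementation:

```python
import ccxt


def fetch_orders_with_fallback(api, exchange_has, pair, since):
    # Prefer the combined endpoint when the exchange advertises it
    if exchange_has('fetchOrders'):
        try:
            return api.fetch_orders(pair, since)
        except ccxt.NotSupported:
            pass  # fall through to the two-call variant
    # Otherwise combine open and closed orders
    return api.fetch_open_orders(pair, since) + api.fetch_closed_orders(pair, since)
```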
@@ -4932,7 +5001,7 @@ def test_get_maintenance_ratio_and_amt_exceptions(mocker, default_conf, leverage
     exchange._leverage_tiers = leverage_tiers
     with pytest.raises(
-        OperationalException,
+        DependencyException,
         match='nominal value can not be lower than 0',
     ):
         exchange.get_maintenance_ratio_and_amt('1000SHIB/USDT:USDT', -1)

View File

@@ -1,3 +1,4 @@
+import platform
 from copy import deepcopy
 from pathlib import Path
 from typing import Any, Dict
@@ -14,6 +15,11 @@ from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
 from tests.conftest import get_patched_exchange


+def is_mac() -> bool:
+    machine = platform.system()
+    return "Darwin" in machine
+
+
 @pytest.fixture(scope="function")
 def freqai_conf(default_conf, tmpdir):
     freqaiconf = deepcopy(default_conf)
@@ -36,6 +42,7 @@ def freqai_conf(default_conf, tmpdir):
             "identifier": "uniqe-id100",
             "live_trained_timestamp": 0,
             "data_kitchen_thread_count": 2,
+            "activate_tensorboard": False,
             "feature_parameters": {
                 "include_timeframes": ["5m"],
                 "include_corr_pairlist": ["ADA/BTC"],

View File

@@ -12,6 +12,7 @@ from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from tests.conftest import get_patched_exchange, log_has_re
 from tests.freqai.conftest import (get_patched_data_kitchen, get_patched_freqai_strategy,
                                    make_data_dictionary, make_unfiltered_dataframe)
+from tests.freqai.test_freqai_interface import is_mac


 @pytest.mark.parametrize(
@@ -173,6 +174,9 @@ def test_get_full_model_path(mocker, freqai_conf, model):
     freqai_conf.update({"timerange": "20180110-20180130"})
     freqai_conf.update({"strategy": "freqai_test_strat"})

+    if is_mac():
+        pytest.skip("Mac is confused during this test for unknown reasons")
+
     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
     strategy.dp = DataProvider(freqai_conf, exchange)
@@ -188,7 +192,7 @@ def test_get_full_model_path(mocker, freqai_conf, model):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")
+    freqai.dk.set_paths('ADA/BTC', None)

     freqai.extract_data_and_train_model(
         new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

View File

@@ -15,7 +15,7 @@ from freqtrade.optimize.backtesting import Backtesting
 from freqtrade.persistence import Trade
 from freqtrade.plugins.pairlistmanager import PairListManager
 from tests.conftest import EXMS, create_mock_trades, get_patched_exchange, log_has_re
-from tests.freqai.conftest import (get_patched_freqai_strategy, make_rl_config,
+from tests.freqai.conftest import (get_patched_freqai_strategy, is_mac, make_rl_config,
                                    mock_pytorch_mlp_model_training_parameters)
@@ -28,29 +28,22 @@ def is_arm() -> bool:
     return "arm" in machine or "aarch64" in machine


-def is_mac() -> bool:
-    machine = platform.system()
-    return "Darwin" in machine
-
-
 def can_run_model(model: str) -> None:
-    if (is_arm() or is_py11()) and "Catboost" in model:
+    if is_arm() and "Catboost" in model:
         pytest.skip("CatBoost is not supported on ARM.")

     is_pytorch_model = 'Reinforcement' in model or 'PyTorch' in model
     if is_pytorch_model and is_mac() and not is_arm():
         pytest.skip("Reinforcement learning / PyTorch module not available on intel based Mac OS.")
-    if is_pytorch_model and is_py11():
-        pytest.skip("Reinforcement learning / PyTorch currently not available on python 3.11.")


 @pytest.mark.parametrize('model, pca, dbscan, float32, can_short, shuffle, buffer', [
     ('LightGBMRegressor', True, False, True, True, False, 0),
     ('XGBoostRegressor', False, True, False, True, False, 10),
     ('XGBoostRFRegressor', False, False, False, True, False, 0),
     ('CatboostRegressor', False, False, False, True, True, 0),
-    ('PyTorchMLPRegressor', False, False, False, True, False, 0),
+    ('PyTorchMLPRegressor', False, False, False, False, False, 0),
+    ('PyTorchTransformerRegressor', False, False, False, False, False, 0),
     ('ReinforcementLearner', False, True, False, True, False, 0),
     ('ReinforcementLearner_multiproc', False, False, False, True, False, 0),
     ('ReinforcementLearner_test_3ac', False, False, False, False, False, 0),
@@ -61,6 +54,11 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
                                                dbscan, float32, can_short, shuffle, buffer):
     can_run_model(model)

+    test_tb = True
+    if is_mac():
+        test_tb = False
+
     model_save_ext = 'joblib'
     freqai_conf.update({"freqaimodel": model})
     freqai_conf.update({"timerange": "20180110-20180130"})
@@ -82,10 +80,13 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
         freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")
         freqai_conf["freqai"]["rl_config"]["drop_ohlc_from_features"] = True

-    if 'PyTorchMLPRegressor' in model:
+    if 'PyTorch' in model:
         model_save_ext = 'zip'
         pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters()
         freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp)
+        if 'Transformer' in model:
+            # transformer model takes a window, unlike the MLP regressor
+            freqai_conf.update({"conv_width": 10})

     strategy = get_patched_freqai_strategy(mocker, freqai_conf)
     exchange = get_patched_exchange(mocker, freqai_conf)
@@ -93,6 +94,7 @@ def test_extract_data_and_train_model_Standard(mocker, freqai_conf, model, pca,
     strategy.freqai_info = freqai_conf.get("freqai", {})
     freqai = strategy.freqai
     freqai.live = True
+    freqai.activate_tensorboard = test_tb
     freqai.can_short = can_short
     freqai.dk = FreqaiDataKitchen(freqai_conf)
     freqai.dk.live = True
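
The repeated comment that the transformer takes a window is worth unpacking: with `conv_width` set, each sample fed to the model is a slice of consecutive candles rather than one flat feature row. A shape-only sketch under that assumption, not the trainer code:

```python
import torch

batch, window, n_features = 32, 10, 8  # window corresponds to conv_width above
x_transformer = torch.randn(batch, window, n_features)  # (batch, time, features)
x_mlp = torch.randn(batch, n_features)                  # MLP regressor sees flat rows
```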
@@ -228,6 +230,7 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
     ("XGBoostRegressor", 2, "freqai_test_strat"),
     ("CatboostRegressor", 2, "freqai_test_strat"),
     ("PyTorchMLPRegressor", 2, "freqai_test_strat"),
+    ("PyTorchTransformerRegressor", 2, "freqai_test_strat"),
     ("ReinforcementLearner", 3, "freqai_rl_test_strat"),
     ("XGBoostClassifier", 2, "freqai_test_classifier"),
     ("LightGBMClassifier", 2, "freqai_test_classifier"),
@@ -237,6 +240,9 @@ def test_extract_data_and_train_model_Classifiers(mocker, freqai_conf, model):
 )
 def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog):
     can_run_model(model)
+    test_tb = True
+    if is_mac():
+        test_tb = False

     freqai_conf.get("freqai", {}).update({"save_backtest_models": True})
     freqai_conf['runmode'] = RunMode.BACKTEST
@@ -253,9 +259,12 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
     if 'test_4ac' in model:
         freqai_conf["freqaimodel_path"] = str(Path(__file__).parents[1] / "freqai" / "test_models")

-    if 'PyTorchMLP' in model:
+    if 'PyTorch' in model:
         pytorch_mlp_mtp = mock_pytorch_mlp_model_training_parameters()
         freqai_conf['freqai']['model_training_parameters'].update(pytorch_mlp_mtp)
+        if 'Transformer' in model:
+            # transformer model takes a window, unlike the MLP regressor
+            freqai_conf.update({"conv_width": 10})

     freqai_conf.get("freqai", {}).get("feature_parameters", {}).update(
         {"indicator_periods_candles": [2]})
@@ -266,6 +275,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
     strategy.freqai_info = freqai_conf.get("freqai", {})
     freqai = strategy.freqai
     freqai.live = False
+    freqai.activate_tensorboard = test_tb
     freqai.dk = FreqaiDataKitchen(freqai_conf)
     timerange = TimeRange.parse_timerange("20180110-20180130")
     freqai.dd.load_all_pair_histories(timerange, freqai.dk)
@@ -277,6 +287,7 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog)
         df[f'%-constant_{i}'] = i

     metadata = {"pair": "LTC/BTC"}
+    freqai.dk.set_paths('LTC/BTC', None)
     freqai.start_backtesting(df, metadata, freqai.dk, strategy)
     model_folders = [x for x in freqai.dd.full_path.iterdir() if x.is_dir()]
@@ -434,6 +445,7 @@ def test_principal_component_analysis(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")
+    freqai.dk.set_paths('ADA/BTC', None)

     freqai.extract_data_and_train_model(
         new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)
@@ -467,6 +479,7 @@ def test_plot_feature_importance(mocker, freqai_conf):
     data_load_timerange = TimeRange.parse_timerange("20180110-20180130")
     new_timerange = TimeRange.parse_timerange("20180120-20180130")
+    freqai.dk.set_paths('ADA/BTC', None)

     freqai.extract_data_and_train_model(
         new_timerange, "ADA/BTC", strategy, freqai.dk, data_load_timerange)

View File

@@ -18,6 +18,11 @@ class ReinforcementLearner_test_3ac(ReinforcementLearner):
     """
     User can override any function in BaseRLEnv and gym.Env. Here the user
     sets a custom reward based on profit and trade duration.
+
+    Warning!
+    This is function is a showcase of functionality designed to show as many possible
+    environment control features as possible. It is also designed to run quickly
+    on small computers. This is a benchmark, it is *not* for live production.
     """

     def calculate_reward(self, action: int) -> float:

View File

@@ -18,6 +18,11 @@ class ReinforcementLearner_test_4ac(ReinforcementLearner):
     """
     User can override any function in BaseRLEnv and gym.Env. Here the user
     sets a custom reward based on profit and trade duration.
+
+    Warning!
+    This is function is a showcase of functionality designed to show as many possible
+    environment control features as possible. It is also designed to run quickly
+    on small computers. This is a benchmark, it is *not* for live production.
     """

     def calculate_reward(self, action: int) -> float:

View File

@@ -354,7 +354,7 @@ def test_backtesting_start(default_conf, mocker, caplog) -> None:
     mocker.patch('freqtrade.optimize.backtesting.generate_backtest_stats')
     mocker.patch('freqtrade.optimize.backtesting.show_backtest_results')
     sbs = mocker.patch('freqtrade.optimize.backtesting.store_backtest_stats')
-    sbc = mocker.patch('freqtrade.optimize.backtesting.store_backtest_signal_candles')
+    sbc = mocker.patch('freqtrade.optimize.backtesting.store_backtest_analysis_results')
     mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
                  PropertyMock(return_value=['UNITTEST/BTC']))

View File

@@ -21,7 +21,7 @@ from freqtrade.optimize.optimize_reports import (_get_resample_from_period, gene
                                                  generate_periodic_breakdown_stats,
                                                  generate_strategy_comparison,
                                                  generate_trading_stats, show_sorted_pairlist,
-                                                 store_backtest_signal_candles,
+                                                 store_backtest_analysis_results,
                                                  store_backtest_stats, text_table_bt_results,
                                                  text_table_exit_reason, text_table_strategy)
 from freqtrade.resolvers.strategy_resolver import StrategyResolver
@@ -232,17 +232,17 @@ def test_store_backtest_candles(testdatadir, mocker):
     candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}

     # mock directory exporting
-    store_backtest_signal_candles(testdatadir, candle_dict, '2022_01_01_15_05_13')
+    store_backtest_analysis_results(testdatadir, candle_dict, {}, '2022_01_01_15_05_13')

-    assert dump_mock.call_count == 1
+    assert dump_mock.call_count == 2
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl')

     dump_mock.reset_mock()
     # mock file exporting
     filename = Path(testdatadir / 'testresult')
-    store_backtest_signal_candles(filename, candle_dict, '2022_01_01_15_05_13')
-    assert dump_mock.call_count == 1
+    store_backtest_analysis_results(filename, candle_dict, {}, '2022_01_01_15_05_13')
+    assert dump_mock.call_count == 2
     assert isinstance(dump_mock.call_args_list[0][0][0], Path)
     # result will be testdatadir / testresult-<timestamp>_signals.pkl
     assert str(dump_mock.call_args_list[0][0][0]).endswith('_signals.pkl')
@@ -254,10 +254,11 @@ def test_write_read_backtest_candles(tmpdir):
     candle_dict = {'DefStrat': {'UNITTEST/BTC': pd.DataFrame()}}

     # test directory exporting
-    stored_file = store_backtest_signal_candles(Path(tmpdir), candle_dict, '2022_01_01_15_05_13')
-    scp = stored_file.open("rb")
-    pickled_signal_candles = joblib.load(scp)
-    scp.close()
+    sample_date = '2022_01_01_15_05_13'
+    store_backtest_analysis_results(Path(tmpdir), candle_dict, {}, sample_date)
+    stored_file = Path(tmpdir / f'backtest-result-{sample_date}_signals.pkl')
+    with stored_file.open("rb") as scp:
+        pickled_signal_candles = joblib.load(scp)

     assert pickled_signal_candles.keys() == candle_dict.keys()
     assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys()
@@ -268,10 +269,10 @@ def test_write_read_backtest_candles(tmpdir):
     # test file exporting
     filename = Path(tmpdir / 'testresult')
-    stored_file = store_backtest_signal_candles(filename, candle_dict, '2022_01_01_15_05_13')
-    scp = stored_file.open("rb")
-    pickled_signal_candles = joblib.load(scp)
-    scp.close()
+    store_backtest_analysis_results(filename, candle_dict, {}, sample_date)
+    stored_file = Path(tmpdir / f'testresult-{sample_date}_signals.pkl')
+    with stored_file.open("rb") as scp:
+        pickled_signal_candles = joblib.load(scp)

     assert pickled_signal_candles.keys() == candle_dict.keys()
     assert pickled_signal_candles['DefStrat'].keys() == pickled_signal_candles['DefStrat'].keys()

View File

@@ -239,7 +239,7 @@ def test_interest(fee, exchange, is_short, lev, minutes, rate, interest,
         stake_amount=20.0,
         amount=30.0,
         open_rate=2.0,
-        open_date=datetime.utcnow() - timedelta(minutes=minutes),
+        open_date=datetime.now(timezone.utc) - timedelta(minutes=minutes),
         fee_open=fee.return_value,
         fee_close=fee.return_value,
         exchange=exchange,
@@ -2063,7 +2063,7 @@ def test_trade_truncates_string_fields():
         stake_amount=20.0,
         amount=30.0,
         open_rate=2.0,
-        open_date=datetime.utcnow() - timedelta(minutes=20),
+        open_date=datetime.now(timezone.utc) - timedelta(minutes=20),
         fee_open=0.001,
         fee_close=0.001,
         exchange='binance',
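
The recurring change across these test files swaps the naive `datetime.utcnow()` for the timezone-aware `datetime.now(timezone.utc)`. The distinction matters because `utcnow()` returns an object with no tzinfo (and is deprecated as of Python 3.12), and naive and aware datetimes cannot be ordered against each other:

```python
from datetime import datetime, timezone

naive = datetime.utcnow()            # tzinfo is None; deprecated since Python 3.12
aware = datetime.now(timezone.utc)   # tzinfo is timezone.utc
assert naive.tzinfo is None and aware.tzinfo is timezone.utc
# naive < aware would raise TypeError, which is why the tests switch wholesale
# instead of mixing the two styles.
```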

View File

@@ -1,5 +1,5 @@
 import random
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone

 import pytest
@@ -24,8 +24,8 @@ def generate_mock_trade(pair: str, fee: float, is_open: bool,
         stake_amount=0.01,
         fee_open=fee,
         fee_close=fee,
-        open_date=datetime.utcnow() - timedelta(minutes=min_ago_open or 200),
-        close_date=datetime.utcnow() - timedelta(minutes=min_ago_close or 30),
+        open_date=datetime.now(timezone.utc) - timedelta(minutes=min_ago_open or 200),
+        close_date=datetime.now(timezone.utc) - timedelta(minutes=min_ago_close or 30),
         open_rate=open_rate,
         is_open=is_open,
         amount=0.01 / open_rate,
@@ -87,9 +87,9 @@ def test_protectionmanager(mocker, default_conf):
     for handler in freqtrade.protections._protection_handlers:
         assert handler.name in constants.AVAILABLE_PROTECTIONS
         if not handler.has_global_stop:
-            assert handler.global_stop(datetime.utcnow(), '*') is None
+            assert handler.global_stop(datetime.now(timezone.utc), '*') is None
         if not handler.has_local_stop:
-            assert handler.stop_per_pair('XRP/BTC', datetime.utcnow(), '*') is None
+            assert handler.stop_per_pair('XRP/BTC', datetime.now(timezone.utc), '*') is None


 @pytest.mark.parametrize('timeframe,expected,protconf', [

View File

@@ -261,8 +261,7 @@ def test_rpc_status_table(default_conf, ticker, fee, mocker) -> None:
     assert isnan(fiat_profit_sum)


-def test__rpc_timeunit_profit(default_conf_usdt, ticker, fee,
-                              limit_buy_order, limit_sell_order, markets, mocker) -> None:
+def test__rpc_timeunit_profit(default_conf_usdt, ticker, fee, markets, mocker) -> None:
     mocker.patch('freqtrade.rpc.telegram.Telegram', MagicMock())
     mocker.patch.multiple(
         EXMS,
@@ -295,7 +294,7 @@ def test__rpc_timeunit_profit(default_conf_usdt, ticker, fee,
         assert day['starting_balance'] in (pytest.approx(1062.37), pytest.approx(1066.46))
         assert day['fiat_value'] in (0.0, )
     # ensure first day is current date
-    assert str(days['data'][0]['date']) == str(datetime.utcnow().date())
+    assert str(days['data'][0]['date']) == str(datetime.now(timezone.utc).date())

     # Try invalid data
     with pytest.raises(RPCException, match=r'.*must be an integer greater than 0*'):
@@ -415,8 +414,8 @@ def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None:
     assert pytest.approx(stats['profit_all_percent_mean']) == -57.86
     assert pytest.approx(stats['profit_all_fiat']) == -85.205614098
     assert stats['trade_count'] == 7
-    assert stats['first_trade_date'] == '2 days ago'
-    assert stats['latest_trade_date'] == '17 minutes ago'
+    assert stats['first_trade_humanized'] == '2 days ago'
+    assert stats['latest_trade_humanized'] == '17 minutes ago'
     assert stats['avg_duration'] in ('0:17:40')
     assert stats['best_pair'] == 'XRP/USDT'
     assert stats['best_rate'] == 10.0
@@ -426,8 +425,8 @@ def test_rpc_trade_statistics(default_conf_usdt, ticker, fee, mocker) -> None:
                  MagicMock(side_effect=ExchangeError("Pair 'XRP/USDT' not available")))
     stats = rpc._rpc_trade_statistics(stake_currency, fiat_display_currency)
     assert stats['trade_count'] == 7
-    assert stats['first_trade_date'] == '2 days ago'
-    assert stats['latest_trade_date'] == '17 minutes ago'
+    assert stats['first_trade_humanized'] == '2 days ago'
+    assert stats['latest_trade_humanized'] == '17 minutes ago'
     assert stats['avg_duration'] in ('0:17:40')
     assert stats['best_pair'] == 'XRP/USDT'
     assert stats['best_rate'] == 10.0

View File

@@ -601,7 +601,7 @@ def test_api_daily(botclient, mocker, ticker, fee, markets):
     assert len(rc.json()['data']) == 7
     assert rc.json()['stake_currency'] == 'BTC'
     assert rc.json()['fiat_display_currency'] == 'USD'
-    assert rc.json()['data'][0]['date'] == str(datetime.utcnow().date())
+    assert rc.json()['data'][0]['date'] == str(datetime.now(timezone.utc).date())


 @pytest.mark.parametrize('is_short', [True, False])
@@ -740,6 +740,33 @@ def test_api_delete_open_order(botclient, mocker, fee, markets, ticker, is_short
     assert cancel_mock.call_count == 1


+@pytest.mark.parametrize('is_short', [True, False])
+def test_api_trade_reload_trade(botclient, mocker, fee, markets, ticker, is_short):
+    ftbot, client = botclient
+    patch_get_signal(ftbot, enter_long=not is_short, enter_short=is_short)
+    stoploss_mock = MagicMock()
+    cancel_mock = MagicMock()
+    ftbot.handle_onexchange_order = MagicMock()
+    mocker.patch.multiple(
+        EXMS,
+        markets=PropertyMock(return_value=markets),
+        fetch_ticker=ticker,
+        cancel_order=cancel_mock,
+        cancel_stoploss_order=stoploss_mock,
+    )
+
+    rc = client_post(client, f"{BASE_URI}/trades/10/reload")
+    assert_response(rc, 502)
+    assert 'Could not find trade with id 10.' in rc.json()['error']
+    assert ftbot.handle_onexchange_order.call_count == 0
+
+    create_mock_trades(fee, is_short=is_short)
+    Trade.commit()
+
+    rc = client_post(client, f"{BASE_URI}/trades/5/reload")
+    assert ftbot.handle_onexchange_order.call_count == 1
+
+
 def test_api_logs(botclient):
     ftbot, client = botclient
     rc = client_get(client, f"{BASE_URI}/logs")
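
The test above exercises a new `/trades/<trade_id>/reload` endpoint. A hedged sketch of calling it directly; the base path and credentials are placeholders (the tests build the URL from `BASE_URI`, whose value is not shown in this excerpt):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:8080/api/v1/trades/5/reload",  # base path assumed
    auth=("freqtrader", "password"),                 # placeholder credentials
)
resp.raise_for_status()
```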
@@ -861,8 +888,10 @@ def test_api_profit(botclient, mocker, ticker, fee, markets, is_short, expected)
         'best_pair_profit_ratio': expected['best_pair_profit_ratio'],
         'best_rate': expected['best_rate'],
         'first_trade_date': ANY,
+        'first_trade_humanized': ANY,
         'first_trade_timestamp': ANY,
-        'latest_trade_date': '5 minutes ago',
+        'latest_trade_date': ANY,
+        'latest_trade_humanized': '5 minutes ago',
         'latest_trade_timestamp': ANY,
         'profit_all_coin': pytest.approx(expected['profit_all_coin']),
         'profit_all_fiat': pytest.approx(expected['profit_all_fiat']),
@@ -1197,7 +1226,7 @@ def test_api_force_entry(botclient, mocker, fee, endpoint):
         stake_amount=1,
         open_rate=0.245441,
         open_order_id="123456",
-        open_date=datetime.utcnow(),
+        open_date=datetime.now(timezone.utc),
         is_open=False,
         is_short=False,
         fee_close=fee.return_value,

View File

@@ -52,7 +52,7 @@ def default_conf(default_conf) -> dict:
 @pytest.fixture
 def update():
-    message = Message(0, datetime.utcnow(), Chat(0, 0))
+    message = Message(0, datetime.now(timezone.utc), Chat(0, 0))
     _update = Update(0, message=message)

     return _update
@@ -143,8 +143,8 @@ def test_telegram_init(default_conf, mocker, caplog) -> None:
     message_str = ("rpc.telegram is listening for following commands: [['status'], ['profit'], "
                    "['balance'], ['start'], ['stop'], "
                    "['forceexit', 'forcesell', 'fx'], ['forcebuy', 'forcelong'], ['forceshort'], "
-                   "['trades'], ['delete'], ['cancel_open_order', 'coo'], ['performance'], "
-                   "['buys', 'entries'], ['exits', 'sells'], ['mix_tags'], "
+                   "['reload_trade'], ['trades'], ['delete'], ['cancel_open_order', 'coo'], "
+                   "['performance'], ['buys', 'entries'], ['exits', 'sells'], ['mix_tags'], "
                    "['stats'], ['daily'], ['weekly'], ['monthly'], "
                    "['count'], ['locks'], ['delete_locks', 'unlock'], "
                    "['reload_conf', 'reload_config'], ['show_conf', 'show_config'], "
@@ -213,7 +213,7 @@ async def test_authorized_only_unauthorized(default_conf, mocker, caplog) -> Non
     patch_exchange(mocker)
     caplog.set_level(logging.DEBUG)
     chat = Chat(0xdeadbeef, 0)
-    message = Message(randint(1, 100), datetime.utcnow(), chat)
+    message = Message(randint(1, 100), datetime.now(timezone.utc), chat)
     update = Update(randint(1, 100), message=message)

     default_conf['telegram']['enabled'] = False
@@ -520,7 +520,7 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time
     assert msg_mock.call_count == 1
     assert "Daily Profit over the last 2 days</b>:" in msg_mock.call_args_list[0][0][0]
     assert 'Day ' in msg_mock.call_args_list[0][0][0]
-    assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0]
+    assert str(datetime.now(timezone.utc).date()) in msg_mock.call_args_list[0][0][0]
     assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0]
     assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0]
     assert '(2)' in msg_mock.call_args_list[0][0][0]
@@ -533,8 +533,9 @@ async def test_daily_handle(default_conf_usdt, update, ticker, fee, mocker, time
     await telegram._daily(update=update, context=context)
     assert msg_mock.call_count == 1
     assert "Daily Profit over the last 7 days</b>:" in msg_mock.call_args_list[0][0][0]
-    assert str(datetime.utcnow().date()) in msg_mock.call_args_list[0][0][0]
-    assert str((datetime.utcnow() - timedelta(days=5)).date()) in msg_mock.call_args_list[0][0][0]
+    assert str(datetime.now(timezone.utc).date()) in msg_mock.call_args_list[0][0][0]
+    assert str((datetime.now(timezone.utc) - timedelta(days=5)).date()
+               ) in msg_mock.call_args_list[0][0][0]
     assert ' 6.83 USDT' in msg_mock.call_args_list[0][0][0]
     assert ' 7.51 USD' in msg_mock.call_args_list[0][0][0]
     assert '(2)' in msg_mock.call_args_list[0][0][0]
@@ -608,7 +609,7 @@ async def test_weekly_handle(default_conf_usdt, update, ticker, fee, mocker, tim
     assert "Weekly Profit over the last 2 weeks (starting from Monday)</b>:" \
            in msg_mock.call_args_list[0][0][0]
     assert 'Monday ' in msg_mock.call_args_list[0][0][0]
-    today = datetime.utcnow().date()
+    today = datetime.now(timezone.utc).date()
     first_iso_day_of_current_week = today - timedelta(days=today.weekday())
     assert str(first_iso_day_of_current_week) in msg_mock.call_args_list[0][0][0]
     assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
@@ -677,7 +678,7 @@ async def test_monthly_handle(default_conf_usdt, update, ticker, fee, mocker, ti
     assert msg_mock.call_count == 1
     assert 'Monthly Profit over the last 2 months</b>:' in msg_mock.call_args_list[0][0][0]
     assert 'Month ' in msg_mock.call_args_list[0][0][0]
-    today = datetime.utcnow().date()
+    today = datetime.now(timezone.utc).date()
     current_month = f"{today.year}-{today.month:02} "
     assert current_month in msg_mock.call_args_list[0][0][0]
     assert ' 2.74 USDT' in msg_mock.call_args_list[0][0][0]
@@ -825,6 +826,9 @@ async def test_telegram_stats(default_conf, update, ticker, fee, mocker, is_shor
     assert 'Exit Reason' in msg_mock.call_args_list[-1][0][0]
     assert 'ROI' in msg_mock.call_args_list[-1][0][0]
     assert 'Avg. Duration' in msg_mock.call_args_list[-1][0][0]
+    # Duration is not only N/A
+    assert '0:19:00' in msg_mock.call_args_list[-1][0][0]
+    assert 'N/A' in msg_mock.call_args_list[-1][0][0]
     msg_mock.reset_mock()
@@ -1760,6 +1764,25 @@ async def test_telegram_delete_trade(mocker, update, default_conf, fee, is_short
     assert "Please make sure to take care of this asset" in msg_mock.call_args_list[0][0][0]


+@pytest.mark.parametrize('is_short', [True, False])
+async def test_telegram_reload_trade_from_exchange(mocker, update, default_conf, fee, is_short):
+    telegram, _, msg_mock = get_telegram_testobject(mocker, default_conf)
+    context = MagicMock()
+    context.args = []
+
+    await telegram._reload_trade_from_exchange(update=update, context=context)
+    assert "Trade-id not set." in msg_mock.call_args_list[0][0][0]
+
+    msg_mock.reset_mock()
+    create_mock_trades(fee, is_short=is_short)
+
+    context.args = [5]
+    await telegram._reload_trade_from_exchange(update=update, context=context)
+    assert "Status: `Reloaded from orders from exchange`" in msg_mock.call_args_list[0][0][0]
+
+
 @pytest.mark.parametrize('is_short', [True, False])
 async def test_telegram_delete_open_order(mocker, update, default_conf, fee, is_short, ticker):

View File

@@ -1,4 +1,4 @@
-from datetime import datetime
+from datetime import datetime, timezone

 import pytest
 from pandas import DataFrame
@@ -43,12 +43,12 @@ def test_strategy_test_v3(dataframe_1m, fee, is_short, side):
     assert strategy.confirm_trade_entry(pair='ETH/BTC', order_type='limit', amount=0.1,
                                         rate=20000, time_in_force='gtc',
-                                        current_time=datetime.utcnow(),
+                                        current_time=datetime.now(timezone.utc),
                                         side=side, entry_tag=None) is True
     assert strategy.confirm_trade_exit(pair='ETH/BTC', trade=trade, order_type='limit', amount=0.1,
                                        rate=20000, time_in_force='gtc', exit_reason='roi',
                                        sell_reason='roi',
-                                       current_time=datetime.utcnow(),
+                                       current_time=datetime.now(timezone.utc),
                                        side=side) is True

     assert strategy.custom_stoploss(pair='ETH/BTC', trade=trade, current_time=datetime.now(),

View File

@@ -1271,7 +1271,7 @@ def test_pairlist_resolving_with_config_pl_not_exists(mocker, default_conf):
         configuration.get_config()


-def test_pairlist_resolving_fallback(mocker):
+def test_pairlist_resolving_fallback(mocker, tmpdir):
     mocker.patch.object(Path, "exists", MagicMock(return_value=True))
     mocker.patch.object(Path, "open", MagicMock(return_value=MagicMock()))
     mocker.patch("freqtrade.configuration.configuration.load_file",
@@ -1290,7 +1290,7 @@ def test_pairlist_resolving_fallback(mocker):
     assert config['pairs'] == ['ETH/BTC', 'XRP/BTC']
     assert config['exchange']['name'] == 'binance'
-    assert config['datadir'] == Path.cwd() / "user_data/data/binance"
+    assert config['datadir'] == Path(tmpdir) / "user_data/data/binance"


 @pytest.mark.parametrize("setting", [

View File

@@ -121,7 +121,7 @@ def test_order_dict(default_conf_usdt, mocker, runmode, caplog) -> None:
     freqtrade = FreqtradeBot(conf)
     if runmode == RunMode.LIVE:
-        assert not log_has_re(".*stoploss_on_exchange .* dry-run", caplog)
+        assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)
     assert freqtrade.strategy.order_types['stoploss_on_exchange']

     caplog.clear()
@@ -136,7 +136,7 @@ def test_order_dict(default_conf_usdt, mocker, runmode, caplog) -> None:
     }
     freqtrade = FreqtradeBot(conf)
     assert not freqtrade.strategy.order_types['stoploss_on_exchange']
-    assert not log_has_re(".*stoploss_on_exchange .* dry-run", caplog)
+    assert not log_has_re(r".*stoploss_on_exchange .* dry-run", caplog)


 def test_get_trade_stake_amount(default_conf_usdt, mocker) -> None:
@@ -149,6 +149,34 @@ def test_get_trade_stake_amount(default_conf_usdt, mocker) -> None:
     assert result == default_conf_usdt['stake_amount']


+@pytest.mark.parametrize('runmode', [
+    RunMode.DRY_RUN,
+    RunMode.LIVE
+])
+def test_load_strategy_no_keys(default_conf_usdt, mocker, runmode, caplog) -> None:
+    patch_RPCManager(mocker)
+    patch_exchange(mocker)
+    conf = deepcopy(default_conf_usdt)
+    conf['runmode'] = runmode
+    erm = mocker.patch('freqtrade.freqtradebot.ExchangeResolver.load_exchange')
+
+    freqtrade = FreqtradeBot(conf)
+    strategy_config = freqtrade.strategy.config
+    assert id(strategy_config['exchange']) == id(conf['exchange'])
+    # Keys have been removed and are not passed to the exchange
+    assert strategy_config['exchange']['key'] == ''
+    assert strategy_config['exchange']['secret'] == ''
+
+    assert erm.call_count == 1
+    ex_conf = erm.call_args_list[0][1]['exchange_config']
+    assert id(ex_conf) != id(conf['exchange'])
+    # Keys are still present
+    assert ex_conf['key'] != ''
+    assert ex_conf['key'] == default_conf_usdt['exchange']['key']
+    assert ex_conf['secret'] != ''
+    assert ex_conf['secret'] == default_conf_usdt['exchange']['secret']
+
+
 @pytest.mark.parametrize("amend_last,wallet,max_open,lsamr,expected", [
     (False, 120, 2, 0.5, [60, None]),
     (True, 120, 2, 0.5, [60, 58.8]),
@@ -5552,6 +5580,51 @@ def test_handle_insufficient_funds(mocker, default_conf_usdt, fee, is_short, cap
     assert log_has(f"Error updating {order['id']}.", caplog)


+@pytest.mark.usefixtures("init_persistence")
+@pytest.mark.parametrize("is_short", [False, True])
+def test_handle_onexchange_order(mocker, default_conf_usdt, limit_order, is_short, caplog):
+    freqtrade = get_patched_freqtradebot(mocker, default_conf_usdt)
+    mock_uts = mocker.spy(freqtrade, 'update_trade_state')
+
+    entry_order = limit_order[entry_side(is_short)]
+    exit_order = limit_order[exit_side(is_short)]
+    mock_fo = mocker.patch(f'{EXMS}.fetch_orders', return_value=[
+        entry_order,
+        exit_order,
+    ])
+
+    order_id = entry_order['id']
+
+    trade = Trade(
+        open_order_id=order_id,
+        pair='ETH/USDT',
+        fee_open=0.001,
+        fee_close=0.001,
+        open_rate=entry_order['price'],
+        open_date=arrow.utcnow().datetime,
+        stake_amount=entry_order['cost'],
+        amount=entry_order['amount'],
+        exchange="binance",
+        is_short=is_short,
+        leverage=1,
+    )
+
+    trade.orders.append(Order.parse_from_ccxt_object(
+        entry_order, 'ADA/USDT', entry_side(is_short))
+    )
+    Trade.session.add(trade)
+    freqtrade.handle_onexchange_order(trade)
+    assert log_has_re(r"Found previously unknown order .*", caplog)
+    assert mock_uts.call_count == 1
+    assert mock_fo.call_count == 1
+    trade = Trade.session.scalars(select(Trade)).first()
+
+    assert len(trade.orders) == 2
+    assert trade.is_open is False
+    assert trade.exit_reason == ExitType.SOLD_ON_EXCHANGE.value
+
+
 def test_get_valid_price(mocker, default_conf_usdt) -> None:
     patch_RPCManager(mocker)
     patch_exchange(mocker)
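
Read together, `test_handle_onexchange_order` fixes the contract of the new bot method: fetch the pair's orders from the exchange, attach any previously unknown ones to the trade, and update the trade state (closing it as sold-on-exchange when an exit fill turns up). A rough sketch under those assumptions, with hypothetical helper names:

```python
import logging

logger = logging.getLogger(__name__)


def handle_onexchange_order(bot, trade):
    # Sketch only: reconcile a trade with orders found on the exchange
    orders = bot.exchange.fetch_orders(trade.pair, trade.open_date_utc)
    known_ids = {o.order_id for o in trade.orders}  # attribute name assumed
    for order in orders:
        if order['id'] not in known_ids:
            logger.warning("Found previously unknown order %s.", order['id'])
            trade.orders.append(
                Order.parse_from_ccxt_object(order, trade.pair, order['side']))
            bot.update_trade_state(trade, order['id'])
```

Order here is the persistence model the test itself imports; everything else is illustrative.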

View File

@@ -75,8 +75,9 @@ def test_may_execute_exit_stoploss_on_exchange_multi(default_conf, ticker, fee,
         _notify_exit=MagicMock(),
     )
     mocker.patch("freqtrade.strategy.interface.IStrategy.should_exit", should_sell_mock)
-    wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update", MagicMock())
-    mocker.patch("freqtrade.wallets.Wallets.get_free", MagicMock(return_value=1000))
+    wallets_mock = mocker.patch("freqtrade.wallets.Wallets.update")
+    mocker.patch("freqtrade.wallets.Wallets.get_free", return_value=1000)
+    mocker.patch("freqtrade.wallets.Wallets.check_exit_amount", return_value=True)

     freqtrade = get_patched_freqtradebot(mocker, default_conf)
     freqtrade.strategy.order_types['stoploss_on_exchange'] = True

View File

@@ -1,5 +1,4 @@
 from copy import deepcopy
-from pathlib import Path
 from unittest.mock import MagicMock

 import pandas as pd
@@ -282,13 +281,13 @@ def test_generate_Plot_filename():
     assert fn == "freqtrade-plot-UNITTEST_BTC-5m.html"


-def test_generate_plot_file(mocker, caplog):
+def test_generate_plot_file(mocker, caplog, user_dir):
     fig = generate_empty_figure()
     plot_mock = mocker.patch("freqtrade.plot.plotting.plot", MagicMock())
     store_plot_file(fig, filename="freqtrade-plot-UNITTEST_BTC-5m.html",
-                    directory=Path("user_data/plot"))
+                    directory=user_dir / "plot")

-    expected_fn = str(Path("user_data/plot/freqtrade-plot-UNITTEST_BTC-5m.html"))
+    expected_fn = str(user_dir / "plot/freqtrade-plot-UNITTEST_BTC-5m.html")
     assert plot_mock.call_count == 1
     assert plot_mock.call_args[0][0] == fig
     assert (plot_mock.call_args_list[0][1]['filename']

Some files were not shown because too many files have changed in this diff.