Mirror of https://github.com/freqtrade/freqtrade.git (synced 2024-11-14 04:03:55 +00:00)

Commit 43f1537383

.github/workflows/ci.yml
@@ -14,7 +14,7 @@ on:
     - cron: '0 5 * * 4'

 concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
+  group: "${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }}"
   cancel-in-progress: true
 permissions:
   repository-projects: read
@@ -57,7 +57,7 @@ jobs:
     - name: Installation - *nix
       if: runner.os == 'Linux'
       run: |
-        python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+        python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
         export TA_LIBRARY_PATH=${HOME}/dependencies/lib
         export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -77,6 +77,17 @@ jobs:
         # Allow failure for coveralls
         coveralls || true

+    - name: Check for repository changes
+      run: |
+        if [ -n "$(git status --porcelain)" ]; then
+          echo "Repository is dirty, changes detected:"
+          git status
+          git diff
+          exit 1
+        else
+          echo "Repository is clean, no changes detected."
+        fi
+
     - name: Backtesting (multi)
       run: |
         cp config_examples/config_bittrex.example.json config.json
@@ -163,7 +174,7 @@ jobs:
         rm /usr/local/bin/python3.11-config || true

         brew install hdf5 c-blosc
-        python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+        python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
         export TA_LIBRARY_PATH=${HOME}/dependencies/lib
         export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -174,6 +185,17 @@ jobs:
       run: |
         pytest --random-order

+    - name: Check for repository changes
+      run: |
+        if [ -n "$(git status --porcelain)" ]; then
+          echo "Repository is dirty, changes detected:"
+          git status
+          git diff
+          exit 1
+        else
+          echo "Repository is clean, no changes detected."
+        fi
+
     - name: Backtesting
       run: |
         cp config_examples/config_bittrex.example.json config.json
@@ -237,6 +259,18 @@ jobs:
       run: |
         pytest --random-order

+    - name: Check for repository changes
+      run: |
+        if (git status --porcelain) {
+          Write-Host "Repository is dirty, changes detected:"
+          git status
+          git diff
+          exit 1
+        }
+        else {
+          Write-Host "Repository is clean, no changes detected."
+        }
+
     - name: Backtesting
       run: |
         cp config_examples/config_bittrex.example.json config.json
@@ -302,7 +336,7 @@ jobs:
     - name: Set up Python
       uses: actions/setup-python@v4
       with:
-        python-version: "3.10"
+        python-version: "3.11"

     - name: Documentation build
       run: |
@@ -352,7 +386,7 @@ jobs:
     - name: Installation - *nix
       if: runner.os == 'Linux'
       run: |
-        python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+        python -m pip install --upgrade pip wheel
         export LD_LIBRARY_PATH=${HOME}/dependencies/lib:$LD_LIBRARY_PATH
         export TA_LIBRARY_PATH=${HOME}/dependencies/lib
         export TA_INCLUDE_PATH=${HOME}/dependencies/include
@@ -425,7 +459,7 @@ jobs:
         python setup.py sdist bdist_wheel

     - name: Publish to PyPI (Test)
-      uses: pypa/gh-action-pypi-publish@v1.8.5
+      uses: pypa/gh-action-pypi-publish@v1.8.6
       if: (github.event_name == 'release')
       with:
         user: __token__
@@ -433,7 +467,7 @@ jobs:
         repository_url: https://test.pypi.org/legacy/

     - name: Publish to PyPI
-      uses: pypa/gh-action-pypi-publish@v1.8.5
+      uses: pypa/gh-action-pypi-publish@v1.8.6
       if: (github.event_name == 'release')
       with:
         user: __token__
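The three new "Check for repository changes" steps fail CI whenever the test run leaves the working tree dirty. A compact local sketch of the same check, assuming it is run from the root of a git checkout:

```bash
# `git status --porcelain` prints one line per modified or untracked file,
# and nothing when the tree is clean - so a non-empty result means "dirty".
test -z "$(git status --porcelain)" || { git status; git diff; exit 1; }
```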
.pre-commit-config.yaml

@@ -15,10 +15,10 @@ repos:
         additional_dependencies:
           - types-cachetools==5.3.0.5
           - types-filelock==3.2.7
-          - types-requests==2.28.11.17
+          - types-requests==2.30.0.0
           - types-tabulate==0.9.0.2
-          - types-python-dateutil==2.8.19.12
-          - SQLAlchemy==2.0.10
+          - types-python-dateutil==2.8.19.13
+          - SQLAlchemy==2.0.15
         # stages: [push]

   - repo: https://github.com/pycqa/isort
@@ -30,7 +30,7 @@ repos:

   - repo: https://github.com/charliermarsh/ruff-pre-commit
     # Ruff version.
-    rev: 'v0.0.255'
+    rev: 'v0.0.263'
     hooks:
       - id: ruff

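The hook bumps above are the kind of change `pre-commit autoupdate` produces; note that `autoupdate` only moves the `rev:` pins, while the `additional_dependencies` versions are edited by hand. A sketch of the usual workflow, assuming `pre-commit` is installed:

```bash
pre-commit autoupdate        # bump each hook's rev: to its latest tag
pre-commit run --all-files   # re-run every hook to confirm the repo still passes
```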
Dockerfile

@@ -25,7 +25,7 @@ FROM base as python-deps
 RUN apt-get update \
     && apt-get -y install build-essential libssl-dev git libffi-dev libgfortran5 pkg-config cmake gcc \
     && apt-get clean \
-    && pip install --upgrade pip==23.0.1 wheel==0.38.4
+    && pip install --upgrade pip wheel

 # Install TA-lib
 COPY build_helpers/* /tmp/
@@ -1,7 +1,7 @@
 # Downloads don't work automatically, since the URL is regenerated via javascript.
 # Downloaded from https://www.lfd.uci.edu/~gohlke/pythonlibs/#ta-lib

-python -m pip install --upgrade pip==23.0.1 wheel==0.38.4
+python -m pip install --upgrade pip wheel

 $pyv = python -c "import sys; print(f'{sys.version_info.major}.{sys.version_info.minor}')"

Binary file not shown.
docker-compose.yml

@@ -6,6 +6,15 @@ services:
     # image: freqtradeorg/freqtrade:develop
     # Use plotting image
     # image: freqtradeorg/freqtrade:develop_plot
+    # # Enable GPU Image and GPU Resources (only relevant for freqAI)
+    # # Make sure to uncomment the whole deploy section
+    # deploy:
+    #   resources:
+    #     reservations:
+    #       devices:
+    #         - driver: nvidia
+    #           count: 1
+    #           capabilities: [gpu]
     # Build step - only needed when additional dependencies are needed
     # build:
     #   context: .
@@ -16,7 +25,7 @@ services:
       - "./user_data:/freqtrade/user_data"
     # Expose api on port 8080 (localhost only)
     # Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
-    # before enabling this.
+    # for more information.
     ports:
       - "127.0.0.1:8080:8080"
     # Default command used when running `docker compose up`
docker/docker-compose-freqai.yml (new file, 36 lines)

@@ -0,0 +1,36 @@
+---
+version: '3'
+services:
+  freqtrade:
+    image: freqtradeorg/freqtrade:stable_freqaitorch
+    # # Enable GPU Image and GPU Resources
+    # # Make sure to uncomment the whole deploy section
+    # deploy:
+    #   resources:
+    #     reservations:
+    #       devices:
+    #         - driver: nvidia
+    #           count: 1
+    #           capabilities: [gpu]
+
+    # Build step - only needed when additional dependencies are needed
+    # build:
+    #   context: .
+    #   dockerfile: "./docker/Dockerfile.custom"
+    restart: unless-stopped
+    container_name: freqtrade
+    volumes:
+      - "./user_data:/freqtrade/user_data"
+    # Expose api on port 8080 (localhost only)
+    # Please read the https://www.freqtrade.io/en/stable/rest-api/ documentation
+    # for more information.
+    ports:
+      - "127.0.0.1:8080:8080"
+    # Default command used when running `docker compose up`
+    command: >
+      trade
+      --logfile /freqtrade/user_data/logs/freqtrade.log
+      --db-url sqlite:////freqtrade/user_data/tradesv3.sqlite
+      --config /freqtrade/user_data/config.json
+      --freqai-model XGBoostClassifier
+      --strategy SampleStrategy
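As the documentation changes further down note, this compose file can be used directly rather than replacing the main `docker-compose.yml`. A usage sketch, run from the repository root:

```bash
# Start the FreqAI/torch container in the background, then follow its logs
docker compose -f docker/docker-compose-freqai.yml up -d
docker compose -f docker/docker-compose-freqai.yml logs -f freqtrade
```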
docs/advanced-backtesting.md

@@ -29,7 +29,7 @@ If all goes well, you should now see a `backtest-result-{timestamp}_signals.pkl`
 `user_data/backtest_results` folder.

 To analyze the entry/exit tags, we now need to use the `freqtrade backtesting-analysis` command
-with `--analysis-groups` option provided with space-separated arguments (default `0 1 2`):
+with `--analysis-groups` option provided with space-separated arguments:

 ``` bash
 freqtrade backtesting-analysis -c <config.json> --analysis-groups 0 1 2 3 4 5
@@ -39,6 +39,7 @@ This command will read from the last backtesting results. The `--analysis-groups
 used to specify the various tabular outputs showing the profit of each group or trade,
 ranging from the simplest (0) to the most detailed per pair, per buy and per sell tag (4):

+* 0: overall winrate and profit summary by enter_tag
 * 1: profit summaries grouped by enter_tag
 * 2: profit summaries grouped by enter_tag and exit_tag
 * 3: profit summaries grouped by pair and enter_tag
@@ -115,3 +116,38 @@ For example, if your backtest timerange was `20220101-20221231` but you only wan
 ```bash
 freqtrade backtesting-analysis -c <config.json> --timerange 20220101-20220201
 ```
+
+### Printing out rejected signals
+
+Use the `--rejected-signals` option to print out rejected signals.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --rejected-signals
+```
+
+### Writing tables to CSV
+
+Some of the tabular outputs can become large, so printing them out to the terminal is not preferable.
+Use the `--analysis-to-csv` option to disable printing out of tables to standard out and write them to CSV files.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv
+```
+
+By default this will write one file per output table you specified in the `backtesting-analysis` command, e.g.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --rejected-signals --analysis-groups 0 1
+```
+
+This will write to `user_data/backtest_results`:
+
+* rejected_signals.csv
+* group_0.csv
+* group_1.csv
+
+To override where the files will be written, also specify the `--analysis-csv-path` option.
+
+```bash
+freqtrade backtesting-analysis -c <config.json> --analysis-to-csv --analysis-csv-path another/data/path/
+```
docs/developer.md

@@ -327,18 +327,18 @@ To check how the new exchange behaves, you can use the following snippet:

 ``` python
 import ccxt
-from datetime import datetime
+from datetime import datetime, timezone
 from freqtrade.data.converter import ohlcv_to_dataframe
-ct = ccxt.binance()
+ct = ccxt.binance()  # Use the exchange you're testing
 timeframe = "1d"
-pair = "XLM/BTC"  # Make sure to use a pair that exists on that exchange!
+pair = "BTC/USDT"  # Make sure to use a pair that exists on that exchange!
 raw = ct.fetch_ohlcv(pair, timeframe=timeframe)

 # convert to dataframe
 df1 = ohlcv_to_dataframe(raw, timeframe, pair=pair, drop_incomplete=False)

 print(df1.tail(1))
-print(datetime.utcnow())
+print(datetime.now(timezone.utc))
 ```

 ``` output
docs/freqai-configuration.md

@@ -248,9 +248,11 @@ The easiest way to quickly run a pytorch model is with the following command (fo
 freqtrade trade --config config_examples/config_freqai.example.json --strategy FreqaiExampleStrategy --freqaimodel PyTorchMLPRegressor --strategy-path freqtrade/templates
 ```

-!!! note "Installation/docker"
+!!! Note "Installation/docker"
     The PyTorch module requires large packages such as `torch`, which should be explicitly requested during `./setup.sh -i` by answering "y" to the question "Do you also want dependencies for freqai-rl or PyTorch (~700mb additional space required) [y/N]?".
     Users who prefer docker should ensure they use the docker image appended with `_freqaitorch`.
+    We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file.
+    This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.

 ### Structure

@@ -395,3 +397,21 @@ Here we create a `PyTorchMLPRegressor` class that implements the `fit` method. T
     return dataframe
 ```

 To see a full example, you can refer to the [classifier test strategy class](https://github.com/freqtrade/freqtrade/blob/develop/tests/strategy/strats/freqai_test_classifier.py).
+
+#### Improving performance with `torch.compile()`
+
+Torch provides a `torch.compile()` method that can be used to improve performance for specific GPU hardware. More details can be found [here](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html). In brief, you simply wrap your `model` in `torch.compile()`:
+
+```python
+model = PyTorchMLPModel(
+    input_dim=n_features,
+    output_dim=1,
+    **self.model_kwargs
+)
+model.to(self.device)
+model = torch.compile(model)
+```
+
+Then proceed to use the model as normal. Keep in mind that doing this will remove eager execution, which means errors and tracebacks will not be informative.
docs/freqai-parameter-table.md

@@ -18,9 +18,10 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `purge_old_models` | Number of models to keep on disk (not relevant to backtesting). Default is 2, which means that dry/live runs will keep the latest 2 models on disk. Setting to 0 keeps all models. This parameter also accepts a boolean to maintain backwards compatibility. <br> **Datatype:** Integer. <br> Default: `2`.
 | `save_backtest_models` | Save models to disk when running backtesting. Backtesting operates most efficiently by saving the prediction data and reusing them directly for subsequent runs (when you wish to tune entry/exit parameters). Saving backtesting models to disk also allows to use the same model files for starting a dry/live instance with the same model `identifier`. <br> **Datatype:** Boolean. <br> Default: `False` (no models are saved).
 | `fit_live_predictions_candles` | Number of historical candles to use for computing target (label) statistics from prediction data, instead of from the training dataset (more information can be found [here](freqai-configuration.md#creating-a-dynamic-target-threshold)). <br> **Datatype:** Positive integer.
-| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). <br> **Datatype:** Boolean. <br> Default: `False`.
+| `continual_learning` | Use the final state of the most recently trained model as starting point for the new model, allowing for incremental learning (more information can be found [here](freqai-running.md#continual-learning)). Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the connections here primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market. <br> **Datatype:** Boolean. <br> Default: `False`.
 | `write_metrics_to_disk` | Collect train timings, inference timings and cpu usage in json file. <br> **Datatype:** Boolean. <br> Default: `False`
 | `data_kitchen_thread_count` | <br> Designate the number of threads you want to use for data processing (outlier methods, normalization, etc.). This has no impact on the number of threads used for training. If user does not set it (default), FreqAI will use max number of threads - 2 (leaving 1 physical core available for Freqtrade bot and FreqUI) <br> **Datatype:** Positive integer.
+| `activate_tensorboard` | <br> Indicate whether or not to activate tensorboard for the tensorboard enabled modules (currently Reinforcement Learning, XGBoost, Catboost, and PyTorch). Tensorboard needs Torch installed, which means you will need the torch/RL docker image or you need to answer "yes" to the install question about whether or not you wish to install Torch. <br> **Datatype:** Boolean. <br> Default: `True`.

 ### Feature parameters

@@ -114,5 +115,5 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 |------------|-------------|
 |            | **Extraneous parameters**
 | `freqai.keras` | If the selected model makes use of Keras (typical for TensorFlow-based prediction models), this flag needs to be activated so that the model save/loading follows Keras standards. <br> **Datatype:** Boolean. <br> Default: `False`.
-| `freqai.conv_width` | The width of a convolutional neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
+| `freqai.conv_width` | The width of a neural network input tensor. This replaces the need for shifting candles (`include_shifted_candles`) by feeding in historical data points as the second dimension of the tensor. Technically, this parameter can also be used for regressors, but it only adds computational overhead and does not change the model training/prediction. <br> **Datatype:** Integer. <br> Default: `2`.
 | `freqai.reduce_df_footprint` | Recast all numeric columns to float32/int32, with the objective of reducing ram/disk usage and decreasing train/inference timing. This parameter is set in the main level of the Freqtrade configuration file (not inside FreqAI). <br> **Datatype:** Boolean. <br> Default: `False`.
docs/freqai-reinforcement-learning.md

@@ -135,92 +135,104 @@ Parameter details can be found [here](freqai-parameter-table.md), but in general

 ## Creating a custom reward function

-As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but users are encouraged to create their own custom reinforcement learning model class (see below) and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
+!!! danger "Not for production"
+    Warning!
+    The reward function provided with the Freqtrade source code is a showcase of functionality designed to show/test as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark, it is *not* for live production. Please beware that you will need to create your own custom_reward() function or use a template built by other users outside of the Freqtrade source code.
+
+As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the `calculate_reward()` function inside the `MyRLEnv` class (see below). A default `calculate_reward()` is provided inside `prediction_models/ReinforcementLearner.py` to demonstrate the necessary building blocks for creating rewards, but this is *not* designed for production. Users *must* create their own custom reinforcement learning model class or use a pre-built one from outside the Freqtrade source code and save it to `user_data/freqaimodels`. It is inside the `calculate_reward()` where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:
+
+!!! note "Hint"
+    The best reward functions are ones that are continuously differentiable, and well scaled. In other words, adding a single large negative penalty to a rare event is not a good idea, and the neural net will not be able to learn that function. Instead, it is better to add a small negative penalty to a common event. This will help the agent learn faster. Not only this, but you can help improve the continuity of your rewards/penalties by having them scale with severity according to some linear/exponential functions. In other words, you'd slowly scale the penalty as the duration of the trade increases. This is better than a single large penalty occurring at a single point in time.

 ```python
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions


 class MyCoolRLModel(ReinforcementLearner):
     """
     User created RL prediction model.

     Save this file to `freqtrade/user_data/freqaimodels`

     then use it with:

     freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat

     Here the users can override any of the functions
     available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this
     is where the user overrides `MyRLEnv` (see below), to define custom
     `calculate_reward()` function, or to override any other parts of the environment.

     This class also allows users to override any other part of the IFreqaiModel tree.
     For example, the user can override `def fit()` or `def train()` or `def predict()`
     to take fine-tuned control over these processes.

     Another common override may be `def data_cleaning_predict()` where the user can
     take fine-tuned control over the data handling pipeline.
     """
     class MyRLEnv(Base5ActionRLEnv):
         """
         User made custom environment. This class inherits from BaseEnvironment and gym.env.
         Users can override any functions from those parent classes. Here is an example
         of a user customized `calculate_reward()` function.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many possible
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
         """
         def calculate_reward(self, action: int) -> float:
             # first, penalize if the action is not valid
             if not self._is_valid(action):
                 return -2
             pnl = self.get_unrealized_profit()

             factor = 100

             pair = self.pair.replace(':', '')

             # you can use feature values from dataframe
             # Assumes the shifted RSI indicator has been generated in the strategy.
             rsi_now = self.raw_features[f"%-rsi-period_10_shift-1_{pair}_"
                                         f"{self.config['timeframe']}"].iloc[self._current_tick]

             # reward agent for entering trades
             if (action in (Actions.Long_enter.value, Actions.Short_enter.value)
                     and self._position == Positions.Neutral):
                 if rsi_now < 40:
                     factor = 40 / rsi_now
                 else:
                     factor = 1
                 return 25 * factor

             # discourage agent from not entering trades
             if action == Actions.Neutral.value and self._position == Positions.Neutral:
                 return -1
             max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300)
             trade_duration = self._current_tick - self._last_trade_tick
             if trade_duration <= max_trade_duration:
                 factor *= 1.5
             elif trade_duration > max_trade_duration:
                 factor *= 0.5
             # discourage sitting in position
             if self._position in (Positions.Short, Positions.Long) and \
                     action == Actions.Neutral.value:
                 return -1 * trade_duration / max_trade_duration
             # close long
             if action == Actions.Long_exit.value and self._position == Positions.Long:
                 if pnl > self.profit_aim * self.rr:
                     factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                 return float(pnl * factor)
             # close short
             if action == Actions.Short_exit.value and self._position == Positions.Short:
                 if pnl > self.profit_aim * self.rr:
                     factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2)
                 return float(pnl * factor)
             return 0.
 ```

-### Using Tensorboard
+## Using Tensorboard

 Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:

@@ -233,32 +245,30 @@ where `unique-id` is the `identifier` set in the `freqai` configuration file. Th

 ![tensorboard](assets/tensorboard.jpg)

-### Custom logging
+## Custom logging

 FreqAI also provides a built in episodic summary logger called `self.tensorboard_log` for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.

 `self.tensorboard_log` can also be used anywhere inside the environment, for example, it can be added to the `calculate_reward` function to collect more detailed information about how often various parts of the reward were called:

-```py
+```python
 class MyRLEnv(Base5ActionRLEnv):
     """
     User made custom environment. This class inherits from BaseEnvironment and gym.env.
     Users can override any functions from those parent classes. Here is an example
     of a user customized `calculate_reward()` function.
     """
     def calculate_reward(self, action: int) -> float:
         if not self._is_valid(action):
             self.tensorboard_log("invalid")
             return -2

 ```

 !!! Note
     The `self.tensorboard_log()` function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. `self.tensorboard_log("float_metric1", 0.23)`. In this case the metric values are not incremented.

-### Choosing a base environment
+## Choosing a base environment

 FreqAI provides three base environments, `Base3ActionRLEnvironment`, `Base4ActionEnvironment` and `Base5ActionEnvironment`. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The `Base3ActionEnvironment` is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the `can_short` flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the `Base4ActionEnvironment`, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the `Base5ActionEnvironment`, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:
docs/freqai-running.md

@@ -131,6 +131,9 @@ You can choose to adopt a continual learning scheme by setting `"continual_learn
 ???+ danger "Continual learning enforces a constant parameter space"
     Since `continual_learning` means that the model parameter space *cannot* change between trainings, `principal_component_analysis` is automatically disabled when `continual_learning` is enabled. Hint: PCA changes the parameter space and the number of features, learn more about PCA [here](freqai-feature-engineering.md#data-dimensionality-reduction-with-principal-component-analysis).

+???+ danger "Experimental functionality"
+    Beware that this is currently a naive approach to incremental learning, and it has a high probability of overfitting/getting stuck in local minima while the market moves away from your model. We have the mechanics available in FreqAI primarily for experimental purposes and so that it is ready for more mature approaches to continual learning in chaotic systems like the crypto market.
+
 ## Hyperopt

 You can hyperopt using the same command as for [typical Freqtrade hyperopt](hyperopt.md):
@@ -158,7 +161,14 @@ This specific hyperopt would help you understand the appropriate `DI_values` for

 ## Using Tensorboard

-CatBoost models benefit from tracking training metrics via Tensorboard. You can take advantage of the FreqAI integration to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:
+!!! note "Availability"
+    FreqAI includes tensorboard for a variety of models, including XGBoost, all PyTorch models, Reinforcement Learning, and Catboost. If you would like to see Tensorboard integrated into another model type, please open an issue on the [Freqtrade GitHub](https://github.com/freqtrade/freqtrade/issues)
+
+!!! danger "Requirements"
+    Tensorboard logging requires the FreqAI torch installation/docker image.
+
+The easiest way to use tensorboard is to ensure `freqai.activate_tensorboard` is set to `True` (default setting) in your configuration file, run FreqAI, then open a separate shell and run:

 ```bash
 cd freqtrade
@@ -168,3 +178,7 @@ tensorboard --logdir user_data/models/unique-id
 where `unique-id` is the `identifier` set in the `freqai` configuration file. This command must be run in a separate shell if you wish to view the output in your browser at 127.0.0.1:6060 (6060 is the default port used by Tensorboard).

 ![tensorboard](assets/tensorboard.jpg)
+
+!!! note "Deactivate for improved performance"
+    Tensorboard logging can slow down training and should be deactivated for production use.
docs/freqai.md

@@ -34,6 +34,9 @@ freqtrade trade --config config_examples/config_freqai.example.json --strategy F

 You will see the boot-up process of automatic data downloading, followed by simultaneous training and trading.

+!!! danger "Not for production"
+    The example strategy provided with the Freqtrade source code is designed for showcasing/testing a wide variety of FreqAI features. It is also designed to run on small computers so that it can be used as a benchmark between developers and users. It is *not* designed to be run in production.
+
 An example strategy, prediction model, and config to use as starting points can be found in
 `freqtrade/templates/FreqaiExampleStrategy.py`, `freqtrade/freqai/prediction_models/LightGBMRegressor.py`, and
 `config_examples/config_freqai.example.json`, respectively.
@@ -69,16 +72,15 @@ pip install -r requirements-freqai.txt
 ```

 !!! Note
-    Catboost will not be installed on arm devices (raspberry, Mac M1, ARM based VPS, ...), since it does not provide wheels for this platform.
+    Catboost will not be installed on low-powered arm devices (raspberry), since it does not provide wheels for this platform.

-!!! Note "python 3.11"
-    Some dependencies (Catboost, Torch) currently don't support python 3.11. Freqtrade therefore only supports python 3.10 for these models/dependencies.
-    Tests involving these dependencies are skipped on 3.11.
-
 ### Usage with docker

 If you are using docker, a dedicated tag with FreqAI dependencies is available as `:freqai`. As such - you can replace the image line in your docker compose file with `image: freqtradeorg/freqtrade:develop_freqai`. This image contains the regular FreqAI dependencies. Similar to native installs, Catboost will not be available on ARM based devices.

+!!! note "docker-compose-freqai.yml"
+    We do provide an explicit docker-compose file for this in `docker/docker-compose-freqai.yml` - which can be used via `docker compose -f docker/docker-compose-freqai.yml run ...` - or can be copied to replace the original docker file. This docker-compose file also contains a (disabled) section to enable GPU resources within docker containers. This obviously assumes the system has GPU resources available.
+
 ### FreqAI position in open-source machine learning landscape

 Forecasting chaotic time-series based systems, such as equity/cryptocurrency markets, requires a broad set of tools geared toward testing a wide range of hypotheses. Fortunately, a recent maturation of robust machine learning libraries (e.g. `scikit-learn`) has opened up a wide range of research possibilities. Scientists from a diverse range of fields can now easily prototype their studies on an abundance of established machine learning algorithms. Similarly, these user-friendly libraries enable "citizen scientists" to use their basic Python skills for data exploration. However, leveraging these machine learning libraries on historical and live chaotic data sources can be logistically difficult and expensive. Additionally, robust data collection, storage, and handling presents a disparate challenge. [`FreqAI`](#freqai) aims to provide a generalized and extensible open-sourced framework geared toward live deployments of adaptive modeling for market forecasting. The `FreqAI` framework is effectively a sandbox for the rich world of open-source machine learning libraries. Inside the `FreqAI` sandbox, users find they can combine a wide variety of third-party libraries to test creative hypotheses on a free live 24/7 chaotic data source - cryptocurrency exchange data.
docs/installation.md

@@ -30,12 +30,6 @@ The easiest way to install and run Freqtrade is to clone the bot Github reposito
 !!! Warning "Up-to-date clock"
     The clock on the system running the bot must be accurate, synchronized to a NTP server frequently enough to avoid problems with communication to the exchanges.

-!!! Error "Running setup.py install for gym did not run successfully."
-    If you get an error related with gym we suggest you to downgrade setuptools it to version 65.5.0 you can do it with the following command:
-    ```bash
-    pip install setuptools==65.5.0
-    ```
-
 ------

 ## Requirements
@@ -242,6 +236,7 @@ source .env/bin/activate

 ```bash
 python3 -m pip install --upgrade pip
+python3 -m pip install -r requirements.txt
 python3 -m pip install -e .
 ```
docs/requirements-docs.txt

@@ -1,6 +1,6 @@
 markdown==3.3.7
-mkdocs==1.4.2
-mkdocs-material==9.1.7
+mkdocs==1.4.3
+mkdocs-material==9.1.14
 mdx_truly_sane_lists==1.3
-pymdown-extensions==9.11
+pymdown-extensions==10.0.1
 jinja2==3.1.2
docs/rest-api.md

@@ -134,7 +134,9 @@ python3 scripts/rest_client.py --config rest_config.json <command> [optional par
 | `reload_config` | Reloads the configuration file.
 | `trades` | List last trades. Limited to 500 trades per call.
 | `trade/<tradeid>` | Get specific trade.
-| `delete_trade <trade_id>` | Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `trade/<tradeid>` | DELETE - Remove trade from the database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `trade/<tradeid>/open-order` | DELETE - Cancel open order for this trade.
+| `trade/<tradeid>/reload` | GET - Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
 | `show_config` | Shows part of the current configuration with relevant settings to operation.
 | `logs` | Shows last log messages.
 | `status` | Lists all open trades.
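A sketch of exercising the new endpoints through `scripts/rest_client.py`. The command names used below (`delete_trade`, `cancel_open_order`, `reload_trade`) are assumed mappings to the table rows above and are not shown in this diff, so treat them as hypothetical:

```bash
# DELETE trade/<tradeid> - remove trade 23 from the database
python3 scripts/rest_client.py --config rest_config.json delete_trade 23
# DELETE trade/<tradeid>/open-order - cancel trade 23's open order (assumed command name)
python3 scripts/rest_client.py --config rest_config.json cancel_open_order 23
# GET trade/<tradeid>/reload - re-sync trade 23 from the exchange (assumed command name)
python3 scripts/rest_client.py --config rest_config.json reload_trade 23
```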
@@ -227,8 +227,8 @@ for val in self.buy_ema_short.range:
         f'ema_short_{val}': ta.EMA(dataframe, timeperiod=val)
     }))

-# Append columns to existing dataframe
-merged_frame = pd.concat(frames, axis=1)
+# Combine all dataframes, and reassign the original dataframe column
+dataframe = pd.concat(frames, axis=1)
 ```

 Freqtrade does however also counter this by running `dataframe.copy()` on the dataframe right after the `populate_indicators()` method - so performance implications of this should be low to non-existent.
docs/telegram-usage.md

@@ -187,11 +187,13 @@ official commands. You can ask at any moment for help with `/help`.
 | `/forcelong <pair> [rate]` | Instantly buys the given pair. Rate is optional and only applies to limit orders. (`force_entry_enable` must be set to True)
 | `/forceshort <pair> [rate]` | Instantly shorts the given pair. Rate is optional and only applies to limit orders. This will only work on non-spot markets. (`force_entry_enable` must be set to True)
 | `/delete <trade_id>` | Delete a specific trade from the Database. Tries to close open orders. Requires manual handling of this trade on the exchange.
+| `/reload_trade <trade_id>` | Reload a trade from the Exchange. Only works in live, and can potentially help recover a trade that was manually sold on the exchange.
 | `/cancel_open_order <trade_id> | /coo <trade_id>` | Cancel an open order for a trade.
 | **Metrics** |
 | `/profit [<n>]` | Display a summary of your profit/loss from close trades and some stats about your performance, over the last n days (all trades by default)
 | `/performance` | Show performance of each finished trade grouped by pair
-| `/balance` | Show account balance per currency
+| `/balance` | Show bot managed balance per currency
+| `/balance full` | Show account balance per currency
 | `/daily <n>` | Shows profit or loss per day, over the last n days (n defaults to 7)
 | `/weekly <n>` | Shows profit or loss per week, over the last n weeks (n defaults to 8)
 | `/monthly <n>` | Shows profit or loss per month, over the last n months (n defaults to 6)
@@ -202,7 +204,6 @@ official commands. You can ask at any moment for help with `/help`.
 | `/blacklist [pair]` | Show the current blacklist, or adds a pair to the blacklist.
 | `/edge` | Show validated pairs by Edge if it is enabled.
-

 ## Telegram commands in action

 Below are examples of the Telegram messages you will receive for each command.
@@ -723,6 +723,9 @@ usage: freqtrade backtesting-analysis [-h] [-v] [--logfile FILE] [-V]
                                       [--exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]]
                                       [--indicator-list INDICATOR_LIST [INDICATOR_LIST ...]]
                                       [--timerange YYYYMMDD-[YYYYMMDD]]
+                                      [--rejected]
+                                      [--analysis-to-csv]
+                                      [--analysis-csv-path PATH]

 optional arguments:
   -h, --help            show this help message and exit

@@ -736,19 +739,27 @@ optional arguments:
                         pair and enter_tag, 4: by pair, enter_ and exit_tag
                         (this can get quite large)
   --enter-reason-list ENTER_REASON_LIST [ENTER_REASON_LIST ...]
-                        Comma separated list of entry signals to analyse.
-                        Default: all. e.g. 'entry_tag_a,entry_tag_b'
+                        Space separated list of entry signals to analyse.
+                        Default: all. e.g. 'entry_tag_a entry_tag_b'
   --exit-reason-list EXIT_REASON_LIST [EXIT_REASON_LIST ...]
-                        Comma separated list of exit signals to analyse.
+                        Space separated list of exit signals to analyse.
                         Default: all. e.g.
-                        'exit_tag_a,roi,stop_loss,trailing_stop_loss'
+                        'exit_tag_a roi stop_loss trailing_stop_loss'
   --indicator-list INDICATOR_LIST [INDICATOR_LIST ...]
-                        Comma separated list of indicators to analyse. e.g.
-                        'close,rsi,bb_lowerband,profit_abs'
+                        Space separated list of indicators to analyse. e.g.
+                        'close rsi bb_lowerband profit_abs'
   --timerange YYYYMMDD-[YYYYMMDD]
                         Timerange to filter trades for analysis,
                         start inclusive, end exclusive. e.g.
                         20220101-20220201
+  --rejected            Print out rejected trades table
+  --analysis-to-csv     Write out tables to individual CSVs, by default to
+                        'user_data/backtest_results' unless '--analysis-csv-path' is given.
+  --analysis-csv-path [PATH]
+                        Optional path where individual CSVs will be written. If not used,
+                        CSVs will be written to 'user_data/backtest_results'.

 Common arguments:
   -v, --verbose         Verbose mode (-vv for more, -vvv to get all messages).
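Putting the new options together, a typical invocation might look like the following; the tag names and CSV directory are placeholders, and the directory must already exist:

```bash
# Analyse two entry tags, include the rejected-signals table,
# and write every selected table to its own CSV file
freqtrade backtesting-analysis \
    --analysis-groups 0 2 \
    --enter-reason-list entry_tag_a entry_tag_b \
    --rejected \
    --analysis-to-csv \
    --analysis-csv-path user_data/backtest_results/
```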
@@ -1,5 +1,5 @@
 """ Freqtrade bot """
-__version__ = '2023.4'
+__version__ = '2023.5'

 if 'dev' in __version__:
     from pathlib import Path

@@ -106,7 +106,8 @@ ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperop
                      "disableparamexport", "backtest_breakdown"]

 ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
-                              "exit_reason_list", "indicator_list", "timerange"]
+                              "exit_reason_list", "indicator_list", "timerange",
+                              "analysis_rejected", "analysis_to_csv", "analysis_csv_path"]

 NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
                     "list-markets", "list-pairs", "list-strategies", "list-freqaimodels",

@@ -636,30 +636,45 @@ AVAILABLE_CLI_OPTIONS = {
               "4: by pair, enter_ and exit_tag (this can get quite large), "
               "5: by exit_tag"),
         nargs='+',
-        default=['0', '1', '2'],
+        default=[],
         choices=['0', '1', '2', '3', '4', '5'],
     ),
     "enter_reason_list": Arg(
         "--enter-reason-list",
-        help=("Comma separated list of entry signals to analyse. Default: all. "
-              "e.g. 'entry_tag_a,entry_tag_b'"),
+        help=("Space separated list of entry signals to analyse. Default: all. "
+              "e.g. 'entry_tag_a entry_tag_b'"),
         nargs='+',
         default=['all'],
     ),
     "exit_reason_list": Arg(
         "--exit-reason-list",
-        help=("Comma separated list of exit signals to analyse. Default: all. "
-              "e.g. 'exit_tag_a,roi,stop_loss,trailing_stop_loss'"),
+        help=("Space separated list of exit signals to analyse. Default: all. "
+              "e.g. 'exit_tag_a roi stop_loss trailing_stop_loss'"),
         nargs='+',
         default=['all'],
     ),
     "indicator_list": Arg(
         "--indicator-list",
-        help=("Comma separated list of indicators to analyse. "
-              "e.g. 'close,rsi,bb_lowerband,profit_abs'"),
+        help=("Space separated list of indicators to analyse. "
+              "e.g. 'close rsi bb_lowerband profit_abs'"),
         nargs='+',
         default=[],
     ),
+    "analysis_rejected": Arg(
+        '--rejected-signals',
+        help='Analyse rejected signals',
+        action='store_true',
+    ),
+    "analysis_to_csv": Arg(
+        '--analysis-to-csv',
+        help='Save selected analysis tables to individual CSVs',
+        action='store_true',
+    ),
+    "analysis_csv_path": Arg(
+        '--analysis-csv-path',
+        help=("Specify a path to save the analysis CSVs "
+              "if --analysis-to-csv is enabled. Default: user_data/backtesting_results/"),
+    ),
     "freqaimodel": Arg(
         '--freqaimodel',
         help='Specify a custom freqaimodels.',

@@ -52,7 +52,7 @@ def start_download_data(args: Dict[str, Any]) -> None:
     pairs_not_available: List[str] = []

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)
     markets = [p for p, m in exchange.markets.items() if market_is_active(m)
                or config.get('include_inactive')]

@@ -125,7 +125,7 @@ def start_convert_trades(args: Dict[str, Any]) -> None:
                 "Please check the documentation on how to configure this.")

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)
     # Manual validations of relevant settings
     if not config['exchange'].get('skip_pair_validation', False):
         exchange.validate_pairs(config['pairs'])

@@ -114,7 +114,7 @@ def start_list_timeframes(args: Dict[str, Any]) -> None:
     config['timeframe'] = None

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     if args['print_one_column']:
         print('\n'.join(exchange.timeframes))

@@ -133,7 +133,7 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
     config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)

     # Init exchange
-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     # By default only active pairs/markets are to be shown
     active_only = not args.get('list_pairs_all', False)

@@ -18,7 +18,7 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
     from freqtrade.plugins.pairlistmanager import PairListManager
     config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)

-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=False)
+    exchange = ExchangeResolver.load_exchange(config, validate=False)

     quote_currencies = args.get('quote_currencies')
     if not quote_currencies:

@@ -174,7 +174,7 @@ def _validate_whitelist(conf: Dict[str, Any]) -> None:
         return

     for pl in conf.get('pairlists', [{'method': 'StaticPairList'}]):
-        if (pl.get('method') == 'StaticPairList'
+        if (isinstance(pl, dict) and pl.get('method') == 'StaticPairList'
                 and not conf.get('exchange', {}).get('pair_whitelist')):
             raise OperationalException("StaticPairList requires pair_whitelist to be set.")

@@ -465,6 +465,15 @@ class Configuration:
         self._args_to_config(config, argname='timerange',
                             logstring='Filter trades by timerange: {}')

+        self._args_to_config(config, argname='analysis_rejected',
+                            logstring='Analyse rejected signals: {}')
+
+        self._args_to_config(config, argname='analysis_to_csv',
+                            logstring='Store analysis tables to CSV: {}')
+
+        self._args_to_config(config, argname='analysis_csv_path',
+                            logstring='Path to store analysis CSVs: {}')
+
     def _process_runmode(self, config: Config) -> None:

         self._args_to_config(config, argname='dry_run',

@@ -6,8 +6,6 @@ import re
 from datetime import datetime, timezone
 from typing import Optional

-import arrow
-
 from freqtrade.constants import DATETIME_PRINT_FORMAT
 from freqtrade.exceptions import OperationalException

@@ -139,7 +137,8 @@ class TimeRange:
             if stype[0]:
                 starts = rvals[index]
                 if stype[0] == 'date' and len(starts) == 8:
-                    start = arrow.get(starts, 'YYYYMMDD').int_timestamp
+                    start = int(datetime.strptime(starts, '%Y%m%d').replace(
+                        tzinfo=timezone.utc).timestamp())
                 elif len(starts) == 13:
                     start = int(starts) // 1000
                 else:

@@ -148,7 +147,8 @@ class TimeRange:
             if stype[1]:
                 stops = rvals[index]
                 if stype[1] == 'date' and len(stops) == 8:
-                    stop = arrow.get(stops, 'YYYYMMDD').int_timestamp
+                    stop = int(datetime.strptime(stops, '%Y%m%d').replace(
+                        tzinfo=timezone.utc).timestamp())
                 elif len(stops) == 13:
                     stop = int(stops) // 1000
                 else:

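The two parsing branches above are equivalent for an 8-character `YYYYMMDD` string, which a quick standalone check illustrates (the sample date is arbitrary):

```python
from datetime import datetime, timezone

starts = "20220101"
# The stdlib replacement: parse, pin to UTC, convert to epoch seconds
start = int(datetime.strptime(starts, "%Y%m%d").replace(tzinfo=timezone.utc).timestamp())
assert start == 1640995200  # 2022-01-01T00:00:00Z
```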
@@ -690,4 +690,6 @@ BidAsk = Literal['bid', 'ask']
 OBLiteral = Literal['asks', 'bids']

 Config = Dict[str, Any]
+# Exchange part of the configuration.
+ExchangeConfig = Dict[str, Any]
 IntOrInf = float

@@ -1,5 +1,6 @@
 import logging
 from pathlib import Path
+from typing import List

 import joblib
 import pandas as pd

@@ -15,22 +16,31 @@ from freqtrade.exceptions import OperationalException
 logger = logging.getLogger(__name__)


-def _load_signal_candles(backtest_dir: Path):
+def _load_backtest_analysis_data(backtest_dir: Path, name: str):
     if backtest_dir.is_dir():
         scpf = Path(backtest_dir,
-                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_signals.pkl"
+                    Path(get_latest_backtest_filename(backtest_dir)).stem + "_" + name + ".pkl"
                     )
     else:
-        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_signals.pkl")
+        scpf = Path(backtest_dir.parent / f"{backtest_dir.stem}_{name}.pkl")

     try:
         with scpf.open("rb") as scp:
-            signal_candles = joblib.load(scp)
-            logger.info(f"Loaded signal candles: {str(scpf)}")
+            loaded_data = joblib.load(scp)
+            logger.info(f"Loaded {name} candles: {str(scpf)}")
     except Exception as e:
-        logger.error("Cannot load signal candles from pickled results: ", e)
+        logger.error(f"Cannot load {name} data from pickled results: ", e)
+        return None

-    return signal_candles
+    return loaded_data
+
+
+def _load_rejected_signals(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "rejected")
+
+
+def _load_signal_candles(backtest_dir: Path):
+    return _load_backtest_analysis_data(backtest_dir, "signals")


 def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_candles):
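After this refactor both analysis pickles follow a single naming scheme derived from the latest backtest result. A short illustration of the file names the loader resolves; the result file name is a placeholder:

```python
from pathlib import Path

stem = Path("backtest-result-2023-05-01_12-00-00.json").stem
for name in ("signals", "rejected"):
    # e.g. backtest-result-2023-05-01_12-00-00_signals.pkl
    print(stem + "_" + name + ".pkl")
```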
@@ -43,9 +53,7 @@ def _process_candles_and_indicators(pairlist, strategy_name, trades, signal_cand
         for pair in pairlist:
             if pair in signal_candles[strategy_name]:
                 analysed_trades_dict[strategy_name][pair] = _analyze_candles_and_indicators(
-                    pair,
-                    trades,
-                    signal_candles[strategy_name][pair])
+                    pair, trades, signal_candles[strategy_name][pair])
     except Exception as e:
         print(f"Cannot process entry/exit reasons for {strategy_name}: ", e)

@@ -85,7 +93,7 @@ def _analyze_candles_and_indicators(pair, trades: pd.DataFrame, signal_candles:
     return pd.DataFrame()


-def _do_group_table_output(bigdf, glist):
+def _do_group_table_output(bigdf, glist, csv_path: Path, to_csv=False):
     for g in glist:
         # 0: summary wins/losses grouped by enter tag
         if g == "0":

@@ -116,7 +124,8 @@ def _do_group_table_output(bigdf, glist):

             sortcols = ['total_num_buys']

-            _print_table(new, sortcols, show_index=True)
+            _print_table(new, sortcols, show_index=True, name="Group 0:",
+                         to_csv=to_csv, csv_path=csv_path)

         else:
             agg_mask = {'profit_abs': ['count', 'sum', 'median', 'mean'],

@@ -154,11 +163,24 @@ def _do_group_table_output(bigdf, glist):
             new['mean_profit_pct'] = new['mean_profit_pct'] * 100
             new['total_profit_pct'] = new['total_profit_pct'] * 100

-            _print_table(new, sortcols)
+            _print_table(new, sortcols, name=f"Group {g}:",
+                         to_csv=to_csv, csv_path=csv_path)
         else:
             logger.warning("Invalid group mask specified.")


+def _do_rejected_signals_output(rejected_signals_df: pd.DataFrame,
+                                to_csv: bool = False, csv_path=None) -> None:
+    cols = ['pair', 'date', 'enter_tag']
+    sortcols = ['date', 'pair', 'enter_tag']
+    _print_table(rejected_signals_df[cols],
+                 sortcols,
+                 show_index=False,
+                 name="Rejected Signals:",
+                 to_csv=to_csv,
+                 csv_path=csv_path)
+
+
 def _select_rows_within_dates(df, timerange=None, df_date_col: str = 'date'):
     if timerange:
         if timerange.starttype == 'date':

@@ -192,38 +214,64 @@ prepare_results(analysed_trades, stratname,
     return res_df


-def print_results(res_df, analysis_groups, indicator_list):
+def print_results(res_df: pd.DataFrame, analysis_groups: List[str], indicator_list: List[str],
+                  csv_path: Path, rejected_signals=None, to_csv=False):
     if res_df.shape[0] > 0:
         if analysis_groups:
-            _do_group_table_output(res_df, analysis_groups)
+            _do_group_table_output(res_df, analysis_groups, to_csv=to_csv, csv_path=csv_path)

+        if rejected_signals is not None:
+            if rejected_signals.empty:
+                print("There were no rejected signals.")
+            else:
+                _do_rejected_signals_output(rejected_signals, to_csv=to_csv, csv_path=csv_path)
+
+        # NB this can be large for big dataframes!
         if "all" in indicator_list:
-            print(res_df)
-        elif indicator_list is not None:
+            _print_table(res_df,
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
+        elif indicator_list is not None and indicator_list:
             available_inds = []
             for ind in indicator_list:
                 if ind in res_df:
                     available_inds.append(ind)
             ilist = ["pair", "enter_reason", "exit_reason"] + available_inds
-            _print_table(res_df[ilist], sortcols=['exit_reason'], show_index=False)
+            _print_table(res_df[ilist],
+                         sortcols=['exit_reason'],
+                         show_index=False,
+                         name="Indicators:",
+                         to_csv=to_csv,
+                         csv_path=csv_path)
     else:
         print("\\No trades to show")


-def _print_table(df, sortcols=None, show_index=False):
+def _print_table(df: pd.DataFrame, sortcols=None, *, show_index=False, name=None,
+                 to_csv=False, csv_path: Path):
     if (sortcols is not None):
         data = df.sort_values(sortcols)
     else:
         data = df

-    print(
-        tabulate(
-            data,
-            headers='keys',
-            tablefmt='psql',
-            showindex=show_index
-        )
-    )
+    if to_csv:
+        safe_name = Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")
+        data.to_csv(safe_name)
+        print(f"Saved {name} to {safe_name}")
+    else:
+        if name is not None:
+            print(name)
+
+        print(
+            tabulate(
+                data,
+                headers='keys',
+                tablefmt='psql',
+                showindex=show_index
+            )
+        )


 def process_entry_exit_reasons(config: Config):

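The CSV branch of `_print_table` above derives each file name from the table's display name. A tiny standalone sketch of that transformation; the directory and table names are placeholders:

```python
from pathlib import Path

csv_path = Path("user_data/backtest_results")
for name in ("Group 0:", "Rejected Signals:", "Indicators:"):
    # Lowercase, spaces to underscores, colons dropped, '.csv' appended
    safe_name = Path(csv_path, name.lower().replace(" ", "_").replace(":", "") + ".csv")
    print(safe_name)  # e.g. user_data/backtest_results/group_0.csv
```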
@@ -232,6 +280,11 @@ def process_entry_exit_reasons(config: Config):
     enter_reason_list = config.get('enter_reason_list', ["all"])
     exit_reason_list = config.get('exit_reason_list', ["all"])
     indicator_list = config.get('indicator_list', [])
+    do_rejected = config.get('analysis_rejected', False)
+    to_csv = config.get('analysis_to_csv', False)
+    csv_path = Path(config.get('analysis_csv_path', config['exportfilename']))
+    if to_csv and not csv_path.is_dir():
+        raise OperationalException(f"Specified directory {csv_path} does not exist.")

     timerange = TimeRange.parse_timerange(None if config.get(
         'timerange') is None else str(config.get('timerange')))

@@ -241,8 +294,16 @@ def process_entry_exit_reasons(config: Config):
     for strategy_name, results in backtest_stats['strategy'].items():
         trades = load_backtest_data(config['exportfilename'], strategy_name)

-        if not trades.empty:
+        if trades is not None and not trades.empty:
             signal_candles = _load_signal_candles(config['exportfilename'])
+
+            rej_df = None
+            if do_rejected:
+                rejected_signals_dict = _load_rejected_signals(config['exportfilename'])
+                rej_df = prepare_results(rejected_signals_dict, strategy_name,
+                                         enter_reason_list, exit_reason_list,
+                                         timerange=timerange)
+
             analysed_trades_dict = _process_candles_and_indicators(
                 config['exchange']['pair_whitelist'], strategy_name,
                 trades, signal_candles)

@@ -253,7 +314,10 @@ def process_entry_exit_reasons(config: Config):

             print_results(res_df,
                           analysis_groups,
-                          indicator_list)
+                          indicator_list,
+                          rejected_signals=rej_df,
+                          to_csv=to_csv,
+                          csv_path=csv_path)

     except ValueError as e:
         raise OperationalException(e) from e

@@ -1,10 +1,9 @@
 import logging
 import operator
-from datetime import datetime
+from datetime import datetime, timedelta
 from pathlib import Path
 from typing import Dict, List, Optional, Tuple

-import arrow
 from pandas import DataFrame, concat

 from freqtrade.configuration import TimeRange

@@ -236,8 +235,8 @@ def _download_pair_history(pair: str, *,
         new_data = exchange.get_historic_ohlcv(pair=pair,
                                                timeframe=timeframe,
                                                since_ms=since_ms if since_ms else
-                                               arrow.utcnow().shift(
-                                                   days=-new_pairs_days).int_timestamp * 1000,
+                                               int((datetime.now() - timedelta(days=new_pairs_days)
+                                                    ).timestamp()) * 1000,
                                                is_new_pair=data.empty,
                                                candle_type=candle_type,
                                                until_ms=until_ms if until_ms else None

@@ -349,7 +348,7 @@ def _download_trades_history(exchange: Exchange,
     trades = []

     if not since:
-        since = arrow.utcnow().shift(days=-new_pairs_days).int_timestamp * 1000
+        since = int((datetime.now() - timedelta(days=new_pairs_days)).timestamp()) * 1000

     from_id = trades[-1][1] if trades else None
     if trades and since < trades[-1][0]:

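Because these hunks swap `arrow.utcnow().shift(days=-new_pairs_days)` for a plain subtraction, the sign of the timedelta matters: the replacement has to subtract to land in the past. A quick standalone check of the intended computation, pinned to UTC here for determinism:

```python
from datetime import datetime, timedelta, timezone

new_pairs_days = 30
# Millisecond timestamp for "new_pairs_days ago", as used for since_ms / since
since = int((datetime.now(timezone.utc) - timedelta(days=new_pairs_days)).timestamp()) * 1000
assert since < int(datetime.now(timezone.utc).timestamp()) * 1000  # strictly in the past
```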
@@ -3,9 +3,9 @@
 import logging
 from collections import defaultdict
 from copy import deepcopy
+from datetime import timedelta
 from typing import Any, Dict, List, NamedTuple

-import arrow
 import numpy as np
 import utils_find_1st as utf1st
 from pandas import DataFrame

@@ -18,6 +18,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
 from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
 from freqtrade.strategy.interface import IStrategy
+from freqtrade.util import dt_now


 logger = logging.getLogger(__name__)

@@ -79,8 +80,8 @@ class Edge:
             self._stoploss_range_step
         )

-        self._timerange: TimeRange = TimeRange.parse_timerange("%s-" % arrow.now().shift(
-            days=-1 * self._since_number_of_days).format('YYYYMMDD'))
+        self._timerange: TimeRange = TimeRange.parse_timerange(
+            f"{(dt_now() - timedelta(days=self._since_number_of_days)).strftime('%Y%m%d')}-")
         if config.get('fee'):
             self.fee = config['fee']
         else:

@@ -97,7 +98,7 @@ class Edge:
         heartbeat = self.edge_config.get('process_throttle_secs')

         if (self._last_updated > 0) and (
-                self._last_updated + heartbeat > arrow.utcnow().int_timestamp):
+                self._last_updated + heartbeat > int(dt_now().timestamp())):
             return False

         data: Dict[str, Any] = {}

@@ -189,7 +190,7 @@ class Edge:
         # Fill missing, calculable columns, profit, duration , abs etc.
         trades_df = self._fill_calculable_fields(DataFrame(trades))
         self._cached_pairs = self._process_expectancy(trades_df)
-        self._last_updated = arrow.utcnow().int_timestamp
+        self._last_updated = int(dt_now().timestamp())

         return True

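The rewritten Edge constructor builds an open-ended timerange string from the current time. A sketch of the resulting format, using plain stdlib in place of freqtrade's `dt_now` helper (which returns the current UTC time):

```python
from datetime import datetime, timedelta, timezone

since_number_of_days = 14
start = (datetime.now(timezone.utc) - timedelta(days=since_number_of_days)).strftime("%Y%m%d")
timerange = f"{start}-"  # e.g. "20230503-" : fixed start date, open end
print(timerange)
```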
@@ -15,6 +15,7 @@ class ExitType(Enum):
     EMERGENCY_EXIT = "emergency_exit"
     CUSTOM_EXIT = "custom_exit"
     PARTIAL_EXIT = "partial_exit"
+    SOLD_ON_EXCHANGE = "sold_on_exchange"
     NONE = ""

     def __str__(self):

@@ -1,6 +1,6 @@
 # flake8: noqa: F401
 # isort: off
-from freqtrade.exchange.common import remove_credentials, MAP_EXCHANGE_CHILDCLASS
+from freqtrade.exchange.common import remove_exchange_credentials, MAP_EXCHANGE_CHILDCLASS
 from freqtrade.exchange.exchange import Exchange
 # isort: on
 from freqtrade.exchange.binance import Binance

@@ -1,10 +1,9 @@
 """ Binance exchange subclass """
 import logging
-from datetime import datetime
+from datetime import datetime, timezone
 from pathlib import Path
 from typing import Dict, List, Optional, Tuple

-import arrow
 import ccxt

 from freqtrade.enums import CandleType, MarginMode, PriceType, TradingMode

@@ -66,7 +65,7 @@ class Binance(Exchange):
         """
         try:
             if self.trading_mode == TradingMode.FUTURES and not self._config['dry_run']:
-                position_side = self._api.fapiPrivateGetPositionsideDual()
+                position_side = self._api.fapiPrivateGetPositionSideDual()
                 self._log_exchange_response('position_side_setting', position_side)
                 assets_margin = self._api.fapiPrivateGetMultiAssetsMargin()
                 self._log_exchange_response('multi_asset_margin', assets_margin)

@@ -105,8 +104,9 @@ class Binance(Exchange):
         if x and x[3] and x[3][0] and x[3][0][0] > since_ms:
             # Set starting date to first available candle.
             since_ms = x[3][0][0]
-            logger.info(f"Candle-data for {pair} available starting with "
-                        f"{arrow.get(since_ms // 1000).isoformat()}.")
+            logger.info(
+                f"Candle-data for {pair} available starting with "
+                f"{datetime.fromtimestamp(since_ms // 1000, tz=timezone.utc).isoformat()}.")

         return await super()._async_get_historic_ohlcv(
             pair=pair,

[File diff suppressed because it is too large]

@@ -4,6 +4,7 @@ import time
 from functools import wraps
 from typing import Any, Callable, Optional, TypeVar, cast, overload

+from freqtrade.constants import ExchangeConfig
 from freqtrade.exceptions import DDosProtection, RetryableOrderError, TemporaryError
 from freqtrade.mixins import LoggingMixin

@@ -84,20 +85,22 @@ EXCHANGE_HAS_OPTIONAL = [
     # 'fetchPositions', # Futures trading
     # 'fetchLeverageTiers', # Futures initialization
     # 'fetchMarketLeverageTiers', # Futures initialization
+    # 'fetchOpenOrders', 'fetchClosedOrders', # 'fetchOrders', # Refinding balance...
 ]


-def remove_credentials(config) -> None:
+def remove_exchange_credentials(exchange_config: ExchangeConfig, dry_run: bool) -> None:
     """
     Removes exchange keys from the configuration and specifies dry-run
     Used for backtesting / hyperopt / edge and utils.
     Modifies the input dict!
     """
-    if config.get('dry_run', False):
-        config['exchange']['key'] = ''
-        config['exchange']['secret'] = ''
-        config['exchange']['password'] = ''
-        config['exchange']['uid'] = ''
+    if dry_run:
+        exchange_config['key'] = ''
+        exchange_config['apiKey'] = ''
+        exchange_config['secret'] = ''
+        exchange_config['password'] = ''
+        exchange_config['uid'] = ''


 def calculate_backoff(retrycount, max_retries):

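With the new signature, callers pass only the exchange sub-dict plus an explicit dry-run flag. A minimal usage sketch; the config dict is a placeholder:

```python
config = {
    "dry_run": True,
    "exchange": {"name": "binance", "key": "abc", "secret": "xyz"},
}
# Blanks out credentials in-place when running in dry-run mode
remove_exchange_credentials(config["exchange"], config.get("dry_run", False))
assert config["exchange"]["key"] == ""
```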
@@ -11,7 +11,6 @@ from math import floor
 from threading import Lock
 from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union

-import arrow
 import ccxt
 import ccxt.async_support as ccxt_async
 from cachetools import TTLCache

@@ -20,16 +19,16 @@ from dateutil import parser
 from pandas import DataFrame, concat

 from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk,
-                                 BuySell, Config, EntryExit, ListPairsWithTimeframes, MakerTaker,
-                                 OBLiteral, PairWithTimeframe)
+                                 BuySell, Config, EntryExit, ExchangeConfig,
+                                 ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe)
 from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list
 from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, TradingMode
 from freqtrade.enums.pricetype import PriceType
 from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError,
                                   InvalidOrderException, OperationalException, PricingError,
                                   RetryableOrderError, TemporaryError)
-from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_credentials, retrier,
-                                       retrier_async)
+from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials,
+                                       retrier, retrier_async)
 from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType,
                                                amount_to_contract_precision, amount_to_contracts,
                                                amount_to_precision, contracts_to_amount,

@@ -42,6 +41,8 @@ from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers
 from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json,
                             safe_value_fallback2)
 from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
+from freqtrade.util import dt_from_ts, dt_now
+from freqtrade.util.datetime_helpers import dt_humanize, dt_ts


 logger = logging.getLogger(__name__)

@@ -92,8 +93,8 @@ class Exchange:
         # TradingMode.SPOT always supported and not required in this list
     ]

-    def __init__(self, config: Config, validate: bool = True,
-                 load_leverage_tiers: bool = False) -> None:
+    def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None,
+                 validate: bool = True, load_leverage_tiers: bool = False) -> None:
         """
         Initializes this module with the given config,
         it does basic validation whether the specified exchange and pairs are valid.

@@ -107,8 +108,7 @@ class Exchange:
         # Lock event loop. This is necessary to avoid race-conditions when using force* commands
         # Due to funding fee fetching.
         self._loop_lock = Lock()
-        self.loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(self.loop)
+        self.loop = self._init_async_loop()
         self._config: Config = {}

         self._config.update(config)

@@ -132,13 +132,13 @@ class Exchange:

         # Holds all open sell orders for dry_run
         self._dry_run_open_orders: Dict[str, Any] = {}
-        remove_credentials(config)

         if config['dry_run']:
             logger.info('Instance is running with dry_run enabled')
         logger.info(f"Using CCXT {ccxt.__version__}")
-        exchange_config = config['exchange']
-        self.log_responses = exchange_config.get('log_responses', False)
+        exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange']
+        remove_exchange_credentials(exchange_conf, config.get('dry_run', False))
+        self.log_responses = exchange_conf.get('log_responses', False)

         # Leverage properties
         self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT)

@@ -153,8 +153,8 @@ class Exchange:
         self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default))
         if self.trading_mode == TradingMode.FUTURES:
             self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has)
-        if exchange_config.get('_ft_has_params'):
-            self._ft_has = deep_merge_dicts(exchange_config.get('_ft_has_params'),
+        if exchange_conf.get('_ft_has_params'):
+            self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'),
                                             self._ft_has)
             logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has)

@@ -166,18 +166,18 @@ class Exchange:

         # Initialize ccxt objects
         ccxt_config = self._ccxt_config
-        ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}), ccxt_config)
-        ccxt_config = deep_merge_dicts(exchange_config.get('ccxt_sync_config', {}), ccxt_config)
+        ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config)
+        ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config)

-        self._api = self._init_ccxt(exchange_config, ccxt_kwargs=ccxt_config)
+        self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config)

         ccxt_async_config = self._ccxt_config
-        ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_config', {}),
+        ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}),
                                              ccxt_async_config)
-        ccxt_async_config = deep_merge_dicts(exchange_config.get('ccxt_async_config', {}),
+        ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}),
                                              ccxt_async_config)
         self._api_async = self._init_ccxt(
-            exchange_config, ccxt_async, ccxt_kwargs=ccxt_async_config)
+            exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config)

         logger.info(f'Using Exchange "{self.name}"')
         self.required_candle_call_count = 1

@@ -190,7 +190,7 @@ class Exchange:
             self._startup_candle_count, config.get('timeframe', ''))

         # Converts the interval provided in minutes in config to seconds
-        self.markets_refresh_interval: int = exchange_config.get(
+        self.markets_refresh_interval: int = exchange_conf.get(
             "markets_refresh_interval", 60) * 60

         if self.trading_mode != TradingMode.SPOT and load_leverage_tiers:

@@ -212,6 +212,11 @@ class Exchange:
         if self.loop and not self.loop.is_closed():
             self.loop.close()

+    def _init_async_loop(self) -> asyncio.AbstractEventLoop:
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        return loop
+
     def validate_config(self, config):
         # Check if timeframe is available
         self.validate_timeframes(config.get('timeframe'))

@@ -486,7 +491,7 @@ class Exchange:
         try:
             self._markets = self._api.load_markets(params={})
             self._load_async_markets()
-            self._last_markets_refresh = arrow.utcnow().int_timestamp
+            self._last_markets_refresh = dt_ts()
             if self._ft_has['needs_trading_fees']:
                 self._trading_fees = self.fetch_trading_fees()

@@ -497,15 +502,14 @@ class Exchange:
         """Reload markets both sync and async if refresh interval has passed """
         # Check whether markets have to be reloaded
         if (self._last_markets_refresh > 0) and (
-                self._last_markets_refresh + self.markets_refresh_interval
-                > arrow.utcnow().int_timestamp):
+                self._last_markets_refresh + self.markets_refresh_interval > dt_ts()):
             return None
         logger.debug("Performing scheduled market reload..")
         try:
             self._markets = self._api.load_markets(reload=True, params={})
             # Also reload async markets to avoid issues with newly listed pairs
             self._load_async_markets(reload=True)
-            self._last_markets_refresh = arrow.utcnow().int_timestamp
+            self._last_markets_refresh = dt_ts()
             self.fill_leverage_tiers()
         except ccxt.BaseError:
             logger.exception("Could not reload markets.")

@@ -839,7 +843,8 @@ class Exchange:
     def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float,
                              rate: float, leverage: float, params: Dict = {},
                              stop_loss: bool = False) -> Dict[str, Any]:
-        order_id = f'dry_run_{side}_{datetime.now().timestamp()}'
+        now = dt_now()
+        order_id = f'dry_run_{side}_{now.timestamp()}'
         # Rounding here must respect to contract sizes
         _amount = self._contracts_to_amount(
             pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)))

@@ -854,8 +859,8 @@ class Exchange:
             'side': side,
             'filled': 0,
             'remaining': _amount,
-            'datetime': arrow.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
-            'timestamp': arrow.utcnow().int_timestamp * 1000,
+            'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
+            'timestamp': dt_ts(now),
             'status': "open",
             'fee': None,
             'info': {},

@@ -863,7 +868,7 @@ class Exchange:
         }
         if stop_loss:
             dry_order["info"] = {"stopPrice": dry_order["price"]}
-            dry_order["stopPrice"] = dry_order["price"]
+            dry_order[self._ft_has['stop_price_param']] = dry_order["price"]
             # Workaround to avoid filling stoploss orders immediately
             dry_order["ft_order_type"] = "stoploss"
         orderbook: Optional[OrderBook] = None

@@ -1015,7 +1020,7 @@ class Exchange:
         from freqtrade.persistence import Order
         order = Order.order_by_id(order_id)
         if order:
-            ccxt_order = order.to_ccxt_object()
+            ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_param'])
             self._dry_run_open_orders[order_id] = ccxt_order
             return ccxt_order
         # Gracefully handle errors with dry-run orders.

@@ -1428,6 +1433,47 @@ class Exchange:
         except ccxt.BaseError as e:
             raise OperationalException(e) from e

+    @retrier(retries=0)
+    def fetch_orders(self, pair: str, since: datetime) -> List[Dict]:
+        """
+        Fetch all orders for a pair "since"
+        :param pair: Pair for the query
+        :param since: Starting time for the query
+        """
+        if self._config['dry_run']:
+            return []
+
+        def fetch_orders_emulate() -> List[Dict]:
+            orders = []
+            if self.exchange_has('fetchClosedOrders'):
+                orders = self._api.fetch_closed_orders(pair, since=since_ms)
+                if self.exchange_has('fetchOpenOrders'):
+                    orders_open = self._api.fetch_open_orders(pair, since=since_ms)
+                    orders.extend(orders_open)
+            return orders
+
+        try:
+            since_ms = int((since.timestamp() - 10) * 1000)
+            if self.exchange_has('fetchOrders'):
+                try:
+                    orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms)
+                except ccxt.NotSupported:
+                    # Some exchanges don't support fetchOrders
+                    # attempt to fetch open and closed orders separately
+                    orders = fetch_orders_emulate()
+            else:
+                orders = fetch_orders_emulate()
+            self._log_exchange_response('fetch_orders', orders)
+            orders = [self._order_contracts_to_amount(o) for o in orders]
+            return orders
+        except ccxt.DDoSProtection as e:
+            raise DDosProtection(e) from e
+        except (ccxt.NetworkError, ccxt.ExchangeError) as e:
+            raise TemporaryError(
+                f'Could not fetch orders due to {e.__class__.__name__}. Message: {e}') from e
+        except ccxt.BaseError as e:
+            raise OperationalException(e) from e
+
     @retrier
     def fetch_trading_fees(self) -> Dict[str, Any]:
         """

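A hedged usage sketch for the new method, assuming an already-initialised `exchange` instance; note that in dry-run mode it simply returns an empty list:

```python
from datetime import datetime, timedelta, timezone

# Fetch every order placed on the pair within the last week
since = datetime.now(timezone.utc) - timedelta(days=7)
orders = exchange.fetch_orders('BTC/USDT', since=since)
print(f"{len(orders)} orders found")
```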
@ -1885,11 +1931,11 @@ class Exchange:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"one_call: %s msecs (%s)",
|
"one_call: %s msecs (%s)",
|
||||||
one_call,
|
one_call,
|
||||||
arrow.utcnow().shift(seconds=one_call // 1000).humanize(only_distance=True)
|
dt_humanize(dt_now() - timedelta(milliseconds=one_call), only_distance=True)
|
||||||
)
|
)
|
||||||
input_coroutines = [self._async_get_candle_history(
|
input_coroutines = [self._async_get_candle_history(
|
||||||
pair, timeframe, candle_type, since) for since in
|
pair, timeframe, candle_type, since) for since in
|
||||||
range(since_ms, until_ms or (arrow.utcnow().int_timestamp * 1000), one_call)]
|
range(since_ms, until_ms or dt_ts(), one_call)]
|
||||||
|
|
||||||
data: List = []
|
data: List = []
|
||||||
# Chunk requests into batches of 100 to avoid overwelming ccxt Throttling
|
# Chunk requests into batches of 100 to avoid overwelming ccxt Throttling
|
||||||
|
@ -2072,7 +2118,7 @@ class Exchange:
|
||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
# Fetch OHLCV asynchronously
|
# Fetch OHLCV asynchronously
|
||||||
s = '(' + arrow.get(since_ms // 1000).isoformat() + ') ' if since_ms is not None else ''
|
s = '(' + dt_from_ts(since_ms).isoformat() + ') ' if since_ms is not None else ''
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"Fetching pair %s, %s, interval %s, since %s %s...",
|
"Fetching pair %s, %s, interval %s, since %s %s...",
|
||||||
pair, candle_type, timeframe, since_ms, s
|
pair, candle_type, timeframe, since_ms, s
|
||||||
|
@ -2162,7 +2208,7 @@ class Exchange:
|
||||||
logger.debug(
|
logger.debug(
|
||||||
"Fetching trades for pair %s, since %s %s...",
|
"Fetching trades for pair %s, since %s %s...",
|
||||||
pair, since,
|
pair, since,
|
||||||
'(' + arrow.get(since // 1000).isoformat() + ') ' if since is not None else ''
|
'(' + dt_from_ts(since).isoformat() + ') ' if since is not None else ''
|
||||||
)
|
)
|
||||||
trades = await self._api_async.fetch_trades(pair, since=since, limit=1000)
|
trades = await self._api_async.fetch_trades(pair, since=since, limit=1000)
|
||||||
trades = self._trades_contracts_to_amount(trades)
|
trades = self._trades_contracts_to_amount(trades)
|
||||||
|
@ -2896,8 +2942,8 @@ class Exchange:
|
||||||
if nominal_value >= tier['minNotional']:
|
if nominal_value >= tier['minNotional']:
|
||||||
return (tier['maintenanceMarginRate'], tier['maintAmt'])
|
return (tier['maintenanceMarginRate'], tier['maintAmt'])
|
||||||
|
|
||||||
raise OperationalException("nominal value can not be lower than 0")
|
raise ExchangeError("nominal value can not be lower than 0")
|
||||||
# The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it
|
# The lowest notional_floor for any pair in fetch_leverage_tiers is always 0 because it
|
||||||
# describes the min amt for a tier, and the lowest tier will always go down to 0
|
# describes the min amt for a tier, and the lowest tier will always go down to 0
|
||||||
else:
|
else:
|
||||||
raise OperationalException(f"Cannot get maintenance ratio using {self.name}")
|
raise ExchangeError(f"Cannot get maintenance ratio using {self.name}")
|
||||||
|
|
|
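The NotSupported fallback above merges open and closed orders when an exchange lacks fetchOrders. A hedged standalone sketch of that emulation (fetch_orders_emulate is the method's inner helper; this approximation uses only documented ccxt calls and needs authenticated API access at runtime):

import ccxt

def fetch_orders_emulated(exchange: ccxt.Exchange, pair: str, since_ms: int) -> list:
    # Approximate fetchOrders by merging closed and open orders.
    orders = exchange.fetch_closed_orders(pair, since=since_ms)
    orders.extend(exchange.fetch_open_orders(pair, since=since_ms))
    return orders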
@@ -11,6 +11,7 @@ from ccxt import (DECIMAL_PLACES, ROUND, ROUND_DOWN, ROUND_UP, SIGNIFICANT_DIGIT

 from freqtrade.exchange.common import BAD_EXCHANGES, EXCHANGE_HAS_OPTIONAL, EXCHANGE_HAS_REQUIRED
 from freqtrade.util import FtPrecise
+from freqtrade.util.datetime_helpers import dt_from_ts, dt_ts


 CcxtModuleType = Any

@@ -99,9 +100,8 @@ def timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
     if not date:
         date = datetime.now(timezone.utc)

-    new_timestamp = ccxt.Exchange.round_timeframe(timeframe, date.timestamp() * 1000,
-                                                  ROUND_DOWN) // 1000
-    return datetime.fromtimestamp(new_timestamp, tz=timezone.utc)
+    new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_DOWN) // 1000
+    return dt_from_ts(new_timestamp)


 def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:

@@ -113,9 +113,8 @@ def timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:
     """
     if not date:
         date = datetime.now(timezone.utc)
-    new_timestamp = ccxt.Exchange.round_timeframe(timeframe, date.timestamp() * 1000,
-                                                  ROUND_UP) // 1000
-    return datetime.fromtimestamp(new_timestamp, tz=timezone.utc)
+    new_timestamp = ccxt.Exchange.round_timeframe(timeframe, dt_ts(date), ROUND_UP) // 1000
+    return dt_from_ts(new_timestamp)


 def date_minus_candles(
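For orientation, a hedged sketch of what dt_ts and dt_from_ts are assumed to do (the real implementations live in freqtrade.util.datetime_helpers and may additionally accept millisecond inputs):

from datetime import datetime, timezone
from typing import Optional

def dt_ts(date: Optional[datetime] = None) -> int:
    # Epoch milliseconds for `date`, defaulting to now (UTC).
    date = date or datetime.now(timezone.utc)
    return int(date.timestamp() * 1000)

def dt_from_ts(timestamp: float) -> datetime:
    # Aware UTC datetime from an epoch timestamp in seconds.
    return datetime.fromtimestamp(timestamp, tz=timezone.utc)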
@@ -169,6 +169,22 @@ class Okx(Exchange):
             params['posSide'] = self._get_posSide(side, True)
         return params

+    def _convert_stop_order(self, pair: str, order_id: str, order: Dict) -> Dict:
+        if (
+            order['status'] == 'closed'
+            and (real_order_id := order.get('info', {}).get('ordId')) is not None
+        ):
+            # Once an order has triggered, we fetch the regular follow-up order.
+            order_reg = self.fetch_order(real_order_id, pair)
+            self._log_exchange_response('fetch_stoploss_order1', order_reg)
+            order_reg['id_stop'] = order_reg['id']
+            order_reg['id'] = order_id
+            order_reg['type'] = 'stoploss'
+            order_reg['status_stop'] = 'triggered'
+            return order_reg
+        order['type'] = 'stoploss'
+        return order
+
     def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict:
         if self._config['dry_run']:
             return self.fetch_dry_run_order(order_id)

@@ -177,7 +193,7 @@ class Okx(Exchange):
             params1 = {'stop': True}
             order_reg = self._api.fetch_order(order_id, pair, params=params1)
             self._log_exchange_response('fetch_stoploss_order', order_reg)
-            return order_reg
+            return self._convert_stop_order(pair, order_id, order_reg)
         except ccxt.OrderNotFound:
             pass
         params2 = {'stop': True, 'ordType': 'conditional'}

@@ -188,18 +204,7 @@ class Okx(Exchange):
             orders_f = [order for order in orders if order['id'] == order_id]
             if orders_f:
                 order = orders_f[0]
-                if (order['status'] == 'closed'
-                        and (real_order_id := order.get('info', {}).get('ordId')) is not None):
-                    # Once an order has triggered, we fetch the regular follow-up order.
-                    order_reg = self.fetch_order(real_order_id, pair)
-                    self._log_exchange_response('fetch_stoploss_order1', order_reg)
-                    order_reg['id_stop'] = order_reg['id']
-                    order_reg['id'] = order_id
-                    order_reg['type'] = 'stoploss'
-                    order_reg['status_stop'] = 'triggered'
-                    return order_reg
-                order['type'] = 'stoploss'
-                return order
+                return self._convert_stop_order(pair, order_id, order)
         except ccxt.BaseError:
             pass
         raise RetryableOrderError(
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -94,9 +94,12 @@ class Base3ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -96,9 +96,12 @@ class Base4ActionRLEnv(BaseEnvironment):

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
@@ -1,7 +1,7 @@
 import logging
 from enum import Enum

-from gym import spaces
+from gymnasium import spaces

 from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment, Positions

@@ -101,10 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment):
         )

         observation = self._get_observation()
+        # user can play with time if they want
+        truncated = False

         self._update_history(info)

-        return observation, step_reward, self._done, info
+        return observation, step_reward, self._done, truncated, info

     def is_tradesignal(self, action: int) -> bool:
         """
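For reference, the Gymnasium step API these environments now target returns five values instead of four. A minimal sketch with a stock environment (CartPole is purely illustrative and assumes gymnasium is installed):

import gymnasium as gym

env = gym.make("CartPole-v1")
obs, info = env.reset(seed=42)
# step returns (obs, reward, terminated, truncated, info); the freqai envs
# above hardwire truncated to False and pass it through unchanged.
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
env.close()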
@@ -4,11 +4,11 @@ from abc import abstractmethod
 from enum import Enum
 from typing import Optional, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import pandas as pd
-from gym import spaces
-from gym.utils import seeding
+from gymnasium import spaces
+from gymnasium.utils import seeding
 from pandas import DataFrame


@@ -127,6 +127,14 @@ class BaseEnvironment(gym.Env):
         self.history: dict = {}
         self.trade_history: list = []

+    def get_attr(self, attr: str):
+        """
+        Returns the attribute of the environment
+        :param attr: attribute to return
+        :return: attribute
+        """
+        return getattr(self, attr)
+
     @abstractmethod
     def set_action_space(self):
         """

@@ -172,7 +180,7 @@ class BaseEnvironment(gym.Env):
     def reset_tensorboard_log(self):
         self.tensorboard_metrics = {}

-    def reset(self):
+    def reset(self, seed=None):
         """
         Reset is called at the beginning of every episode
         """

@@ -203,7 +211,7 @@ class BaseEnvironment(gym.Env):
         self.close_trade_profit = []
         self._total_unrealized_profit = 1

-        return self._get_observation()
+        return self._get_observation(), self.history

     @abstractmethod
     def step(self, action: int):

@@ -298,6 +306,12 @@ class BaseEnvironment(gym.Env):
         """
        An example reward function. This is the one function that users will likely
        wish to inject their own creativity into.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
+
        :param action: int = The action made by the agent for the current candle.
        :return:
        float = the reward to give to the agent for current step (used for optimization
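The reset change above follows Gymnasium's contract that reset(seed=...) returns an (observation, info) pair; here the env's history dict doubles as the info payload. A toy sketch of that contract (TinyEnv is illustrative, not part of freqtrade):

import gymnasium as gym
import numpy as np
from gymnasium import spaces

class TinyEnv(gym.Env):
    # Minimal env showing the reset/step signatures used above.
    def __init__(self):
        self.observation_space = spaces.Box(-1.0, 1.0, shape=(1,), dtype=np.float32)
        self.action_space = spaces.Discrete(2)
        self.history: dict = {}

    def reset(self, seed=None, options=None):
        super().reset(seed=seed)
        return np.zeros(1, dtype=np.float32), self.history  # (observation, info)

    def step(self, action):
        obs = np.zeros(1, dtype=np.float32)
        return obs, 0.0, True, False, {}  # terminated=True, truncated=False

obs, info = TinyEnv().reset(seed=1)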
@@ -6,7 +6,7 @@ from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

-import gym
+import gymnasium as gym
 import numpy as np
 import numpy.typing as npt
 import pandas as pd

@@ -16,14 +16,14 @@ from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
 from stable_baselines3.common.monitor import Monitor
 from stable_baselines3.common.utils import set_random_seed
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.exceptions import OperationalException
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.freqai_interface import IFreqaiModel
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv
-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions
-from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment, Positions
+from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback
 from freqtrade.persistence import Trade

@@ -46,8 +46,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
             'cpu_count', 1), max(int(self.max_system_threads / 2), 1))
         th.set_num_threads(self.max_threads)
         self.reward_params = self.freqai_info['rl_config']['model_reward_parameters']
-        self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
-        self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env()
+        self.train_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
+        self.eval_env: Union[VecMonitor, SubprocVecEnv, gym.Env] = gym.Env()
         self.eval_callback: Optional[EvalCallback] = None
         self.model_type = self.freqai_info['rl_config']['model_type']
         self.rl_config = self.freqai_info['rl_config']

@@ -371,6 +371,12 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         """
         An example reward function. This is the one function that users will likely
         wish to inject their own creativity into.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
+
         :param action: int = The action made by the agent for the current candle.
         :return:
         float = the reward to give to the agent for current step (used for optimization

@@ -431,9 +437,8 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         return 0.


-def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int,
+def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,
              seed: int, train_df: DataFrame, price: DataFrame,
-             monitor: bool = False,
              env_info: Dict[str, Any] = {}) -> Callable:
     """
     Utility function for multiprocessed env.

@@ -450,8 +455,7 @@ def make_env(MyRLEnv: Type[BaseEnvironment], env_id: str, rank: int,

         env = MyRLEnv(df=train_df, prices=price, id=env_id, seed=seed + rank,
                       **env_info)
-        if monitor:
-            env = Monitor(env)
         return env
     set_random_seed(seed)
     return _init
@@ -45,6 +45,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
     ) -> Tuple[DataFrame, npt.NDArray[np.int_]]:
         """
         Filter the prediction features data and predict with it.
+        :param dk: The datakitchen object
         :param unfiltered_df: Full dataframe for the current backtest period.
         :return:
         :pred_df: dataframe containing the predictions

@@ -74,11 +75,14 @@ class BasePyTorchClassifier(BasePyTorchModel):
             dk.data_dictionary["prediction_features"],
             device=self.device
         )
+        self.model.model.eval()
         logits = self.model.model(x)
         probs = F.softmax(logits, dim=-1)
         predicted_classes = torch.argmax(probs, dim=-1)
         predicted_classes_str = self.decode_class_names(predicted_classes)
-        pred_df_prob = DataFrame(probs.detach().numpy(), columns=class_names)
+        # .tolist() converts probs into an iterable; this way tensors are
+        # automatically moved to the CPU first if necessary.
+        pred_df_prob = DataFrame(probs.detach().tolist(), columns=class_names)
         pred_df = DataFrame(predicted_classes_str, columns=[dk.label_list[0]])
         pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
         return (pred_df, dk.do_predict)
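The .tolist() switch matters for GPU runs: Tensor.tolist() copies device tensors back to host memory implicitly, while .numpy() raises on a CUDA tensor. A small demonstration:

import torch

probs = torch.tensor([[0.1, 0.9]])
if torch.cuda.is_available():
    probs = probs.to("cuda")
# works for CPU and CUDA tensors alike; no explicit .cpu() needed
rows = probs.detach().tolist()
print(rows)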
@@ -27,6 +27,7 @@ class BasePyTorchModel(IFreqaiModel, ABC):
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
         test_size = self.freqai_info.get('data_split_parameters', {}).get('test_size')
         self.splits = ["train", "test"] if test_size != 0 else ["train"]
+        self.window_size = self.freqai_info.get("conv_width", 1)

     def train(
         self, unfiltered_df: DataFrame, pair: str, dk: FreqaiDataKitchen, **kwargs
@@ -44,7 +44,8 @@ class BasePyTorchRegressor(BasePyTorchModel):
             dk.data_dictionary["prediction_features"],
             device=self.device
         )
+        self.model.model.eval()
         y = self.model.model(x)
-        pred_df = DataFrame(y.detach().numpy(), columns=[dk.label_list[0]])
+        y = y.cpu()
+        pred_df = DataFrame(y.detach().tolist(), columns=[dk.label_list[0]])
         pred_df = dk.denormalize_labels_from_metadata(pred_df)
         return (pred_df, dk.do_predict)
@@ -21,7 +21,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange import timeframe_to_seconds
 from freqtrade.freqai.data_drawer import FreqaiDataDrawer
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
-from freqtrade.freqai.utils import plot_feature_importance, record_params
+from freqtrade.freqai.utils import get_tb_logger, plot_feature_importance, record_params
 from freqtrade.strategy.interface import IStrategy


@@ -80,6 +80,7 @@ class IFreqaiModel(ABC):
         if self.keras and self.ft_params.get("DI_threshold", 0):
             self.ft_params["DI_threshold"] = 0
             logger.warning("DI threshold is not configured for Keras models yet. Deactivating.")

         self.CONV_WIDTH = self.freqai_info.get('conv_width', 1)
         if self.ft_params.get("inlier_metric_window", 0):
             self.CONV_WIDTH = self.ft_params.get("inlier_metric_window", 0) * 2

@@ -109,6 +110,7 @@ class IFreqaiModel(ABC):
         if self.ft_params.get('principal_component_analysis', False) and self.continual_learning:
             self.ft_params.update({'principal_component_analysis': False})
             logger.warning('User tried to use PCA with continual learning. Deactivating PCA.')
+        self.activate_tensorboard: bool = self.freqai_info.get('activate_tensorboard', True)

         record_params(config, self.full_path)

@@ -242,8 +244,8 @@ class IFreqaiModel(ABC):
                     new_trained_timerange, pair, strategy, dk, data_load_timerange
                 )
             except Exception as msg:
-                logger.warning(f"Training {pair} raised exception {msg.__class__.__name__}. "
+                logger.exception(f"Training {pair} raised exception {msg.__class__.__name__}. "
                                f"Message: {msg}, skipping.")

             self.train_timer('stop', pair)

@@ -306,10 +308,11 @@ class IFreqaiModel(ABC):
             if dk.check_if_backtest_prediction_is_valid(len_backtest_df):
                 if check_features:
                     self.dd.load_metadata(dk)
-                    dataframe_dummy_features = self.dk.use_strategy_to_populate_indicators(
+                    df_fts = self.dk.use_strategy_to_populate_indicators(
                         strategy, prediction_dataframe=dataframe.tail(1), pair=pair
                     )
-                    dk.find_features(dataframe_dummy_features)
+                    df_fts = dk.remove_special_chars_from_feature_names(df_fts)
+                    dk.find_features(df_fts)
                     self.check_if_feature_list_matches_strategy(dk)
                     check_features = False
                 append_df = dk.get_backtesting_prediction()

@@ -342,7 +345,10 @@ class IFreqaiModel(ABC):
             dk.find_labels(dataframe_train)

             try:
+                self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
+                                               self.activate_tensorboard)
                 self.model = self.train(dataframe_train, pair, dk)
+                self.tb_logger.close()
             except Exception as msg:
                 logger.warning(
                     f"Training {pair} raised exception {msg.__class__.__name__}. "

@@ -489,9 +495,9 @@ class IFreqaiModel(ABC):
         if dk.training_features_list != feature_list:
             raise OperationalException(
                 "Trying to access pretrained model with `identifier` "
-                "but found different features furnished by current strategy."
-                "Change `identifier` to train from scratch, or ensure the"
-                "strategy is furnishing the same features as the pretrained"
+                "but found different features furnished by current strategy. "
+                "Change `identifier` to train from scratch, or ensure the "
+                "strategy is furnishing the same features as the pretrained "
                 "model. In case of --strategy-list, please be aware that FreqAI "
                 "requires all strategies to maintain identical "
                 "feature_engineering_* functions"

@@ -620,18 +626,23 @@ class IFreqaiModel(ABC):
             strategy, corr_dataframes, base_dataframes, pair
         )

-        new_trained_timerange = dk.buffer_timerange(new_trained_timerange)
+        trained_timestamp = new_trained_timerange.stopts

-        unfiltered_dataframe = dk.slice_dataframe(new_trained_timerange, unfiltered_dataframe)
+        buffered_timerange = dk.buffer_timerange(new_trained_timerange)
+
+        unfiltered_dataframe = dk.slice_dataframe(buffered_timerange, unfiltered_dataframe)

         # find the features indicated by strategy and store in datakitchen
         dk.find_features(unfiltered_dataframe)
         dk.find_labels(unfiltered_dataframe)

+        self.tb_logger = get_tb_logger(self.dd.model_type, dk.data_path,
+                                       self.activate_tensorboard)
         model = self.train(unfiltered_dataframe, pair, dk)
+        self.tb_logger.close()

-        self.dd.pair_dict[pair]["trained_timestamp"] = new_trained_timerange.stopts
-        dk.set_new_model_names(pair, new_trained_timerange.stopts)
+        self.dd.pair_dict[pair]["trained_timestamp"] = trained_timestamp
+        dk.set_new_model_names(pair, trained_timestamp)
         self.dd.save_data(model, pair, dk)

         if self.plot_features:
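The switch from logger.warning to logger.exception in the retraining loop means the full traceback of the swallowed error gets logged, not just its message. A standalone illustration:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

try:
    raise ValueError("boom")
except Exception as msg:
    # logger.exception appends the active traceback to the log record
    logger.exception(f"Training raised exception {msg.__class__.__name__}. "
                     f"Message: {msg}, skipping.")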
@@ -74,16 +74,18 @@ class PyTorchMLPClassifier(BasePyTorchClassifier):
         model.to(self.device)
         optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
         criterion = torch.nn.CrossEntropyLoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            model_meta_data={"class_names": class_names},
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
+        # check if continual_learning is activated, and retrieve the model to continue training
+        trainer = self.get_init_model(dk.pair)
+        if trainer is None:
+            trainer = PyTorchModelTrainer(
+                model=model,
+                optimizer=optimizer,
+                criterion=criterion,
+                model_meta_data={"class_names": class_names},
+                device=self.device,
+                data_convertor=self.data_convertor,
+                tb_logger=self.tb_logger,
+                **self.trainer_kwargs,
+            )
         trainer.fit(data_dictionary, self.splits)
         return trainer
@@ -69,15 +69,17 @@ class PyTorchMLPRegressor(BasePyTorchRegressor):
         model.to(self.device)
         optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
         criterion = torch.nn.MSELoss()
-        init_model = self.get_init_model(dk.pair)
-        trainer = PyTorchModelTrainer(
-            model=model,
-            optimizer=optimizer,
-            criterion=criterion,
-            device=self.device,
-            init_model=init_model,
-            data_convertor=self.data_convertor,
-            **self.trainer_kwargs,
-        )
+        # check if continual_learning is activated, and retrieve the model to continue training
+        trainer = self.get_init_model(dk.pair)
+        if trainer is None:
+            trainer = PyTorchModelTrainer(
+                model=model,
+                optimizer=optimizer,
+                criterion=criterion,
+                device=self.device,
+                data_convertor=self.data_convertor,
+                tb_logger=self.tb_logger,
+                **self.trainer_kwargs,
+            )
         trainer.fit(data_dictionary, self.splits)
         return trainer
@@ -0,0 +1,140 @@
+from typing import Any, Dict, Tuple
+
+import numpy as np
+import numpy.typing as npt
+import pandas as pd
+import torch
+
+from freqtrade.freqai.base_models.BasePyTorchRegressor import BasePyTorchRegressor
+from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.torch.PyTorchDataConvertor import (DefaultPyTorchDataConvertor,
+                                                         PyTorchDataConvertor)
+from freqtrade.freqai.torch.PyTorchModelTrainer import PyTorchTransformerTrainer
+from freqtrade.freqai.torch.PyTorchTransformerModel import PyTorchTransformerModel
+
+
+class PyTorchTransformerRegressor(BasePyTorchRegressor):
+    """
+    This class implements the fit method of IFreqaiModel.
+    In the fit method we initialize the model and trainer objects.
+    The only requirement from the model is to be aligned with the PyTorchRegressor
+    predict method, which expects the model to predict a tensor of type float.
+    The trainer defines the training loop.
+
+    Parameters are passed via `model_training_parameters` under the freqai
+    section in the config file, e.g.:
+    {
+        ...
+        "freqai": {
+            ...
+            "model_training_parameters" : {
+                "learning_rate": 3e-4,
+                "trainer_kwargs": {
+                    "max_iters": 5000,
+                    "batch_size": 64,
+                    "max_n_eval_batches": null
+                },
+                "model_kwargs": {
+                    "hidden_dim": 512,
+                    "dropout_percent": 0.2,
+                    "n_layer": 1,
+                },
+            }
+        }
+    }
+    """
+
+    @property
+    def data_convertor(self) -> PyTorchDataConvertor:
+        return DefaultPyTorchDataConvertor(target_tensor_type=torch.float)
+
+    def __init__(self, **kwargs) -> None:
+        super().__init__(**kwargs)
+        config = self.freqai_info.get("model_training_parameters", {})
+        self.learning_rate: float = config.get("learning_rate", 3e-4)
+        self.model_kwargs: Dict[str, Any] = config.get("model_kwargs", {})
+        self.trainer_kwargs: Dict[str, Any] = config.get("trainer_kwargs", {})
+
+    def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
+        """
+        User sets up the training and test data to fit their desired model here
+        :param data_dictionary: the dictionary holding all data for train, test,
+            labels, weights
+        :param dk: The datakitchen object for the current coin/model
+        """
+
+        n_features = data_dictionary["train_features"].shape[-1]
+        n_labels = data_dictionary["train_labels"].shape[-1]
+        model = PyTorchTransformerModel(
+            input_dim=n_features,
+            output_dim=n_labels,
+            time_window=self.window_size,
+            **self.model_kwargs
+        )
+        model.to(self.device)
+        optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
+        criterion = torch.nn.MSELoss()
+        # check if continual_learning is activated, and retrieve the model to continue training
+        trainer = self.get_init_model(dk.pair)
+        if trainer is None:
+            trainer = PyTorchTransformerTrainer(
+                model=model,
+                optimizer=optimizer,
+                criterion=criterion,
+                device=self.device,
+                data_convertor=self.data_convertor,
+                window_size=self.window_size,
+                tb_logger=self.tb_logger,
+                **self.trainer_kwargs,
+            )
+        trainer.fit(data_dictionary, self.splits)
+        return trainer
+
+    def predict(
+        self, unfiltered_df: pd.DataFrame, dk: FreqaiDataKitchen, **kwargs
+    ) -> Tuple[pd.DataFrame, npt.NDArray[np.int_]]:
+        """
+        Filter the prediction features data and predict with it.
+        :param unfiltered_df: Full dataframe for the current backtest period.
+        :return:
+        :pred_df: dataframe containing the predictions
+        :do_predict: np.array of 1s and 0s to indicate places where freqai needed to remove
+        data (NaNs) or felt uncertain about data (PCA and DI index)
+        """
+
+        dk.find_features(unfiltered_df)
+        filtered_df, _ = dk.filter_features(
+            unfiltered_df, dk.training_features_list, training_filter=False
+        )
+        filtered_df = dk.normalize_data_from_metadata(filtered_df)
+        dk.data_dictionary["prediction_features"] = filtered_df
+
+        self.data_cleaning_predict(dk)
+        x = self.data_convertor.convert_x(
+            dk.data_dictionary["prediction_features"],
+            device=self.device
+        )
+        # if user is asking for multiple predictions, slide the window
+        # along the tensor
+        x = x.unsqueeze(0)
+        # create empty torch tensor
+        self.model.model.eval()
+        yb = torch.empty(0).to(self.device)
+        if x.shape[1] > 1:
+            ws = self.window_size
+            for i in range(0, x.shape[1] - ws):
+                xb = x[:, i:i + ws, :].to(self.device)
+                y = self.model.model(xb)
+                yb = torch.cat((yb, y), dim=0)
+        else:
+            yb = self.model.model(x)
+
+        yb = yb.cpu().squeeze()
+        pred_df = pd.DataFrame(yb.detach().numpy(), columns=dk.label_list)
+        pred_df = dk.denormalize_labels_from_metadata(pred_df)
+
+        if x.shape[1] > 1:
+            zeros_df = pd.DataFrame(np.zeros((x.shape[1] - len(pred_df), len(pred_df.columns))),
+                                    columns=pred_df.columns)
+            pred_df = pd.concat([zeros_df, pred_df], axis=0, ignore_index=True)
+        return (pred_df, dk.do_predict)
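The predict path above slides a fixed window along the input tensor and concatenates one prediction per window. Reduced to a standalone sketch (the mean() stands in for the transformer call):

import torch

window = 4
x = torch.randn(10, 3).unsqueeze(0)      # (1, n_rows, n_features)
preds = []
for i in range(0, x.shape[1] - window):
    xb = x[:, i:i + window, :]           # one window per prediction
    preds.append(xb.mean())              # stand-in for self.model.model(xb)
yb = torch.stack(preds)                  # one value per window position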
@@ -1,11 +1,12 @@
 import logging
 from pathlib import Path
-from typing import Any, Dict
+from typing import Any, Dict, Type

 import torch as th

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv, Positions
+from freqtrade.freqai.RL.BaseEnvironment import BaseEnvironment
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import BaseReinforcementLearningModel

@@ -57,10 +58,14 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         policy_kwargs = dict(activation_fn=th.nn.ReLU,
                              net_arch=self.net_arch)

+        if self.activate_tensorboard:
+            tb_path = Path(dk.full_path / "tensorboard" / dk.pair.split('/')[0])
+        else:
+            tb_path = None
+
         if dk.pair not in self.dd.model_dictionary or not self.continual_learning:
             model = self.MODELCLASS(self.policy_type, self.train_env, policy_kwargs=policy_kwargs,
-                                    tensorboard_log=Path(
-                                        dk.full_path / "tensorboard" / dk.pair.split('/')[0]),
+                                    tensorboard_log=tb_path,
                                     **self.freqai_info.get('model_training_parameters', {})
                                     )
         else:

@@ -84,7 +89,9 @@ class ReinforcementLearner(BaseReinforcementLearningModel):

         return model

-    class MyRLEnv(Base5ActionRLEnv):
+    MyRLEnv: Type[BaseEnvironment]
+
+    class MyRLEnv(Base5ActionRLEnv):  # type: ignore[no-redef]
         """
         User can override any function in BaseRLEnv and gym.Env. Here the user
         sets a custom reward based on profit and trade duration.

@@ -94,6 +101,12 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         """
         An example reward function. This is the one function that users will likely
         wish to inject their own creativity into.
+
+        Warning!
+        This function is a showcase of functionality designed to show as many
+        environment control features as possible. It is also designed to run quickly
+        on small computers. This is a benchmark, it is *not* for live production.
+
         :param action: int = The action made by the agent for the current candle.
         :return:
         float = the reward to give to the agent for current step (used for optimization
@@ -3,12 +3,12 @@ from typing import Any, Dict

 from pandas import DataFrame
 from stable_baselines3.common.callbacks import EvalCallback
-from stable_baselines3.common.vec_env import SubprocVecEnv
+from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
 from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner
 from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env
-from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback
+from freqtrade.freqai.tensorboard.TensorboardCallback import TensorboardCallback


 logger = logging.getLogger(__name__)

@@ -41,22 +41,25 @@ class ReinforcementLearner_multiproc(ReinforcementLearner):

         env_info = self.pack_env_dict(dk.pair)

+        eval_freq = len(train_df) // self.max_threads
+
         env_id = "train_env"
-        self.train_env = SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
-                                                 train_df, prices_train,
-                                                 monitor=True,
-                                                 env_info=env_info) for i
-                                        in range(self.max_threads)])
+        self.train_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, env_id, i, 1,
+                                                            train_df, prices_train,
+                                                            env_info=env_info) for i
+                                                   in range(self.max_threads)]))

         eval_env_id = 'eval_env'
-        self.eval_env = SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
-                                                test_df, prices_test,
-                                                monitor=True,
-                                                env_info=env_info) for i
-                                       in range(self.max_threads)])
+        self.eval_env = VecMonitor(SubprocVecEnv([make_env(self.MyRLEnv, eval_env_id, i, 1,
+                                                           test_df, prices_test,
+                                                           env_info=env_info) for i
+                                                  in range(self.max_threads)]))
+
         self.eval_callback = EvalCallback(self.eval_env, deterministic=True,
-                                          render=False, eval_freq=len(train_df),
+                                          render=False, eval_freq=eval_freq,
                                           best_model_save_path=str(dk.data_path))

+        # THE TENSORBOARD CALLBACK IS NOT RECOMMENDED FOR USE WITH MULTIPLE ENVS:
+        # IT WILL RETURN FALSE INFORMATION, AND IS NOT THREAD SAFE WITH SB3!
         actions = self.train_env.env_method("get_actions")[0]
         self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions)
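VecMonitor wraps the vectorized env once, replacing the per-process Monitor wrappers that make_env previously applied. A hedged sketch with a toy env (assumes gymnasium and stable-baselines3 are installed; CartPole is illustrative):

import gymnasium as gym
from stable_baselines3.common.vec_env import SubprocVecEnv, VecMonitor

def make_toy_env(rank: int):
    def _init():
        return gym.make("CartPole-v1")
    return _init

if __name__ == "__main__":
    # episode rewards and lengths are now recorded at the VecEnv level
    vec_env = VecMonitor(SubprocVecEnv([make_toy_env(i) for i in range(4)]))
    obs = vec_env.reset()
    vec_env.close()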
@@ -5,6 +5,7 @@ from xgboost import XGBRegressor

 from freqtrade.freqai.base_models.BaseRegressionModel import BaseRegressionModel
 from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
+from freqtrade.freqai.tensorboard import TBCallback


 logger = logging.getLogger(__name__)

@@ -44,7 +45,10 @@ class XGBoostRegressor(BaseRegressionModel):

         model = XGBRegressor(**self.model_training_parameters)

+        model.set_params(callbacks=[TBCallback(dk.data_path)], activate=self.activate_tensorboard)
         model.fit(X=X, y=y, sample_weight=sample_weight, eval_set=eval_set,
                   sample_weight_eval_set=eval_weights, xgb_model=xgb_model)
+        # set the callbacks to empty so that we can serialize to disk later
+        model.set_params(callbacks=[])

         return model
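Clearing callbacks before returning the model matters because the TensorBoard callback holds a SummaryWriter, which does not survive pickling when FreqAI saves the model to disk. A minimal sketch of the pattern:

import pickle

import numpy as np
from xgboost import XGBRegressor

model = XGBRegressor(n_estimators=5)
model.fit(np.random.rand(32, 4), np.random.rand(32))
model.set_params(callbacks=[])   # drop writer-holding callbacks first
blob = pickle.dumps(model)       # now serializes cleanly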
@@ -3,8 +3,9 @@ from typing import Any, Dict, Type, Union

 from stable_baselines3.common.callbacks import BaseCallback
 from stable_baselines3.common.logger import HParam
+from stable_baselines3.common.vec_env import VecEnv

-from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment
+from freqtrade.freqai.RL.BaseEnvironment import BaseActions


 class TensorboardCallback(BaseCallback):

@@ -12,11 +13,13 @@ class TensorboardCallback(BaseCallback):
     Custom callback for plotting additional values in tensorboard and
     episodic summary reports.
     """
+    # Override training_env type to fix type errors
+    training_env: Union[VecEnv, None] = None
+
     def __init__(self, verbose=1, actions: Type[Enum] = BaseActions):
         super().__init__(verbose)
         self.model: Any = None
-        self.logger = None  # type: Any
-        self.training_env: BaseEnvironment = None  # type: ignore
+        self.logger: Any = None
         self.actions: Type[Enum] = actions

     def _on_training_start(self) -> None:

@@ -44,6 +47,8 @@ class TensorboardCallback(BaseCallback):
     def _on_step(self) -> bool:

         local_info = self.locals["infos"][0]
+        if self.training_env is None:
+            return True
         tensorboard_metrics = self.training_env.get_attr("tensorboard_metrics")[0]

         for metric in local_info:
freqtrade/freqai/tensorboard/__init__.py (new file)
@@ -0,0 +1,15 @@
+# ensure users can still use a non-torch freqai version
+try:
+    from freqtrade.freqai.tensorboard.tensorboard import TensorBoardCallback, TensorboardLogger
+    TBLogger = TensorboardLogger
+    TBCallback = TensorBoardCallback
+except ModuleNotFoundError:
+    from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
+                                                               BaseTensorboardLogger)
+    TBLogger = BaseTensorboardLogger  # type: ignore
+    TBCallback = BaseTensorBoardCallback  # type: ignore
+
+__all__ = (
+    "TBLogger",
+    "TBCallback"
+)
freqtrade/freqai/tensorboard/base_tensorboard.py (new file)
@@ -0,0 +1,33 @@
+import logging
+from pathlib import Path
+from typing import Any
+
+from xgboost.callback import TrainingCallback
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseTensorboardLogger:
+    def __init__(self, logdir: Path, activate: bool = True):
+        pass
+
+    def log_scalar(self, tag: str, scalar_value: Any, step: int):
+        return
+
+    def close(self):
+        return
+
+
+class BaseTensorBoardCallback(TrainingCallback):
+
+    def __init__(self, logdir: Path, activate: bool = True):
+        pass
+
+    def after_iteration(
+        self, model, epoch: int, evals_log: TrainingCallback.EvalsLog
+    ) -> bool:
+        return False
+
+    def after_training(self, model):
+        return model
freqtrade/freqai/tensorboard/tensorboard.py (new file)
@@ -0,0 +1,62 @@
+import logging
+from pathlib import Path
+from typing import Any
+
+from torch.utils.tensorboard import SummaryWriter
+from xgboost import callback
+
+from freqtrade.freqai.tensorboard.base_tensorboard import (BaseTensorBoardCallback,
+                                                           BaseTensorboardLogger)
+
+
+logger = logging.getLogger(__name__)
+
+
+class TensorboardLogger(BaseTensorboardLogger):
+    def __init__(self, logdir: Path, activate: bool = True):
+        self.activate = activate
+        if self.activate:
+            self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
+
+    def log_scalar(self, tag: str, scalar_value: Any, step: int):
+        if self.activate:
+            self.writer.add_scalar(tag, scalar_value, step)
+
+    def close(self):
+        if self.activate:
+            self.writer.flush()
+            self.writer.close()
+
+
+class TensorBoardCallback(BaseTensorBoardCallback):
+
+    def __init__(self, logdir: Path, activate: bool = True):
+        self.activate = activate
+        if self.activate:
+            self.writer: SummaryWriter = SummaryWriter(f"{str(logdir)}/tensorboard")
+
+    def after_iteration(
+        self, model, epoch: int, evals_log: callback.TrainingCallback.EvalsLog
+    ) -> bool:
+        if not self.activate:
+            return False
+        if not evals_log:
+            return False
+
+        for data, metric in evals_log.items():
+            for metric_name, log in metric.items():
+                score = log[-1][0] if isinstance(log[-1], tuple) else log[-1]
+                if data == "train":
+                    self.writer.add_scalar("train_loss", score, epoch)
+                else:
+                    self.writer.add_scalar("valid_loss", score, epoch)
+
+        return False
+
+    def after_training(self, model):
+        if not self.activate:
+            return model
+        self.writer.flush()
+        self.writer.close()
+
+        return model
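The logger above is a thin wrapper over torch's SummaryWriter; a hedged usage sketch (the log directory path is illustrative):

from pathlib import Path

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(str(Path("user_data/models/example") / "tensorboard"))
for step, loss in enumerate([0.9, 0.5, 0.3]):
    writer.add_scalar("train_loss", loss, step)
writer.flush()
writer.close()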
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import List, Optional
+from typing import Optional

 import pandas as pd
 import torch

@@ -12,14 +12,14 @@ class PyTorchDataConvertor(ABC):
     """

     @abstractmethod
-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         """
         :param df: "*_features" dataframe.
         :param device: The device to use for training (e.g. 'cpu', 'cuda').
         """

     @abstractmethod
-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         """
         :param df: "*_labels" dataframe.
         :param device: The device to use for training (e.g. 'cpu', 'cuda').

@@ -45,14 +45,14 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
         self._target_tensor_type = target_tensor_type
         self._squeeze_target_tensor = squeeze_target_tensor

-    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_x(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         x = torch.from_numpy(df.values).float()
         if device:
             x = x.to(device)

-        return [x]
+        return x

-    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> List[torch.Tensor]:
+    def convert_y(self, df: pd.DataFrame, device: Optional[str] = None) -> torch.Tensor:
         y = torch.from_numpy(df.values)

         if self._target_tensor_type:

@@ -64,4 +64,4 @@ class DefaultPyTorchDataConvertor(PyTorchDataConvertor):
         if device:
             y = y.to(device)

-        return [y]
+        return y
@@ -1,5 +1,4 @@
 import logging
-from typing import List

 import torch
 from torch import nn

@@ -47,8 +46,8 @@ class PyTorchMLPModel(nn.Module):
         self.relu = nn.ReLU()
         self.dropout = nn.Dropout(p=dropout_percent)

-    def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
-        x: torch.Tensor = tensors[0]
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
         x = self.relu(self.input_layer(x))
         x = self.dropout(x)
         x = self.blocks(x)
@@ -12,6 +12,8 @@ from torch.utils.data import DataLoader, TensorDataset
 from freqtrade.freqai.torch.PyTorchDataConvertor import PyTorchDataConvertor
 from freqtrade.freqai.torch.PyTorchTrainerInterface import PyTorchTrainerInterface

+from .datasets import WindowDataset
+

 logger = logging.getLogger(__name__)

@@ -23,9 +25,10 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
             optimizer: Optimizer,
             criterion: nn.Module,
             device: str,
-            init_model: Dict,
             data_convertor: PyTorchDataConvertor,
             model_meta_data: Dict[str, Any] = {},
+            window_size: int = 1,
+            tb_logger: Any = None,
             **kwargs
     ):
         """
@@ -52,8 +55,8 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         self.batch_size: int = kwargs.get("batch_size", 64)
         self.max_n_eval_batches: Optional[int] = kwargs.get("max_n_eval_batches", None)
         self.data_convertor = data_convertor
-        if init_model:
-            self.load_from_checkpoint(init_model)
+        self.window_size: int = window_size
+        self.tb_logger = tb_logger

     def fit(self, data_dictionary: Dict[str, pd.DataFrame], splits: List[str]):
         """
@@ -75,36 +78,28 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
             batch_size=self.batch_size,
             n_iters=self.max_iters
         )
+        self.model.train()
         for epoch in range(1, epochs + 1):
-            # training
-            losses = []
             for i, batch_data in enumerate(data_loaders_dictionary["train"]):

-                for tensor in batch_data:
-                    tensor.to(self.device)
-
-                xb = batch_data[:-1]
-                yb = batch_data[-1]
+                xb, yb = batch_data
+                xb.to(self.device)
+                yb.to(self.device)
                 yb_pred = self.model(xb)
                 loss = self.criterion(yb_pred, yb)

                 self.optimizer.zero_grad(set_to_none=True)
                 loss.backward()
                 self.optimizer.step()
-                losses.append(loss.item())
-            train_loss = sum(losses) / len(losses)
-            log_message = f"epoch {epoch}/{epochs}: train loss {train_loss:.4f}"
+                self.tb_logger.log_scalar("train_loss", loss.item(), i)

             # evaluation
             if "test" in splits:
-                test_loss = self.estimate_loss(
+                self.estimate_loss(
                     data_loaders_dictionary,
                     self.max_n_eval_batches,
                     "test"
                 )
-                log_message += f" ; test loss {test_loss:.4f}"
-
-            logger.info(log_message)

     @torch.no_grad()
     def estimate_loss(
@@ -112,26 +107,22 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
             data_loader_dictionary: Dict[str, DataLoader],
             max_n_eval_batches: Optional[int],
             split: str,
-    ) -> float:
+    ) -> None:
         self.model.eval()
         n_batches = 0
-        losses = []
         for i, batch_data in enumerate(data_loader_dictionary[split]):
             if max_n_eval_batches and i > max_n_eval_batches:
                 n_batches += 1
                 break
+            xb, yb = batch_data
+            xb.to(self.device)
+            yb.to(self.device)

-            for tensor in batch_data:
-                tensor.to(self.device)
-
-            xb = batch_data[:-1]
-            yb = batch_data[-1]
             yb_pred = self.model(xb)
             loss = self.criterion(yb_pred, yb)
-            losses.append(loss.item())
+            self.tb_logger.log_scalar(f"{split}_loss", loss.item(), i)

         self.model.train()
-        return sum(losses) / len(losses)

     def create_data_loaders_dictionary(
             self,
@@ -145,7 +136,7 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         for split in splits:
             x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
             y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
-            dataset = TensorDataset(*x, *y)
+            dataset = TensorDataset(x, y)
             data_loader = DataLoader(
                 dataset,
                 batch_size=self.batch_size,
@@ -206,3 +197,33 @@ class PyTorchModelTrainer(PyTorchTrainerInterface):
         self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
         self.model_meta_data = checkpoint["model_meta_data"]
         return self
+
+
+class PyTorchTransformerTrainer(PyTorchModelTrainer):
+    """
+    Creating a trainer for the Transformer model.
+    """
+
+    def create_data_loaders_dictionary(
+            self,
+            data_dictionary: Dict[str, pd.DataFrame],
+            splits: List[str]
+    ) -> Dict[str, DataLoader]:
+        """
+        Converts the input data to PyTorch tensors using a data loader.
+        """
+        data_loader_dictionary = {}
+        for split in splits:
+            x = self.data_convertor.convert_x(data_dictionary[f"{split}_features"], self.device)
+            y = self.data_convertor.convert_y(data_dictionary[f"{split}_labels"], self.device)
+            dataset = WindowDataset(x, y, self.window_size)
+            data_loader = DataLoader(
+                dataset,
+                batch_size=self.batch_size,
+                shuffle=False,
+                drop_last=True,
+                num_workers=0,
+            )
+            data_loader_dictionary[split] = data_loader
+
+        return data_loader_dictionary
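One caveat worth flagging on the hunks above: `torch.Tensor.to()` is not in-place, so the bare `xb.to(self.device)` / `yb.to(self.device)` calls do not move the batches; they rely on the data already being on the right device (the convertors are called with `self.device`). A minimal demonstration (not part of the commit):

    import torch

    t = torch.zeros(2)
    u = t.to("cpu")    # .to() returns a tensor; the receiver t is never mutated
    assert u is t      # same object only because t is already on "cpu"

    xb = torch.zeros(4, 3)
    xb = xb.to("cpu")  # the effective pattern: keep the returned tensor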
93  freqtrade/freqai/torch/PyTorchTransformerModel.py  Normal file
@@ -0,0 +1,93 @@
+import math
+
+import torch
+from torch import nn
+
+
+"""
+The architecture is based on the paper "Attention Is All You Need".
+Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
+Lukasz Kaiser, and Illia Polosukhin. 2017.
+"""
+
+
+class PyTorchTransformerModel(nn.Module):
+    """
+    A transformer approach to time series modeling using positional encoding.
+    The architecture is based on the paper "Attention Is All You Need".
+    Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez,
+    Lukasz Kaiser, and Illia Polosukhin. 2017.
+    """
+
+    def __init__(self, input_dim: int = 7, output_dim: int = 7, hidden_dim=1024,
+                 n_layer=2, dropout_percent=0.1, time_window=10, nhead=8):
+        super().__init__()
+        self.time_window = time_window
+        # ensure the input dimension to the transformer is divisible by nhead
+        self.dim_val = input_dim - (input_dim % nhead)
+        self.input_net = nn.Sequential(
+            nn.Dropout(dropout_percent), nn.Linear(input_dim, self.dim_val)
+        )
+
+        # Encode the timeseries with Positional encoding
+        self.positional_encoding = PositionalEncoding(d_model=self.dim_val, max_len=self.dim_val)
+
+        # Define the encoder block of the Transformer
+        self.encoder_layer = nn.TransformerEncoderLayer(
+            d_model=self.dim_val, nhead=nhead, dropout=dropout_percent, batch_first=True)
+        self.transformer = nn.TransformerEncoder(self.encoder_layer, num_layers=n_layer)
+
+        # the pseudo decoding FC
+        self.output_net = nn.Sequential(
+            nn.Linear(self.dim_val * time_window, int(hidden_dim)),
+            nn.ReLU(),
+            nn.Dropout(dropout_percent),
+            nn.Linear(int(hidden_dim), int(hidden_dim / 2)),
+            nn.ReLU(),
+            nn.Dropout(dropout_percent),
+            nn.Linear(int(hidden_dim / 2), int(hidden_dim / 4)),
+            nn.ReLU(),
+            nn.Dropout(dropout_percent),
+            nn.Linear(int(hidden_dim / 4), output_dim)
+        )
+
+    def forward(self, x, mask=None, add_positional_encoding=True):
+        """
+        Args:
+            x: Input features of shape [Batch, SeqLen, input_dim]
+            mask: Mask to apply on the attention outputs (optional)
+            add_positional_encoding: If True, we add the positional encoding to the input.
+                Might not be desired for some tasks.
+        """
+        x = self.input_net(x)
+        if add_positional_encoding:
+            x = self.positional_encoding(x)
+        x = self.transformer(x, mask=mask)
+        x = x.reshape(-1, 1, self.time_window * x.shape[-1])
+        x = self.output_net(x)
+        return x
+
+
+class PositionalEncoding(nn.Module):
+    def __init__(self, d_model, max_len=5000):
+        """
+        Args
+            d_model: Hidden dimensionality of the input.
+            max_len: Maximum length of a sequence to expect.
+        """
+        super().__init__()
+
+        # Create matrix of [SeqLen, HiddenDim] representing the positional encoding
+        # for max_len inputs
+        pe = torch.zeros(max_len, d_model)
+        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
+        pe[:, 0::2] = torch.sin(position * div_term)
+        pe[:, 1::2] = torch.cos(position * div_term)
+        pe = pe.unsqueeze(0)
+
+        self.register_buffer("pe", pe, persistent=False)
+
+    def forward(self, x):
+        x = x + self.pe[:, : x.size(1)]
+        return x
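A hedged usage sketch for the new model (not part of the commit). Dimensions are chosen to satisfy two constraints visible in the file: `input_dim` should be divisible by `nhead` (with the defaults of `input_dim=7`, `nhead=8`, `dim_val` collapses to 0), and `max_len = dim_val` must be at least `time_window` for the positional-encoding slice to broadcast:

    import torch

    model = PyTorchTransformerModel(input_dim=64, output_dim=1, hidden_dim=1024,
                                    n_layer=2, dropout_percent=0.1, time_window=10, nhead=8)
    x = torch.randn(4, 10, 64)   # [Batch, SeqLen, input_dim]; SeqLen must equal time_window
    out = model(x)               # the reshape + pseudo-decoder yield [Batch, 1, output_dim]
    print(out.shape)             # torch.Size([4, 1, 1])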
19  freqtrade/freqai/torch/datasets.py  Normal file
@@ -0,0 +1,19 @@
+import torch
+
+
+class WindowDataset(torch.utils.data.Dataset):
+    def __init__(self, xs, ys, window_size):
+        self.xs = xs
+        self.ys = ys
+        self.window_size = window_size
+
+    def __len__(self):
+        return len(self.xs) - self.window_size
+
+    def __getitem__(self, index):
+        idx_rev = len(self.xs) - self.window_size - index - 1
+        window_x = self.xs[idx_rev:idx_rev + self.window_size, :]
+        # Beware of indexing, these two window_x and window_y are aimed at the same row!
+        # this is what happens when you use :
+        window_y = self.ys[idx_rev + self.window_size - 1, :].unsqueeze(0)
+        return window_x, window_y
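A worked example (not part of the commit) of the reversed indexing above. Index 0 maps to the window ending one row before the end of the data, and the label is aligned with the last row of its feature window:

    import torch

    xs = torch.arange(10, dtype=torch.float32).reshape(10, 1)
    ys = xs.clone()
    ds = WindowDataset(xs, ys, window_size=3)

    len(ds)          # 10 - 3 = 7 windows
    x0, y0 = ds[0]   # idx_rev = 10 - 3 - 0 - 1 = 6
    x0.squeeze()     # tensor([6., 7., 8.])  <- rows 6..8
    y0.squeeze()     # tensor(8.)            <- label row 8, the window's last row
    # Note: the window covering rows 7..9 is never produced; whether skipping the
    # most recent row is intentional is not clear from this diff.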
@@ -92,55 +92,6 @@ def get_required_data_timerange(config: Config) -> TimeRange:
     return data_load_timerange


-# Keep below for when we wish to download heterogeneously lengthed data for FreqAI.
-# def download_all_data_for_training(dp: DataProvider, config: Config) -> None:
-#     """
-#     Called only once upon start of bot to download the necessary data for
-#     populating indicators and training a FreqAI model.
-#     :param timerange: TimeRange = The full data timerange for populating the indicators
-#                       and training the model.
-#     :param dp: DataProvider instance attached to the strategy
-#     """
-
-#     if dp._exchange is not None:
-#         markets = [p for p, m in dp._exchange.markets.items() if market_is_active(m)
-#                    or config.get('include_inactive')]
-#     else:
-#         # This should not occur:
-#         raise OperationalException('No exchange object found.')
-
-#     all_pairs = dynamic_expand_pairlist(config, markets)
-
-#     if not dp._exchange:
-#         # Not realistic - this is only called in live mode.
-#         raise OperationalException("Dataprovider did not have an exchange attached.")
-
-#     time = datetime.now(tz=timezone.utc).timestamp()
-
-#     for tf in config["freqai"]["feature_parameters"].get("include_timeframes"):
-#         timerange = TimeRange()
-#         timerange.startts = int(time)
-#         timerange.stopts = int(time)
-#         startup_candles = dp.get_required_startup(str(tf))
-#         tf_seconds = timeframe_to_seconds(str(tf))
-#         timerange.subtract_start(tf_seconds * startup_candles)
-#         new_pairs_days = int((timerange.stopts - timerange.startts) / 86400)
-#         # FIXME: now that we are looping on `refresh_backtest_ohlcv_data`, the function
-#         # redownloads the funding rate for each pair.
-#         refresh_backtest_ohlcv_data(
-#             dp._exchange,
-#             pairs=all_pairs,
-#             timeframes=[tf],
-#             datadir=config["datadir"],
-#             timerange=timerange,
-#             new_pairs_days=new_pairs_days,
-#             erase=False,
-#             data_format=config.get("dataformat_ohlcv", "json"),
-#             trading_mode=config.get("trading_mode", "spot"),
-#             prepend=config.get("prepend_data", False),
-#         )
-
-
 def plot_feature_importance(model: Any, pair: str, dk: FreqaiDataKitchen,
                             count_max: int = 25) -> None:
     """
@@ -233,3 +184,13 @@ def get_timerange_backtest_live_models(config: Config) -> str:
     dd = FreqaiDataDrawer(models_path, config)
     timerange = dd.get_timerange_from_live_historic_predictions()
     return timerange.timerange_str
+
+
+def get_tb_logger(model_type: str, path: Path, activate: bool) -> Any:
+
+    if model_type == "pytorch" and activate:
+        from freqtrade.freqai.tensorboard import TBLogger
+        return TBLogger(path, activate)
+    else:
+        from freqtrade.freqai.tensorboard.base_tensorboard import BaseTensorboardLogger
+        return BaseTensorboardLogger(path, activate)
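The new `get_tb_logger` factory pairs with the `tb_logger` argument added to `PyTorchModelTrainer` earlier in this commit. A hedged wiring sketch (the path is illustrative; `BaseTensorboardLogger` is presumably the no-op fallback, given it is returned whenever logging is deactivated):

    from pathlib import Path

    tb_logger = get_tb_logger("pytorch", Path("user_data/models/example-run"), activate=True)
    tb_logger.log_scalar("train_loss", 0.42, 1)   # (name, value, step) - matches the trainer's calls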
@@ -1,9 +1,9 @@
 """
 Freqtrade is the main module of this bot. It contains the class Freqtrade()
 """
-import copy
 import logging
 import traceback
+from copy import deepcopy
 from datetime import datetime, time, timedelta, timezone
 from math import isclose
 from threading import Lock
@@ -13,7 +13,7 @@ from schedule import Scheduler

 from freqtrade import constants
 from freqtrade.configuration import validate_config_consistency
-from freqtrade.constants import BuySell, Config, LongShort
+from freqtrade.constants import BuySell, Config, ExchangeConfig, LongShort
 from freqtrade.data.converter import order_book_to_dataframe
 from freqtrade.data.dataprovider import DataProvider
 from freqtrade.edge import Edge
@@ -23,6 +23,7 @@ from freqtrade.exceptions import (DependencyException, ExchangeError, Insufficie
                                   InvalidOrderException, PricingError)
 from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, timeframe_to_minutes, timeframe_to_next_date,
                                 timeframe_to_seconds)
+from freqtrade.exchange.common import remove_exchange_credentials
 from freqtrade.misc import safe_value_fallback, safe_value_fallback2
 from freqtrade.mixins import LoggingMixin
 from freqtrade.persistence import Order, PairLocks, Trade, init_db
@@ -63,6 +64,9 @@ class FreqtradeBot(LoggingMixin):

         # Init objects
         self.config = config
+        exchange_config: ExchangeConfig = deepcopy(config['exchange'])
+        # Remove credentials from original exchange config to avoid accidental credential exposure
+        remove_exchange_credentials(config['exchange'], True)
+
         self.strategy: IStrategy = StrategyResolver.load_strategy(self.config)

@@ -70,7 +74,7 @@ class FreqtradeBot(LoggingMixin):
         validate_config_consistency(config)

         self.exchange = ExchangeResolver.load_exchange(
-            self.config['exchange']['name'], self.config, load_leverage_tiers=True)
+            self.config, exchange_config=exchange_config, load_leverage_tiers=True)

         init_db(self.config['db_url'])

@@ -420,7 +424,7 @@ class FreqtradeBot(LoggingMixin):
         """
         Try refinding a lost trade.
         Only used when InsufficientFunds appears on exit orders (stoploss or long sell/short buy).
-        Tries to walk the stored orders and sell them off eventually.
+        Tries to walk the stored orders and updates the trade state if necessary.
         """
         logger.info(f"Trying to refind lost order for {trade}")
         for order in trade.orders:
@@ -451,6 +455,42 @@ class FreqtradeBot(LoggingMixin):
             except ExchangeError:
                 logger.warning(f"Error updating {order.order_id}.")

+    def handle_onexchange_order(self, trade: Trade):
+        """
+        Try refinding an order that is not in the database.
+        Only used when the balance disappeared, which would make exiting impossible.
+        """
+        try:
+            orders = self.exchange.fetch_orders(trade.pair, trade.open_date_utc)
+            for order in orders:
+                trade_order = [o for o in trade.orders if o.order_id == order['id']]
+                if trade_order:
+                    continue
+                logger.info(f"Found previously unknown order {order['id']} for {trade.pair}.")
+
+                order_obj = Order.parse_from_ccxt_object(order, trade.pair, order['side'])
+                order_obj.order_filled_date = datetime.fromtimestamp(
+                    safe_value_fallback(order, 'lastTradeTimestamp', 'timestamp') // 1000,
+                    tz=timezone.utc)
+                trade.orders.append(order_obj)
+                # TODO: how do we handle open_order_id ...
+                Trade.commit()
+                prev_exit_reason = trade.exit_reason
+                trade.exit_reason = ExitType.SOLD_ON_EXCHANGE.value
+                self.update_trade_state(trade, order['id'], order)
+
+                logger.info(f"handled order {order['id']}")
+                if not trade.is_open:
+                    # Trade was just closed
+                    trade.close_date = order_obj.order_filled_date
+                    Trade.commit()
+                    break
+                else:
+                    trade.exit_reason = prev_exit_reason
+                    Trade.commit()
+
+        except ExchangeError:
+            logger.warning("Error finding onexchange order")
     #
     # BUY / enter positions / open trades logic and methods
     #
@@ -461,7 +501,7 @@ class FreqtradeBot(LoggingMixin):
         """
         trades_created = 0

-        whitelist = copy.deepcopy(self.active_pair_whitelist)
+        whitelist = deepcopy(self.active_pair_whitelist)
         if not whitelist:
             self.log_once("Active pair whitelist is empty.", logger.info)
             return trades_created
@@ -490,7 +530,8 @@ class FreqtradeBot(LoggingMixin):
         # Create entity and execute trade for each pair from whitelist
         for pair in whitelist:
             try:
-                trades_created += self.create_trade(pair)
+                with self._exit_lock:
+                    trades_created += self.create_trade(pair)
             except DependencyException as exception:
                 logger.warning('Unable to create trade for %s: %s', pair, exception)

@@ -981,7 +1022,7 @@ class FreqtradeBot(LoggingMixin):
             'base_currency': self.exchange.get_pair_base_currency(trade.pair),
             'fiat_currency': self.config.get('fiat_display_currency', None),
             'amount': order.safe_amount_after_fee if fill else (order.amount or trade.amount),
-            'open_date': trade.open_date or datetime.utcnow(),
+            'open_date': trade.open_date_utc or datetime.now(timezone.utc),
             'current_rate': current_rate,
             'sub_trade': sub_trade,
         }
@@ -1033,6 +1074,13 @@ class FreqtradeBot(LoggingMixin):
         """
         trades_closed = 0
         for trade in trades:
+
+            if trade.open_order_id is None and not self.wallets.check_exit_amount(trade):
+                logger.warning(
+                    f'Not enough {trade.safe_base_currency} in wallet to exit {trade}. '
+                    'Trying to recover.')
+                self.handle_onexchange_order(trade)
+
             try:
                 try:
                     if (self.strategy.order_types.get('stoploss_on_exchange') and
@@ -1535,13 +1583,13 @@ class FreqtradeBot(LoggingMixin):
         # Update wallets to ensure amounts tied up in a stoploss is now free!
         self.wallets.update()
         if self.trading_mode == TradingMode.FUTURES:
+            # A safe exit amount isn't needed for futures, you can just exit/close the position
             return amount

         trade_base_currency = self.exchange.get_pair_base_currency(pair)
         wallet_amount = self.wallets.get_free(trade_base_currency)
         logger.debug(f"{pair} - Wallet: {wallet_amount} - Trade-amount: {amount}")
         if wallet_amount >= amount:
-            # A safe exit amount isn't needed for futures, you can just exit/close the position
             return amount
         elif wallet_amount > amount * 0.98:
             logger.info(f"{pair} - Falling back to wallet-amount {wallet_amount} -> {amount}.")
@@ -1697,8 +1745,8 @@ class FreqtradeBot(LoggingMixin):
             'enter_tag': trade.enter_tag,
             'sell_reason': trade.exit_reason,  # Deprecated
             'exit_reason': trade.exit_reason,
-            'open_date': trade.open_date,
-            'close_date': trade.close_date or datetime.utcnow(),
+            'open_date': trade.open_date_utc,
+            'close_date': trade.close_date_utc or datetime.now(timezone.utc),
             'stake_amount': trade.stake_amount,
             'stake_currency': self.config['stake_currency'],
             'base_currency': self.exchange.get_pair_base_currency(trade.pair),
@@ -1720,10 +1768,8 @@ class FreqtradeBot(LoggingMixin):
         else:
             trade.exit_order_status = reason

-        order = trade.select_order_by_order_id(order_id)
-        if not order:
-            raise DependencyException(
-                f"Order_obj not found for {order_id}. This should not have happened.")
+        order_or_none = trade.select_order_by_order_id(order_id)
+        order = self.order_obj_or_raise(order_id, order_or_none)

         profit_rate: float = trade.safe_close_rate
         profit_trade = trade.calc_profit(rate=profit_rate)
@@ -1764,6 +1810,12 @@ class FreqtradeBot(LoggingMixin):
         # Send the message
         self.rpc.send_msg(msg)

+    def order_obj_or_raise(self, order_id: str, order_obj: Optional[Order]) -> Order:
+        if not order_obj:
+            raise DependencyException(
+                f"Order_obj not found for {order_id}. This should not have happened.")
+        return order_obj
+
     #
     # Common update trade state methods
     #
@@ -1802,10 +1854,8 @@ class FreqtradeBot(LoggingMixin):
             # Handling of this will happen in check_handle_timedout.
             return True

-        order_obj = trade.select_order_by_order_id(order_id)
-        if not order_obj:
-            raise DependencyException(
-                f"Order_obj not found for {order_id}. This should not have happened.")
+        order_obj_or_none = trade.select_order_by_order_id(order_id)
+        order_obj = self.order_obj_or_raise(order_id, order_obj_or_none)

         self.handle_order_fee(trade, order_obj, order)

@@ -1823,16 +1873,18 @@ class FreqtradeBot(LoggingMixin):
             # Must also run for partial exits
             # TODO: Margin will need to use interest_rate as well.
             # interest_rate = self.exchange.get_interest_rate()
-            trade.set_liquidation_price(self.exchange.get_liquidation_price(
-                pair=trade.pair,
-                open_rate=trade.open_rate,
-                is_short=trade.is_short,
-                amount=trade.amount,
-                stake_amount=trade.stake_amount,
-                leverage=trade.leverage,
-                wallet_balance=trade.stake_amount,
-            ))
+            try:
+                trade.set_liquidation_price(self.exchange.get_liquidation_price(
+                    pair=trade.pair,
+                    open_rate=trade.open_rate,
+                    is_short=trade.is_short,
+                    amount=trade.amount,
+                    stake_amount=trade.stake_amount,
+                    leverage=trade.leverage,
+                    wallet_balance=trade.stake_amount,
+                ))
+            except DependencyException:
+                logger.warning('Unable to calculate liquidation price')
             # Updating wallets when order is closed
             self.wallets.update()
             Trade.commit()
@@ -32,6 +32,7 @@ def _set_loggers(verbosity: int = 0, api_verbosity: str = 'info') -> None:
         logging.INFO if verbosity <= 2 else logging.DEBUG
     )
     logging.getLogger('telegram').setLevel(logging.INFO)
+    logging.getLogger('httpx').setLevel(logging.INFO)

     logging.getLogger('werkzeug').setLevel(
         logging.ERROR if api_verbosity == 'error' else logging.INFO
@@ -3,13 +3,11 @@ Various tool function for Freqtrade and scripts
 """
 import gzip
 import logging
-import re
 from datetime import datetime
 from pathlib import Path
 from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
 from urllib.parse import urlparse

-import orjson
 import pandas as pd
 import rapidjson

@@ -48,18 +46,6 @@ def round_coin_value(
     return val


-def shorten_date(_date: str) -> str:
-    """
-    Trim the date so it fits on small screens
-    """
-    new_date = re.sub('seconds?', 'sec', _date)
-    new_date = re.sub('minutes?', 'min', new_date)
-    new_date = re.sub('hours?', 'h', new_date)
-    new_date = re.sub('days?', 'd', new_date)
-    new_date = re.sub('^an?', '1', new_date)
-    return new_date
-
-
 def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = True) -> None:
     """
     Dump JSON data into a file
@@ -262,17 +248,7 @@ def dataframe_to_json(dataframe: pd.DataFrame) -> str:
     :param dataframe: A pandas DataFrame
     :returns: A JSON string of the pandas DataFrame
     """
-    # https://github.com/pandas-dev/pandas/issues/24889
-    # https://github.com/pandas-dev/pandas/issues/40443
-    # We need to convert to a dict to avoid mem leak
-    def default(z):
-        if isinstance(z, pd.Timestamp):
-            return z.timestamp() * 1e3
-        if z is pd.NaT:
-            return 'NaT'
-        raise TypeError
-
-    return str(orjson.dumps(dataframe.to_dict(orient='split'), default=default), 'utf-8')
+    return dataframe.to_json(orient='split')


 def json_to_dataframe(data: str) -> pd.DataFrame:
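The orjson-based serializer is dropped in favor of pandas' built-in writer. A quick round-trip sketch (not from the commit) showing that `orient='split'` survives serialize/deserialize:

    import pandas as pd

    df = pd.DataFrame({"close": [100.0, 101.5]},
                      index=pd.to_datetime(["2023-04-01", "2023-04-02"]))
    payload = df.to_json(orient='split')            # what dataframe_to_json now returns
    restored = pd.read_json(payload, orient='split')
    assert restored["close"].tolist() == [100.0, 101.5]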
@@ -9,7 +9,6 @@ from copy import deepcopy
 from datetime import datetime, timedelta, timezone
 from typing import Any, Dict, List, Optional, Tuple

-import pandas as pd
 from numpy import nan
 from pandas import DataFrame

@@ -28,8 +27,10 @@ from freqtrade.exchange import (amount_to_contract_precision, price_to_precision
 from freqtrade.mixins import LoggingMixin
 from freqtrade.optimize.backtest_caching import get_strategy_run_id
 from freqtrade.optimize.bt_progress import BTProgress
-from freqtrade.optimize.optimize_reports import (generate_backtest_stats, show_backtest_results,
-                                                 store_backtest_signal_candles,
+from freqtrade.optimize.optimize_reports import (generate_backtest_stats, generate_rejected_signals,
+                                                 generate_trade_signal_candles,
+                                                 show_backtest_results,
+                                                 store_backtest_analysis_results,
                                                  store_backtest_stats)
 from freqtrade.persistence import LocalTrade, Order, PairLocks, Trade
 from freqtrade.plugins.pairlistmanager import PairListManager
@@ -84,10 +85,11 @@ class Backtesting:
         self.strategylist: List[IStrategy] = []
         self.all_results: Dict[str, Dict] = {}
         self.processed_dfs: Dict[str, Dict] = {}
+        self.rejected_dict: Dict[str, List] = {}
+        self.rejected_df: Dict[str, Dict] = {}

         self._exchange_name = self.config['exchange']['name']
-        self.exchange = ExchangeResolver.load_exchange(
-            self._exchange_name, self.config, load_leverage_tiers=True)
+        self.exchange = ExchangeResolver.load_exchange(self.config, load_leverage_tiers=True)
         self.dataprovider = DataProvider(self.config, self.exchange)

         if self.config.get('strategy_list'):
@@ -1056,6 +1058,18 @@ class Backtesting:
             return None
         return row

+    def _collate_rejected(self, pair, row):
+        """
+        Temporarily store rejected signal information for downstream use in backtesting_analysis
+        """
+        # It could be fun to enable hyperopt mode to write
+        # a loss function to reduce rejected signals
+        if (self.config.get('export', 'none') == 'signals' and
+                self.dataprovider.runmode == RunMode.BACKTEST):
+            if pair not in self.rejected_dict:
+                self.rejected_dict[pair] = []
+            self.rejected_dict[pair].append([row[DATE_IDX], row[ENTER_TAG_IDX]])
+
     def backtest_loop(
         self, row: Tuple, pair: str, current_time: datetime, end_date: datetime,
         open_trade_count_start: int, trade_dir: Optional[LongShort],
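For orientation, a hypothetical illustration (not from the commit) of the structure `_collate_rejected` accumulates, which `generate_rejected_signals` consumes later in this commit:

    # One [candle-date, enter_tag] entry per rejected entry signal, keyed by pair:
    # self.rejected_dict = {
    #     "BTC/USDT": [
    #         [Timestamp("2023-04-01 00:00:00+00:00"), "breakout_long"],  # hypothetical values
    #         [Timestamp("2023-04-01 04:00:00+00:00"), "breakout_long"],
    #     ],
    # }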
@@ -1081,20 +1095,22 @@
         if (
             (self._position_stacking or len(LocalTrade.bt_trades_open_pp[pair]) == 0)
             and is_first
-            and self.trade_slot_available(open_trade_count_start)
             and current_time != end_date
             and trade_dir is not None
             and not PairLocks.is_pair_locked(pair, row[DATE_IDX], trade_dir)
         ):
-            trade = self._enter_trade(pair, row, trade_dir)
-            if trade:
-                # TODO: hacky workaround to avoid opening > max_open_trades
-                # This emulates previous behavior - not sure if this is correct
-                # Prevents entering if the trade-slot was freed in this candle
-                open_trade_count_start += 1
-                # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.")
-                LocalTrade.add_bt_trade(trade)
-                self.wallets.update()
+            if (self.trade_slot_available(open_trade_count_start)):
+                trade = self._enter_trade(pair, row, trade_dir)
+                if trade:
+                    # TODO: hacky workaround to avoid opening > max_open_trades
+                    # This emulates previous behavior - not sure if this is correct
+                    # Prevents entering if the trade-slot was freed in this candle
+                    open_trade_count_start += 1
+                    # logger.debug(f"{pair} - Emulate creation of new trade: {trade}.")
+                    LocalTrade.add_bt_trade(trade)
+                    self.wallets.update()
+            else:
+                self._collate_rejected(pair, row)

         for trade in list(LocalTrade.bt_trades_open_pp[pair]):
             # 3. Process entry orders.
@@ -1236,8 +1252,8 @@
     def backtest_one_strategy(self, strat: IStrategy, data: Dict[str, DataFrame],
                               timerange: TimeRange):
         self.progress.init_step(BacktestState.ANALYZE, 0)
-        logger.info(f"Running backtesting for Strategy {strat.get_strategy_name()}")
+        strategy_name = strat.get_strategy_name()
+        logger.info(f"Running backtesting for Strategy {strategy_name}")
         backtest_start_time = datetime.now(timezone.utc)
         self._set_strategy(strat)

@@ -1272,37 +1288,21 @@
         )
         backtest_end_time = datetime.now(timezone.utc)
         results.update({
-            'run_id': self.run_ids.get(strat.get_strategy_name(), ''),
+            'run_id': self.run_ids.get(strategy_name, ''),
             'backtest_start_time': int(backtest_start_time.timestamp()),
             'backtest_end_time': int(backtest_end_time.timestamp()),
         })
-        self.all_results[self.strategy.get_strategy_name()] = results
+        self.all_results[strategy_name] = results

         if (self.config.get('export', 'none') == 'signals' and
                 self.dataprovider.runmode == RunMode.BACKTEST):
-            self._generate_trade_signal_candles(preprocessed_tmp, results)
+            self.processed_dfs[strategy_name] = generate_trade_signal_candles(
+                preprocessed_tmp, results)
+            self.rejected_df[strategy_name] = generate_rejected_signals(
+                preprocessed_tmp, self.rejected_dict)

         return min_date, max_date

-    def _generate_trade_signal_candles(self, preprocessed_df, bt_results):
-        signal_candles_only = {}
-        for pair in preprocessed_df.keys():
-            signal_candles_only_df = DataFrame()
-
-            pairdf = preprocessed_df[pair]
-            resdf = bt_results['results']
-            pairresults = resdf.loc[(resdf["pair"] == pair)]
-
-            if pairdf.shape[0] > 0:
-                for t, v in pairresults.open_date.items():
-                    allinds = pairdf.loc[(pairdf['date'] < v)]
-                    signal_inds = allinds.iloc[[-1]]
-                    signal_candles_only_df = pd.concat([signal_candles_only_df, signal_inds])
-
-            signal_candles_only[pair] = signal_candles_only_df
-
-        self.processed_dfs[self.strategy.get_strategy_name()] = signal_candles_only
-
     def _get_min_cached_backtest_date(self):
         min_backtest_date = None
         backtest_cache_age = self.config.get('backtest_cache', constants.BACKTEST_CACHE_DEFAULT)
@@ -1365,8 +1365,9 @@

         if (self.config.get('export', 'none') == 'signals' and
                 self.dataprovider.runmode == RunMode.BACKTEST):
-            store_backtest_signal_candles(
-                self.config['exportfilename'], self.processed_dfs, dt_appendix)
+            store_backtest_analysis_results(
+                self.config['exportfilename'], self.processed_dfs, self.rejected_df,
+                dt_appendix)

         # Results may be mixed up now. Sort them so they follow --strategy-list order.
         if 'strategy_list' in self.config and len(self.results) > 0:
@@ -32,7 +32,7 @@ class EdgeCli:
         # Ensure using dry-run
         self.config['dry_run'] = True
         self.config['stake_amount'] = constants.UNLIMITED_STAKE_AMOUNT
-        self.exchange = ExchangeResolver.load_exchange(self.config['exchange']['name'], self.config)
+        self.exchange = ExchangeResolver.load_exchange(self.config)
         self.strategy = StrategyResolver.load_strategy(self.config)
         self.strategy.dp = DataProvider(config, self.exchange)

@@ -4,7 +4,7 @@ from datetime import datetime, timedelta, timezone
 from pathlib import Path
 from typing import Any, Dict, List, Union

-from pandas import DataFrame, to_datetime
+from pandas import DataFrame, concat, to_datetime
 from tabulate import tabulate

 from freqtrade.constants import (BACKTEST_BREAKDOWNS, DATETIME_PRINT_FORMAT, LAST_BT_RESULT_FN,
@@ -46,29 +46,80 @@ def store_backtest_stats(
     file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})


-def store_backtest_signal_candles(
-        recordfilename: Path, candles: Dict[str, Dict], dtappendix: str) -> Path:
+def _store_backtest_analysis_data(
+        recordfilename: Path, data: Dict[str, Dict],
+        dtappendix: str, name: str) -> Path:
     """
-    Stores backtest trade signal candles
+    Stores backtest trade candles for analysis
     :param recordfilename: Path object, which can either be a filename or a directory.
         Filenames will be appended with a timestamp right before the suffix
-        while for directories, <directory>/backtest-result-<datetime>_signals.pkl will be used
+        while for directories, <directory>/backtest-result-<datetime>_<name>.pkl will be used
         as filename
-    :param stats: Dict containing the backtesting signal candles
+    :param candles: Dict containing the backtesting data for analysis
     :param dtappendix: Datetime to use for the filename
+    :param name: Name to use for the file, e.g. signals, rejected
     """
     if recordfilename.is_dir():
-        filename = (recordfilename / f'backtest-result-{dtappendix}_signals.pkl')
+        filename = (recordfilename / f'backtest-result-{dtappendix}_{name}.pkl')
     else:
         filename = Path.joinpath(
-            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_signals.pkl'
+            recordfilename.parent, f'{recordfilename.stem}-{dtappendix}_{name}.pkl'
         )

-    file_dump_joblib(filename, candles)
+    file_dump_joblib(filename, data)

     return filename


+def store_backtest_analysis_results(
+        recordfilename: Path, candles: Dict[str, Dict], trades: Dict[str, Dict],
+        dtappendix: str) -> None:
+    _store_backtest_analysis_data(recordfilename, candles, dtappendix, "signals")
+    _store_backtest_analysis_data(recordfilename, trades, dtappendix, "rejected")
+
+
+def generate_trade_signal_candles(preprocessed_df: Dict[str, DataFrame],
+                                  bt_results: Dict[str, Any]) -> DataFrame:
+    signal_candles_only = {}
+    for pair in preprocessed_df.keys():
+        signal_candles_only_df = DataFrame()
+
+        pairdf = preprocessed_df[pair]
+        resdf = bt_results['results']
+        pairresults = resdf.loc[(resdf["pair"] == pair)]
+
+        if pairdf.shape[0] > 0:
+            for t, v in pairresults.open_date.items():
+                allinds = pairdf.loc[(pairdf['date'] < v)]
+                signal_inds = allinds.iloc[[-1]]
+                signal_candles_only_df = concat([
+                    signal_candles_only_df.infer_objects(),
+                    signal_inds.infer_objects()])
+
+        signal_candles_only[pair] = signal_candles_only_df
+    return signal_candles_only
+
+
+def generate_rejected_signals(preprocessed_df: Dict[str, DataFrame],
+                              rejected_dict: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
+    rejected_candles_only = {}
+    for pair, signals in rejected_dict.items():
+        rejected_signals_only_df = DataFrame()
+        pairdf = preprocessed_df[pair]
+
+        for t in signals:
+            data_df_row = pairdf.loc[(pairdf['date'] == t[0])].copy()
+            data_df_row['pair'] = pair
+            data_df_row['enter_tag'] = t[1]
+
+            rejected_signals_only_df = concat([
+                rejected_signals_only_df.infer_objects(),
+                data_df_row.infer_objects()])
+
+        rejected_candles_only[pair] = rejected_signals_only_df
+    return rejected_candles_only
+
+
 def _get_line_floatfmt(stake_currency: str) -> List[str]:
     """
     Generate floatformat (goes in line with _generate_result_line())
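Net effect of the storage refactor: one private writer, two artifacts per backtest. A hedged illustration of the on-disk result when `exportfilename` points at a directory (the timestamp format of `dtappendix` is not shown in this diff):

    # <directory>/backtest-result-<dtappendix>_signals.pkl   <- candles, via generate_trade_signal_candles
    # <directory>/backtest-result-<dtappendix>_rejected.pkl  <- trades, via generate_rejected_signals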
@@ -36,7 +36,7 @@ class _KeyValueStoreModel(ModelBase):

     value_type: Mapped[ValueTypesEnum] = mapped_column(String(20), nullable=False)

-    string_value: Mapped[Optional[str]]
+    string_value: Mapped[Optional[str]] = mapped_column(String(255), nullable=True)
     datetime_value: Mapped[Optional[datetime]]
     float_value: Mapped[Optional[float]]
     int_value: Mapped[Optional[int]]
@@ -19,7 +19,7 @@ from freqtrade.exchange import (ROUND_DOWN, ROUND_UP, amount_to_contract_precisi
                                 price_to_precision)
 from freqtrade.leverage import interest
 from freqtrade.persistence.base import ModelBase, SessionType
-from freqtrade.util import FtPrecise
+from freqtrade.util import FtPrecise, dt_now


 logger = logging.getLogger(__name__)
@@ -68,7 +68,7 @@ class Order(ModelBase):
     remaining: Mapped[Optional[float]] = mapped_column(Float(), nullable=True)
     cost: Mapped[Optional[float]] = mapped_column(Float(), nullable=True)
     stop_price: Mapped[Optional[float]] = mapped_column(Float(), nullable=True)
-    order_date: Mapped[datetime] = mapped_column(nullable=True, default=datetime.utcnow)
+    order_date: Mapped[datetime] = mapped_column(nullable=True, default=dt_now)
     order_filled_date: Mapped[Optional[datetime]] = mapped_column(nullable=True)
     order_update_date: Mapped[Optional[datetime]] = mapped_column(nullable=True)
     funding_fee: Mapped[Optional[float]] = mapped_column(Float(), nullable=True)
@@ -158,7 +158,7 @@ class Order(ModelBase):
         self.order_filled_date = datetime.now(timezone.utc)
         self.order_update_date = datetime.now(timezone.utc)

-    def to_ccxt_object(self) -> Dict[str, Any]:
+    def to_ccxt_object(self, stopPriceName: str = 'stopPrice') -> Dict[str, Any]:
         order: Dict[str, Any] = {
             'id': self.order_id,
             'symbol': self.ft_pair,
@@ -170,7 +170,6 @@ class Order(ModelBase):
             'side': self.ft_order_side,
             'filled': self.filled,
             'remaining': self.remaining,
-            'stopPrice': self.stop_price,
             'datetime': self.order_date_utc.strftime('%Y-%m-%dT%H:%M:%S.%f'),
             'timestamp': int(self.order_date_utc.timestamp() * 1000),
             'status': self.status,
@@ -178,7 +177,11 @@ class Order(ModelBase):
             'info': {},
         }
         if self.ft_order_side == 'stoploss':
-            order['ft_order_type'] = 'stoploss'
+            order.update({
+                stopPriceName: self.stop_price,
+                'ft_order_type': 'stoploss',
+            })

         return order

     def to_json(self, entry_side: str, minified: bool = False) -> Dict[str, Any]:
@@ -422,7 +425,7 @@ class LocalTrade():

     @property
     def close_date_utc(self):
-        return self.close_date.replace(tzinfo=timezone.utc)
+        return self.close_date.replace(tzinfo=timezone.utc) if self.close_date else None

     @property
     def entry_side(self) -> str:
@@ -708,7 +711,10 @@ class LocalTrade():
         if order.ft_order_side != self.entry_side:
             amount_tr = amount_to_contract_precision(self.amount, self.amount_precision,
                                                      self.precision_mode, self.contract_size)
-            if isclose(order.safe_amount_after_fee, amount_tr, abs_tol=MATH_CLOSE_PREC):
+            if (
+                isclose(order.safe_amount_after_fee, amount_tr, abs_tol=MATH_CLOSE_PREC)
+                or order.safe_amount_after_fee > amount_tr
+            ):
                 self.close(order.safe_price)
             else:
                 self.recalc_trade_from_orders()
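Editorial note on `to_ccxt_object(stopPriceName=...)` above: exchanges disagree on the key used for stop prices, so the caller can now choose the field name, and the previously unconditional `'stopPrice'` entry is emitted only for stoploss orders. A hedged sketch (the alternate key name is illustrative):

    # Default keeps the previous stoploss behaviour:
    order.to_ccxt_object()                 # -> {..., 'stopPrice': <stop_price>, 'ft_order_type': 'stoploss'}
    # An exchange expecting a different key can request it:
    order.to_ccxt_object('stopLossPrice')  # -> {..., 'stopLossPrice': <stop_price>, ...}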
@@ -633,7 +633,7 @@ def load_and_plot_trades(config: Config):
     """
     strategy = StrategyResolver.load_strategy(config)

-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config)
+    exchange = ExchangeResolver.load_exchange(config)
     IStrategy.dp = DataProvider(config, exchange)
     strategy.ft_bot_start()
     strategy.bot_loop_start(datetime.now(timezone.utc))
@@ -678,7 +678,7 @@ def plot_profit(config: Config) -> None:
     if 'timeframe' not in config:
         raise OperationalException('Timeframe must be set in either config or via --timeframe.')

-    exchange = ExchangeResolver.load_exchange(config['exchange']['name'], config)
+    exchange = ExchangeResolver.load_exchange(config)
     plot_elements = init_plotscript(config, list(exchange.markets))
     trades = plot_elements['trades']
     # Filter trades to relevant pairs
@@ -3,9 +3,9 @@ Minimum age (days listed) pair list filter
 """
 import logging
 from copy import deepcopy
+from datetime import timedelta
 from typing import Any, Dict, List, Optional

-import arrow
 from pandas import DataFrame

 from freqtrade.constants import Config, ListPairsWithTimeframes
@@ -13,7 +13,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import plural
 from freqtrade.plugins.pairlist.IPairList import IPairList
-from freqtrade.util import PeriodicCache
+from freqtrade.util import PeriodicCache, dt_floor_day, dt_now, dt_ts


 logger = logging.getLogger(__name__)
@@ -84,10 +84,7 @@ class AgeFilter(IPairList):
         since_days = -(
             self._max_days_listed if self._max_days_listed else self._min_days_listed
         ) - 1
-        since_ms = int(arrow.utcnow()
-                       .floor('day')
-                       .shift(days=since_days)
-                       .float_timestamp) * 1000
+        since_ms = dt_ts(dt_floor_day(dt_now()) + timedelta(days=since_days))
         candles = self._exchange.refresh_latest_ohlcv(needed_pairs, since_ms=since_ms, cache=False)
         if self._enabled:
             for p in deepcopy(pairlist):
@@ -116,7 +113,7 @@ class AgeFilter(IPairList):
         ):
             # We have fetched at least the minimum required number of daily candles
             # Add to cache, store the time we last checked this symbol
-            self._symbolsChecked[pair] = arrow.utcnow().int_timestamp * 1000
+            self._symbolsChecked[pair] = dt_ts()
             return True
         else:
             self.log_once((
@@ -127,6 +124,6 @@ class AgeFilter(IPairList):
                 " or more than "
                 f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}"
             ) if self._max_days_listed else ''), logger.info)
-            self._symbolsCheckFailed[pair] = arrow.utcnow().int_timestamp * 1000
+            self._symbolsCheckFailed[pair] = dt_ts()
             return False
         return False
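The arrow-based timestamp math above is replaced by `dt_*` helpers from `freqtrade.util`. Their implementations are not part of this diff; the sketch below shows the semantics the call sites rely on (an assumption inferred from usage):

    from datetime import datetime, timedelta, timezone
    from typing import Optional

    def dt_now() -> datetime:
        # timezone-aware "now", replacing arrow.utcnow()
        return datetime.now(timezone.utc)

    def dt_floor_day(dt: datetime) -> datetime:
        # truncate to midnight, replacing arrow's .floor('day')
        return dt.replace(hour=0, minute=0, second=0, microsecond=0)

    def dt_ts(dt: Optional[datetime] = None) -> int:
        # milliseconds since epoch; with no argument it stamps "now",
        # replacing arrow.utcnow().int_timestamp * 1000
        return int((dt or dt_now()).timestamp() * 1000)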
@@ -4,9 +4,9 @@ Volatility pairlist filter
 import logging
 import sys
 from copy import deepcopy
+from datetime import timedelta
 from typing import Any, Dict, List, Optional

-import arrow
 import numpy as np
 from cachetools import TTLCache
 from pandas import DataFrame
@@ -16,6 +16,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import plural
 from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.util import dt_floor_day, dt_now, dt_ts


 logger = logging.getLogger(__name__)
@@ -73,10 +74,7 @@ class VolatilityFilter(IPairList):
         needed_pairs: ListPairsWithTimeframes = [
             (p, '1d', self._def_candletype) for p in pairlist if p not in self._pair_cache]

-        since_ms = (arrow.utcnow()
-                    .floor('day')
-                    .shift(days=-self._days - 1)
-                    .int_timestamp) * 1000
+        since_ms = dt_ts(dt_floor_day(dt_now()) - timedelta(days=self._days))
         # Get all candles
         candles = {}
         if needed_pairs:
@@ -105,7 +103,7 @@ class VolatilityFilter(IPairList):

         result = False
         if daily_candles is not None and not daily_candles.empty:
-            returns = (np.log(daily_candles.close / daily_candles.close.shift(-1)))
+            returns = (np.log(daily_candles["close"].shift(1) / daily_candles["close"]))
             returns.fillna(0, inplace=True)

             volatility_series = returns.rolling(window=self._days).std() * np.sqrt(self._days)
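The rewritten returns line is more than an arrow cleanup: `shift(-1)` compared each close with the next candle, while `shift(1)` compares with the previous one, so the NaN moves from the last row to the first while the magnitudes stay identical, and the rolling standard deviation is unaffected. A small pandas sketch to check this:

```python
import numpy as np
import pandas as pd

close = pd.Series([100.0, 110.0, 105.0, 120.0])
old_returns = np.log(close / close.shift(-1))   # NaN on the last row
new_returns = np.log(close.shift(1) / close)    # NaN on the first row
# Same values, shifted by one row:
assert np.allclose(old_returns[:-1].values, new_returns[1:].values)
```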
@@ -4,7 +4,7 @@ Volume PairList provider
 Provides dynamic pair list based on trade volumes
 """
 import logging
-from datetime import datetime, timedelta, timezone
+from datetime import timedelta
 from typing import Any, Dict, List, Literal

 from cachetools import TTLCache
@@ -15,6 +15,7 @@ from freqtrade.exchange import timeframe_to_minutes, timeframe_to_prev_date
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import format_ms_time
 from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.util import dt_now


 logger = logging.getLogger(__name__)
@@ -161,13 +162,13 @@ class VolumePairList(IPairList):
             # get lookback period in ms, for exchange ohlcv fetch
             since_ms = int(timeframe_to_prev_date(
                 self._lookback_timeframe,
-                datetime.now(timezone.utc) + timedelta(
+                dt_now() + timedelta(
                     minutes=-(self._lookback_period * self._tf_in_min) - self._tf_in_min)
             ).timestamp()) * 1000

             to_ms = int(timeframe_to_prev_date(
                 self._lookback_timeframe,
-                datetime.now(timezone.utc) - timedelta(minutes=self._tf_in_min)
+                dt_now() - timedelta(minutes=self._tf_in_min)
             ).timestamp()) * 1000

             # todo: utc date output for starting date
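`dt_now()` is a drop-in for `datetime.now(timezone.utc)`, so the window arithmetic is unchanged. A sketch of the resulting bounds, assuming a 1h lookback timeframe and 24 lookback candles (the concrete values for `_tf_in_min` and `_lookback_period` are illustrative only; `timeframe_to_prev_date()` subsequently snaps each bound down to the previous candle open):

```python
from datetime import timedelta

from freqtrade.util import dt_now  # added to freqtrade.util in this commit

tf_in_min = 60        # assumed _tf_in_min for a 1h lookback timeframe
lookback_period = 24  # assumed _lookback_period

raw_start = dt_now() + timedelta(minutes=-(lookback_period * tf_in_min) - tf_in_min)
raw_end = dt_now() - timedelta(minutes=tf_in_min)
# After rounding both bounds down to candle opens, the fetch covers the last
# 24 fully closed 1h candles and excludes the candle currently forming.
```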
@@ -3,9 +3,9 @@ Rate of change pairlist filter
 """
 import logging
 from copy import deepcopy
+from datetime import timedelta
 from typing import Any, Dict, List, Optional

-import arrow
 from cachetools import TTLCache
 from pandas import DataFrame

@@ -14,6 +14,7 @@ from freqtrade.exceptions import OperationalException
 from freqtrade.exchange.types import Tickers
 from freqtrade.misc import plural
 from freqtrade.plugins.pairlist.IPairList import IPairList
+from freqtrade.util import dt_floor_day, dt_now, dt_ts


 logger = logging.getLogger(__name__)
@@ -71,10 +72,7 @@ class RangeStabilityFilter(IPairList):
         needed_pairs: ListPairsWithTimeframes = [
             (p, '1d', self._def_candletype) for p in pairlist if p not in self._pair_cache]

-        since_ms = (arrow.utcnow()
-                    .floor('day')
-                    .shift(days=-self._days - 1)
-                    .int_timestamp) * 1000
+        since_ms = dt_ts(dt_floor_day(dt_now()) - timedelta(days=self._days - 1))
         # Get all candles
         candles = {}
         if needed_pairs:
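This one is not a pure 1:1 translation: the arrow chain shifted back `self._days + 1` days, the replacement only `self._days - 1`, so the requested window shrinks by two days. A sketch with an assumed `_days = 10`:

```python
from datetime import timedelta

from freqtrade.util import dt_floor_day, dt_now

days = 10  # assumed self._days
old_since = dt_floor_day(dt_now()) - timedelta(days=days + 1)  # arrow .shift(days=-self._days - 1)
new_since = dt_floor_day(dt_now()) - timedelta(days=days - 1)  # this commit
# The window now starts two days later: counting today's (incomplete) candle,
# exactly `days` daily candles are requested instead of `days + 2`.
```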
@@ -2,9 +2,10 @@
 This module loads custom exchanges
 """
 import logging
+from typing import Optional

 import freqtrade.exchange as exchanges
-from freqtrade.constants import Config
+from freqtrade.constants import Config, ExchangeConfig
 from freqtrade.exchange import MAP_EXCHANGE_CHILDCLASS, Exchange
 from freqtrade.resolvers import IResolver

@@ -19,13 +20,14 @@ class ExchangeResolver(IResolver):
     object_type = Exchange

     @staticmethod
-    def load_exchange(exchange_name: str, config: Config, validate: bool = True,
-                      load_leverage_tiers: bool = False) -> Exchange:
+    def load_exchange(config: Config, *, exchange_config: Optional[ExchangeConfig] = None,
+                      validate: bool = True, load_leverage_tiers: bool = False) -> Exchange:
         """
         Load the custom class from config parameter
         :param exchange_name: name of the Exchange to load
         :param config: configuration dictionary
         """
+        exchange_name: str = config['exchange']['name']
         # Map exchange name to avoid duplicate classes for identical exchanges
         exchange_name = MAP_EXCHANGE_CHILDCLASS.get(exchange_name, exchange_name)
         exchange_name = exchange_name.title()
@@ -36,13 +38,14 @@ class ExchangeResolver(IResolver):
                 kwargs={
                     'config': config,
                     'validate': validate,
+                    'exchange_config': exchange_config,
                     'load_leverage_tiers': load_leverage_tiers}
             )
         except ImportError:
             logger.info(
                 f"No {exchange_name} specific subclass found. Using the generic class instead.")
         if not exchange:
-            exchange = Exchange(config, validate=validate)
+            exchange = Exchange(config, validate=validate, exchange_config=exchange_config,)
         return exchange

     @staticmethod
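The new signature drops the separate `exchange_name` parameter (redundant with `config['exchange']['name']`) and makes the remaining options keyword-only. Call sites change as sketched below, where the config file path is an assumption; the real updated call site is in the `deps.py` hunk further down:

```python
from freqtrade.configuration import Configuration
from freqtrade.resolvers import ExchangeResolver

# Load a real configuration (the file name is a placeholder for this sketch)
config = Configuration.from_files(['config.json'])

# Before: ExchangeResolver.load_exchange(config['exchange']['name'], config, validate=True)
# After: the name is read from config['exchange']['name'] internally,
# and every remaining option must be passed by keyword.
exchange = ExchangeResolver.load_exchange(config, validate=True, load_leverage_tiers=False)
```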
@@ -8,14 +8,16 @@ from fastapi import APIRouter, BackgroundTasks, Depends
 from fastapi.exceptions import HTTPException

 from freqtrade.configuration.config_validation import validate_config_consistency
+from freqtrade.constants import Config
 from freqtrade.data.btanalysis import get_backtest_resultlist, load_and_merge_backtest_result
 from freqtrade.enums import BacktestState
 from freqtrade.exceptions import DependencyException, OperationalException
+from freqtrade.exchange.common import remove_exchange_credentials
 from freqtrade.misc import deep_merge_dicts
 from freqtrade.rpc.api_server.api_schemas import (BacktestHistoryEntry, BacktestRequest,
                                                   BacktestResponse)
 from freqtrade.rpc.api_server.deps import get_config, is_webserver_mode
-from freqtrade.rpc.api_server.webserver import ApiServer
+from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
 from freqtrade.rpc.rpc import RPCException


@@ -25,19 +27,92 @@ logger = logging.getLogger(__name__)
 router = APIRouter()


+def __run_backtest_bg(btconfig: Config):
+    from freqtrade.optimize.optimize_reports import generate_backtest_stats, store_backtest_stats
+    from freqtrade.resolvers import StrategyResolver
+    asyncio.set_event_loop(asyncio.new_event_loop())
+    try:
+        # Reload strategy
+        lastconfig = ApiBG.bt['last_config']
+        strat = StrategyResolver.load_strategy(btconfig)
+        validate_config_consistency(btconfig)
+
+        if (
+            not ApiBG.bt['bt']
+            or lastconfig.get('timeframe') != strat.timeframe
+            or lastconfig.get('timeframe_detail') != btconfig.get('timeframe_detail')
+            or lastconfig.get('timerange') != btconfig['timerange']
+        ):
+            from freqtrade.optimize.backtesting import Backtesting
+            ApiBG.bt['bt'] = Backtesting(btconfig)
+            ApiBG.bt['bt'].load_bt_data_detail()
+        else:
+            ApiBG.bt['bt'].config = btconfig
+            ApiBG.bt['bt'].init_backtest()
+        # Only reload data if timeframe changed.
+        if (
+            not ApiBG.bt['data']
+            or not ApiBG.bt['timerange']
+            or lastconfig.get('timeframe') != strat.timeframe
+            or lastconfig.get('timerange') != btconfig['timerange']
+        ):
+            ApiBG.bt['data'], ApiBG.bt['timerange'] = ApiBG.bt[
+                'bt'].load_bt_data()
+
+        lastconfig['timerange'] = btconfig['timerange']
+        lastconfig['timeframe'] = strat.timeframe
+        lastconfig['protections'] = btconfig.get('protections', [])
+        lastconfig['enable_protections'] = btconfig.get('enable_protections')
+        lastconfig['dry_run_wallet'] = btconfig.get('dry_run_wallet')
+
+        ApiBG.bt['bt'].enable_protections = btconfig.get('enable_protections', False)
+        ApiBG.bt['bt'].strategylist = [strat]
+        ApiBG.bt['bt'].results = {}
+        ApiBG.bt['bt'].load_prior_backtest()
+
+        ApiBG.bt['bt'].abort = False
+        if (ApiBG.bt['bt'].results and
+                strat.get_strategy_name() in ApiBG.bt['bt'].results['strategy']):
+            # When previous result hash matches - reuse that result and skip backtesting.
+            logger.info(f'Reusing result of previous backtest for {strat.get_strategy_name()}')
+        else:
+            min_date, max_date = ApiBG.bt['bt'].backtest_one_strategy(
+                strat, ApiBG.bt['data'], ApiBG.bt['timerange'])
+
+            ApiBG.bt['bt'].results = generate_backtest_stats(
+                ApiBG.bt['data'], ApiBG.bt['bt'].all_results,
+                min_date=min_date, max_date=max_date)
+
+        if btconfig.get('export', 'none') == 'trades':
+            store_backtest_stats(
+                btconfig['exportfilename'], ApiBG.bt['bt'].results,
+                datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+            )
+
+        logger.info("Backtest finished.")
+
+    except (Exception, OperationalException, DependencyException) as e:
+        logger.exception(f"Backtesting caused an error: {e}")
+        ApiBG.bt['bt_error'] = str(e)
+        pass
+    finally:
+        ApiBG.bgtask_running = False
+
+
 @router.post('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
-async def api_start_backtest(  # noqa: C901
+async def api_start_backtest(
         bt_settings: BacktestRequest, background_tasks: BackgroundTasks,
         config=Depends(get_config), ws_mode=Depends(is_webserver_mode)):
-    ApiServer._bt['bt_error'] = None
+    ApiBG.bt['bt_error'] = None
     """Start backtesting if not done so already"""
-    if ApiServer._bgtask_running:
+    if ApiBG.bgtask_running:
        raise RPCException('Bot Background task already running')

     if ':' in bt_settings.strategy:
         raise HTTPException(status_code=500, detail="base64 encoded strategies are not allowed.")

     btconfig = deepcopy(config)
+    remove_exchange_credentials(btconfig['exchange'], True)
     settings = dict(bt_settings)
     if settings.get('freqai', None) is not None:
         settings['freqai'] = dict(settings['freqai'])
@@ -54,80 +129,9 @@ async def api_start_backtest(  # noqa: C901

     # Start backtesting
     # Initialize backtesting object
-    def run_backtest():
-        from freqtrade.optimize.optimize_reports import (generate_backtest_stats,
-                                                         store_backtest_stats)
-        from freqtrade.resolvers import StrategyResolver
-        asyncio.set_event_loop(asyncio.new_event_loop())
-        try:
-            # Reload strategy
-            lastconfig = ApiServer._bt['last_config']
-            strat = StrategyResolver.load_strategy(btconfig)
-            validate_config_consistency(btconfig)
-
-            if (
-                not ApiServer._bt['bt']
-                or lastconfig.get('timeframe') != strat.timeframe
-                or lastconfig.get('timeframe_detail') != btconfig.get('timeframe_detail')
-                or lastconfig.get('timerange') != btconfig['timerange']
-            ):
-                from freqtrade.optimize.backtesting import Backtesting
-                ApiServer._bt['bt'] = Backtesting(btconfig)
-                ApiServer._bt['bt'].load_bt_data_detail()
-            else:
-                ApiServer._bt['bt'].config = btconfig
-                ApiServer._bt['bt'].init_backtest()
-            # Only reload data if timeframe changed.
-            if (
-                not ApiServer._bt['data']
-                or not ApiServer._bt['timerange']
-                or lastconfig.get('timeframe') != strat.timeframe
-                or lastconfig.get('timerange') != btconfig['timerange']
-            ):
-                ApiServer._bt['data'], ApiServer._bt['timerange'] = ApiServer._bt[
-                    'bt'].load_bt_data()
-
-            lastconfig['timerange'] = btconfig['timerange']
-            lastconfig['timeframe'] = strat.timeframe
-            lastconfig['protections'] = btconfig.get('protections', [])
-            lastconfig['enable_protections'] = btconfig.get('enable_protections')
-            lastconfig['dry_run_wallet'] = btconfig.get('dry_run_wallet')
-
-            ApiServer._bt['bt'].enable_protections = btconfig.get('enable_protections', False)
-            ApiServer._bt['bt'].strategylist = [strat]
-            ApiServer._bt['bt'].results = {}
-            ApiServer._bt['bt'].load_prior_backtest()
-
-            ApiServer._bt['bt'].abort = False
-            if (ApiServer._bt['bt'].results and
-                    strat.get_strategy_name() in ApiServer._bt['bt'].results['strategy']):
-                # When previous result hash matches - reuse that result and skip backtesting.
-                logger.info(f'Reusing result of previous backtest for {strat.get_strategy_name()}')
-            else:
-                min_date, max_date = ApiServer._bt['bt'].backtest_one_strategy(
-                    strat, ApiServer._bt['data'], ApiServer._bt['timerange'])
-
-                ApiServer._bt['bt'].results = generate_backtest_stats(
-                    ApiServer._bt['data'], ApiServer._bt['bt'].all_results,
-                    min_date=min_date, max_date=max_date)
-
-            if btconfig.get('export', 'none') == 'trades':
-                store_backtest_stats(
-                    btconfig['exportfilename'], ApiServer._bt['bt'].results,
-                    datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
-                )
-
-            logger.info("Backtest finished.")
-
-        except (Exception, OperationalException, DependencyException) as e:
-            logger.exception(f"Backtesting caused an error: {e}")
-            ApiServer._bt['bt_error'] = str(e)
-            pass
-        finally:
-            ApiServer._bgtask_running = False
-
-    background_tasks.add_task(run_backtest)
-    ApiServer._bgtask_running = True
+    background_tasks.add_task(__run_backtest_bg, btconfig=btconfig)
+    ApiBG.bgtask_running = True

     return {
         "status": "running",
@@ -145,18 +149,18 @@ def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
     Returns Result after backtesting has been run.
     """
     from freqtrade.persistence import LocalTrade
-    if ApiServer._bgtask_running:
+    if ApiBG.bgtask_running:
         return {
             "status": "running",
             "running": True,
-            "step": (ApiServer._bt['bt'].progress.action if ApiServer._bt['bt']
+            "step": (ApiBG.bt['bt'].progress.action if ApiBG.bt['bt']
                      else str(BacktestState.STARTUP)),
-            "progress": ApiServer._bt['bt'].progress.progress if ApiServer._bt['bt'] else 0,
+            "progress": ApiBG.bt['bt'].progress.progress if ApiBG.bt['bt'] else 0,
             "trade_count": len(LocalTrade.trades),
             "status_msg": "Backtest running",
         }

-    if not ApiServer._bt['bt']:
+    if not ApiBG.bt['bt']:
         return {
             "status": "not_started",
             "running": False,
@@ -164,13 +168,13 @@ def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
             "progress": 0,
             "status_msg": "Backtest not yet executed"
         }
-    if ApiServer._bt['bt_error']:
+    if ApiBG.bt['bt_error']:
         return {
             "status": "error",
             "running": False,
             "step": "",
             "progress": 0,
-            "status_msg": f"Backtest failed with {ApiServer._bt['bt_error']}"
+            "status_msg": f"Backtest failed with {ApiBG.bt['bt_error']}"
         }

     return {
@@ -179,14 +183,14 @@ def api_get_backtest(ws_mode=Depends(is_webserver_mode)):
         "status_msg": "Backtest ended",
         "step": "finished",
         "progress": 1,
-        "backtest_result": ApiServer._bt['bt'].results,
+        "backtest_result": ApiBG.bt['bt'].results,
     }


 @router.delete('/backtest', response_model=BacktestResponse, tags=['webserver', 'backtest'])
 def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
     """Reset backtesting"""
-    if ApiServer._bgtask_running:
+    if ApiBG.bgtask_running:
         return {
             "status": "running",
             "running": True,
@@ -194,12 +198,12 @@ def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):
             "progress": 0,
             "status_msg": "Backtest running",
         }
-    if ApiServer._bt['bt']:
-        ApiServer._bt['bt'].cleanup()
-        del ApiServer._bt['bt']
-        ApiServer._bt['bt'] = None
-        del ApiServer._bt['data']
-        ApiServer._bt['data'] = None
+    if ApiBG.bt['bt']:
+        ApiBG.bt['bt'].cleanup()
+        del ApiBG.bt['bt']
+        ApiBG.bt['bt'] = None
+        del ApiBG.bt['data']
+        ApiBG.bt['data'] = None
         logger.info("Backtesting reset")
     return {
         "status": "reset",
@@ -212,7 +216,7 @@ def api_delete_backtest(ws_mode=Depends(is_webserver_mode)):

 @router.get('/backtest/abort', response_model=BacktestResponse, tags=['webserver', 'backtest'])
 def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
-    if not ApiServer._bgtask_running:
+    if not ApiBG.bgtask_running:
         return {
             "status": "not_running",
             "running": False,
@@ -220,7 +224,7 @@ def api_backtest_abort(ws_mode=Depends(is_webserver_mode)):
             "progress": 0,
             "status_msg": "Backtest ended",
         }
-    ApiServer._bt['bt'].abort = True
+    ApiBG.bt['bt'].abort = True
     return {
         "status": "stopping",
         "running": False,
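The closure that captured request-local state is replaced by a module-level worker taking its config as an argument, with shared mutable state moved onto `ApiBG`. The underlying FastAPI pattern, as a minimal self-contained sketch:

```python
from fastapi import BackgroundTasks, FastAPI

app = FastAPI()

def heavy_job(payload: dict) -> None:
    # Runs after the response has been sent; errors must be handled inside,
    # mirroring the try/except/finally structure of __run_backtest_bg above.
    ...

@app.post("/start")
async def start(background_tasks: BackgroundTasks):
    # Arguments are passed explicitly instead of being captured by a closure
    background_tasks.add_task(heavy_job, payload={"key": "value"})
    return {"status": "running"}
```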
@@ -36,20 +36,25 @@ class Balance(BaseModel):
     free: float
     balance: float
     used: float
+    bot_owned: Optional[float]
     est_stake: float
+    est_stake_bot: Optional[float]
     stake: str
     # Starting with 2.x
     side: str
     leverage: float
     is_position: bool
     position: float
+    is_bot_managed: bool


 class Balances(BaseModel):
     currencies: List[Balance]
     total: float
+    total_bot: float
     symbol: str
     value: float
+    value_bot: float
     stake: str
     note: str
     starting_capital: float
@@ -95,8 +100,10 @@ class Profit(BaseModel):
     trade_count: int
     closed_trade_count: int
     first_trade_date: str
+    first_trade_humanized: str
     first_trade_timestamp: int
     latest_trade_date: str
+    latest_trade_humanized: str
     latest_trade_timestamp: int
     avg_duration: str
     best_pair: str
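Under pydantic v1, which freqtrade used at the time, fields annotated `Optional[float]` default to `None`, so the extended `/balance` payload stays backward compatible. A trimmed sketch of the model, reduced to the fields relevant here:

```python
from typing import Optional

from pydantic import BaseModel

class Balance(BaseModel):          # trimmed sketch, not the full schema
    currency: str
    balance: float
    bot_owned: Optional[float]     # may be None, e.g. for pure positions
    est_stake: float
    est_stake_bot: Optional[float]
    is_bot_managed: bool

b = Balance(currency='BTC', balance=0.5, est_stake=0.5, is_bot_managed=False)
print(b.bot_owned)  # None - the optional fields may simply be absent
```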
@@ -43,7 +43,10 @@ logger = logging.getLogger(__name__)
 # 2.23: Allow plot config request in webserver mode
 # 2.24: Add cancel_open_order endpoint
 # 2.25: Add several profit values to /status endpoint
-API_VERSION = 2.25
+# 2.26: increase /balance output
+# 2.27: Add /trades/<id>/reload endpoint
+# 2.28: Switch reload endpoint to Post
+API_VERSION = 2.28

 # Public API, requires no auth.
 router_public = APIRouter()
@@ -126,11 +129,17 @@ def trades_delete(tradeid: int, rpc: RPC = Depends(get_rpc)):


 @router.delete('/trades/{tradeid}/open-order', response_model=OpenTradeSchema, tags=['trading'])
-def cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)):
+def trade_cancel_open_order(tradeid: int, rpc: RPC = Depends(get_rpc)):
     rpc._rpc_cancel_open_order(tradeid)
     return rpc._rpc_trade_status([tradeid])[0]


+@router.post('/trades/{tradeid}/reload', response_model=OpenTradeSchema, tags=['trading'])
+def trade_reload(tradeid: int, rpc: RPC = Depends(get_rpc)):
+    rpc._rpc_reload_trade_from_exchange(tradeid)
+    return rpc._rpc_trade_status([tradeid])[0]
+
+
 # TODO: Missing response model
 @router.get('/edge', tags=['info'])
 def edge(rpc: RPC = Depends(get_rpc)):
@@ -246,14 +255,17 @@ def pair_candles(

 @router.get('/pair_history', response_model=PairHistory, tags=['candle data'])
 def pair_history(pair: str, timeframe: str, timerange: str, strategy: str,
+                 freqaimodel: Optional[str] = None,
                  config=Depends(get_config), exchange=Depends(get_exchange)):
     # The initial call to this endpoint can be slow, as it may need to initialize
     # the exchange class.
     config = deepcopy(config)
     config.update({
         'strategy': strategy,
+        'timerange': timerange,
+        'freqaimodel': freqaimodel if freqaimodel else config.get('freqaimodel'),
     })
-    return RPC._rpc_analysed_history_full(config, pair, timeframe, timerange, exchange)
+    return RPC._rpc_analysed_history_full(config, pair, timeframe, exchange)


 @router.get('/plot_config', response_model=PlotConfig, tags=['candle data'])
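A hedged usage sketch for the new reload endpoint; host, port and the basic-auth credentials are assumptions standing in for whatever is set under `api_server` in the bot's configuration:

```python
import requests

# Assumed local defaults; adjust to your api_server configuration.
BASE = "http://127.0.0.1:8080/api/v1"
auth = ("freqtrader", "password")

# Re-sync trade 23 from its exchange orders, then inspect the returned trade
resp = requests.post(f"{BASE}/trades/23/reload", auth=auth)
resp.raise_for_status()
print(resp.json())
```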
@@ -6,6 +6,7 @@ from fastapi import Depends
 from freqtrade.enums import RunMode
 from freqtrade.persistence import Trade
 from freqtrade.persistence.models import _request_id_ctx_var
+from freqtrade.rpc.api_server.webserver_bgwork import ApiBG
 from freqtrade.rpc.rpc import RPC, RPCException

 from .webserver import ApiServer
@@ -43,11 +44,11 @@ def get_api_config() -> Dict[str, Any]:


 def get_exchange(config=Depends(get_config)):
-    if not ApiServer._exchange:
+    if not ApiBG.exchange:
         from freqtrade.resolvers import ExchangeResolver
-        ApiServer._exchange = ExchangeResolver.load_exchange(
-            config['exchange']['name'], config, load_leverage_tiers=False)
-    return ApiServer._exchange
+        ApiBG.exchange = ExchangeResolver.load_exchange(
+            config, load_leverage_tiers=False)
+    return ApiBG.exchange


 def get_message_stream():
@@ -1,6 +1,6 @@
 import logging
 from ipaddress import IPv4Address
-from typing import Any, Dict, Optional
+from typing import Any, Optional

 import orjson
 import uvicorn
@@ -36,19 +36,8 @@ class ApiServer(RPCHandler):
     __initialized = False

     _rpc: RPC
-    # Backtesting type: Backtesting
-    _bt: Dict[str, Any] = {
-        'bt': None,
-        'data': None,
-        'timerange': None,
-        'last_config': {},
-        'bt_error': None,
-    }
     _has_rpc: bool = False
-    _bgtask_running: bool = False
     _config: Config = {}
-    # Exchange - only available in webserver mode.
-    _exchange = None
     # websocket message stuff
     _message_stream: Optional[MessageStream] = None

@@ -85,7 +74,7 @@ class ApiServer(RPCHandler):
         """
         Attach rpc handler
         """
-        if not self._has_rpc:
+        if not ApiServer._has_rpc:
             ApiServer._rpc = rpc
             ApiServer._has_rpc = True
         else:
freqtrade/rpc/api_server/webserver_bgwork.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+
+from typing import Any, Dict
+
+
+class ApiBG():
+    # Backtesting type: Backtesting
+    bt: Dict[str, Any] = {
+        'bt': None,
+        'data': None,
+        'timerange': None,
+        'last_config': {},
+        'bt_error': None,
+    }
+    bgtask_running: bool = False
+    # Exchange - only available in webserver mode.
+    exchange = None
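`ApiBG` is deliberately never instantiated: all state is class-level, giving a single process-wide store that routes and the background worker mutate directly (a plain-class singleton rather than per-request instances). Usage, as seen in the hunks above and in `deps.py` below:

```python
from freqtrade.rpc.api_server.webserver_bgwork import ApiBG

ApiBG.bgtask_running = True      # set by the route before scheduling work
if ApiBG.bt['bt'] is not None:   # read by the status endpoints
    ...
```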
@@ -7,7 +7,6 @@ from datetime import date, datetime, timedelta, timezone
 from math import isnan
 from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Union

-import arrow
 import psutil
 from dateutil.relativedelta import relativedelta
 from dateutil.tz import tzlocal
@@ -26,12 +25,13 @@ from freqtrade.exceptions import ExchangeError, PricingError
 from freqtrade.exchange import timeframe_to_minutes, timeframe_to_msecs
 from freqtrade.exchange.types import Tickers
 from freqtrade.loggers import bufferHandler
-from freqtrade.misc import decimals_per_coin, shorten_date
+from freqtrade.misc import decimals_per_coin
 from freqtrade.persistence import KeyStoreKeys, KeyValueStore, Order, PairLocks, Trade
 from freqtrade.persistence.models import PairLock
 from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
 from freqtrade.rpc.fiat_convert import CryptoToFiatConverter
 from freqtrade.rpc.rpc_types import RPCSendMsg
+from freqtrade.util import dt_humanize, dt_now, shorten_date
 from freqtrade.wallets import PositionWallet, Wallet


@@ -292,7 +292,7 @@ class RPC:
                      and open_order.ft_order_side == trade.entry_side) else '')
                 + ('**' if (open_order and
                             open_order.ft_order_side == trade.exit_side is not None) else ''),
-                shorten_date(arrow.get(trade.open_date).humanize(only_distance=True)),
+                shorten_date(dt_humanize(trade.open_date, only_distance=True)),
                 profit_str
             ]
             if self._config.get('position_adjustment_enable', False):
@@ -420,16 +420,15 @@ class RPC:
             else:
                 return 'draws'
         trades = Trade.get_trades([Trade.is_open.is_(False)], include_orders=False)
-        # Sell reason
+        # Duration
+        dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
+        # Exit reason
         exit_reasons = {}
         for trade in trades:
             if trade.exit_reason not in exit_reasons:
                 exit_reasons[trade.exit_reason] = {'wins': 0, 'losses': 0, 'draws': 0}
             exit_reasons[trade.exit_reason][trade_win_loss(trade)] += 1

-        # Duration
-        dur: Dict[str, List[float]] = {'wins': [], 'draws': [], 'losses': []}
-        for trade in trades:
             if trade.close_date is not None and trade.open_date is not None:
                 trade_dur = (trade.close_date - trade.open_date).total_seconds()
                 dur[trade_win_loss(trade)].append(trade_dur)
@@ -541,8 +540,8 @@ class RPC:
                 fiat_display_currency
             ) if self._fiat_converter else 0

-        first_date = trades[0].open_date if trades else None
-        last_date = trades[-1].open_date if trades else None
+        first_date = trades[0].open_date_utc if trades else None
+        last_date = trades[-1].open_date_utc if trades else None
         num = float(len(durations) or 1)
         bot_start = KeyValueStore.get_datetime_value(KeyStoreKeys.BOT_START_TIME)
         return {
@@ -564,9 +563,11 @@ class RPC:
             'profit_all_fiat': profit_all_fiat,
             'trade_count': len(trades),
             'closed_trade_count': len([t for t in trades if not t.is_open]),
-            'first_trade_date': arrow.get(first_date).humanize() if first_date else '',
+            'first_trade_date': first_date.strftime(DATETIME_PRINT_FORMAT) if first_date else '',
+            'first_trade_humanized': dt_humanize(first_date) if first_date else '',
             'first_trade_timestamp': int(first_date.timestamp() * 1000) if first_date else 0,
-            'latest_trade_date': arrow.get(last_date).humanize() if last_date else '',
+            'latest_trade_date': last_date.strftime(DATETIME_PRINT_FORMAT) if last_date else '',
+            'latest_trade_humanized': dt_humanize(last_date) if last_date else '',
             'latest_trade_timestamp': int(last_date.timestamp() * 1000) if last_date else 0,
             'avg_duration': str(timedelta(seconds=sum(durations) / num)).split('.')[0],
             'best_pair': best_pair[0] if best_pair else '',
@@ -583,13 +584,16 @@ class RPC:
         }

     def __balance_get_est_stake(
-            self, coin: str, stake_currency: str, balance: Wallet, tickers) -> float:
+            self, coin: str, stake_currency: str, amount: float,
+            balance: Wallet, tickers) -> Tuple[float, float]:
         est_stake = 0.0
+        est_bot_stake = 0.0
         if coin == stake_currency:
             est_stake = balance.total
             if self._config.get('trading_mode', TradingMode.SPOT) != TradingMode.SPOT:
                 # in Futures, "total" includes the locked stake, and therefore all positions
                 est_stake = balance.free
+            est_bot_stake = amount
         else:
             try:
                 pair = self._freqtrade.exchange.get_valid_pair_combination(coin, stake_currency)
@@ -598,11 +602,12 @@ class RPC:
                 if pair.startswith(stake_currency) and not pair.endswith(stake_currency):
                     rate = 1.0 / rate
                 est_stake = rate * balance.total
+                est_bot_stake = rate * amount
             except (ExchangeError):
                 logger.warning(f"Could not get rate for pair {coin}.")
                 raise ValueError()

-        return est_stake
+        return est_stake, est_bot_stake

     def _rpc_balance(self, stake_currency: str, fiat_display_currency: str) -> Dict:
         """ Returns current account balance per crypto """
@@ -615,7 +620,7 @@ class RPC:
             raise RPCException('Error getting current tickers.')

         open_trades: List[Trade] = Trade.get_open_trades()
-        open_assets = [t.base_currency for t in open_trades]
+        open_assets: Dict[str, Trade] = {t.safe_base_currency: t for t in open_trades}
         self._freqtrade.wallets.update(require_update=False)
         starting_capital = self._freqtrade.wallets.get_starting_balance()
         starting_cap_fiat = self._fiat_converter.convert_amount(
@@ -625,30 +630,43 @@ class RPC:
         for coin, balance in self._freqtrade.wallets.get_all_balances().items():
             if not balance.total:
                 continue

+            trade = open_assets.get(coin, None)
+            is_bot_managed = coin == stake_currency or trade is not None
+            trade_amount = trade.amount if trade else 0
+            if coin == stake_currency:
+                trade_amount = self._freqtrade.wallets.get_available_stake_amount()
+
             try:
-                est_stake = self.__balance_get_est_stake(coin, stake_currency, balance, tickers)
+                est_stake, est_stake_bot = self.__balance_get_est_stake(
+                    coin, stake_currency, trade_amount, balance, tickers)
             except ValueError:
                 continue

             total += est_stake
-            if coin == stake_currency or coin in open_assets:
-                total_bot += est_stake
+            if is_bot_managed:
+                total_bot += est_stake_bot
             currencies.append({
                 'currency': coin,
                 'free': balance.free,
                 'balance': balance.total,
                 'used': balance.used,
+                'bot_owned': trade_amount,
                 'est_stake': est_stake or 0,
+                'est_stake_bot': est_stake_bot if is_bot_managed else 0,
                 'stake': stake_currency,
                 'side': 'long',
                 'leverage': 1,
                 'position': 0,
+                'is_bot_managed': is_bot_managed,
                 'is_position': False,
             })
         symbol: str
         position: PositionWallet
         for symbol, position in self._freqtrade.wallets.get_all_positions().items():
             total += position.collateral
+            total_bot += position.collateral

             currencies.append({
                 'currency': symbol,
@@ -657,9 +675,11 @@ class RPC:
                 'used': 0,
                 'position': position.position,
                 'est_stake': position.collateral,
+                'est_stake_bot': position.collateral,
                 'stake': stake_currency,
                 'leverage': position.leverage,
                 'side': position.side,
+                'is_bot_managed': True,
                 'is_position': True
             })

@@ -675,8 +695,10 @@ class RPC:
         return {
             'currencies': currencies,
             'total': total,
+            'total_bot': total_bot,
             'symbol': fiat_display_currency,
             'value': value,
+            'value_bot': value_bot,
             'stake': stake_currency,
             'starting_capital': starting_capital,
             'starting_capital_ratio': starting_capital_ratio,
@@ -720,6 +742,18 @@ class RPC:

         return {'status': 'No more entries will occur from now. Run /reload_config to reset.'}

+    def _rpc_reload_trade_from_exchange(self, trade_id: int) -> Dict[str, str]:
+        """
+        Handler for reload_trade_from_exchange.
+        Reloads a trade from its orders, should manual interaction have happened.
+        """
+        trade = Trade.get_trades(trade_filter=[Trade.id == trade_id]).first()
+        if not trade:
+            raise RPCException(f"Could not find trade with id {trade_id}.")
+
+        self._freqtrade.handle_onexchange_order(trade)
+        return {'status': 'Reloaded from orders from exchange'}
+
     def __exec_force_exit(self, trade: Trade, ordertype: Optional[str],
                           amount: Optional[float] = None) -> None:
         # Check if there is an open order
@@ -1195,8 +1229,8 @@ class RPC:

     @staticmethod
     def _rpc_analysed_history_full(config: Config, pair: str, timeframe: str,
-                                   timerange: str, exchange) -> Dict[str, Any]:
-        timerange_parsed = TimeRange.parse_timerange(timerange)
+                                   exchange) -> Dict[str, Any]:
+        timerange_parsed = TimeRange.parse_timerange(config.get('timerange'))

         _data = load_data(
             datadir=config["datadir"],
@@ -1207,7 +1241,8 @@ class RPC:
             candle_type=config.get('candle_type_def', CandleType.SPOT)
         )
         if pair not in _data:
-            raise RPCException(f"No data for {pair}, {timeframe} in {timerange} found.")
+            raise RPCException(
+                f"No data for {pair}, {timeframe} in {config.get('timerange')} found.")
         from freqtrade.data.dataprovider import DataProvider
         from freqtrade.resolvers.strategy_resolver import StrategyResolver
         strategy = StrategyResolver.load_strategy(config)
@@ -1217,7 +1252,7 @@ class RPC:
         df_analyzed = strategy.analyze_ticker(_data[pair], {'pair': pair})

         return RPC._convert_dataframe_to_dict(strategy.get_strategy_name(), pair, timeframe,
-                                              df_analyzed, arrow.Arrow.utcnow().datetime)
+                                              df_analyzed, dt_now())

     def _rpc_plot_config(self) -> Dict[str, Any]:
         if (self._freqtrade.strategy.plot_config and
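Note the semantic change: `first_trade_date` and `latest_trade_date` used to carry a humanized string and now carry an absolute timestamp, with the humanized text moved to the new `*_humanized` keys added to the `Profit` schema above. A sketch, where the `DATETIME_PRINT_FORMAT` value shown is an assumption about freqtrade's constant:

```python
from datetime import timedelta

from freqtrade.util import dt_humanize, dt_now

DATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'  # assumed value of freqtrade's constant
first_date = dt_now() - timedelta(days=2)

print(first_date.strftime(DATETIME_PRINT_FORMAT))  # absolute date, new 'first_trade_date'
print(dt_humanize(first_date))                     # '2 days ago', new 'first_trade_humanized'
```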
(File diff suppressed because it is too large)
@@ -44,8 +44,11 @@ class Webhook(RPCHandler):

     def _get_value_dict(self, msg: RPCSendMsg) -> Optional[Dict[str, Any]]:
         whconfig = self._config['webhook']
+        if msg['type'].value in whconfig:
+            # Explicit types should have priority
+            valuedict = whconfig.get(msg['type'].value)
         # Deprecated 2022.10 - only keep generic method.
-        if msg['type'] in [RPCMessageType.ENTRY]:
+        elif msg['type'] in [RPCMessageType.ENTRY]:
             valuedict = whconfig.get('webhookentry')
         elif msg['type'] in [RPCMessageType.ENTRY_CANCEL]:
             valuedict = whconfig.get('webhookentrycancel')
@@ -62,9 +65,6 @@ class Webhook(RPCHandler):
                 RPCMessageType.EXCEPTION,
                 RPCMessageType.WARNING):
             valuedict = whconfig.get('webhookstatus')
-        elif msg['type'].value in whconfig:
-            # Allow all types ...
-            valuedict = whconfig.get(msg['type'].value)
         elif msg['type'] in (
                 RPCMessageType.PROTECTION_TRIGGER,
                 RPCMessageType.PROTECTION_TRIGGER_GLOBAL,
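With the explicit `msg['type'].value in whconfig` branch moved to the top of the chain, a per-message-type webhook section now always wins over the deprecated `webhookentry`-style keys. A sketch of the lookup order with a hypothetical config ('entry' stands in for the value of `RPCMessageType.ENTRY`):

```python
whconfig = {
    'entry': {'value1': 'explicit'},        # explicit per-message-type section
    'webhookentry': {'value1': 'legacy'},   # deprecated 2022.10 fallback
}
msg_type_value = 'entry'

# After this commit the explicit key is checked first:
if msg_type_value in whconfig:
    valuedict = whconfig[msg_type_value]
else:
    valuedict = whconfig.get('webhookentry')
print(valuedict)  # {'value1': 'explicit'}
```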
@@ -7,7 +7,6 @@ from abc import ABC, abstractmethod
 from datetime import datetime, timedelta, timezone
 from typing import Dict, List, Optional, Tuple, Union

-import arrow
 from pandas import DataFrame

 from freqtrade.constants import CUSTOM_TAG_MAX_LENGTH, Config, IntOrInf, ListPairsWithTimeframes
@@ -23,6 +22,7 @@ from freqtrade.strategy.informative_decorator import (InformativeData, PopulateI
                                                       _create_and_merge_informative_pair,
                                                       _format_pair_name)
 from freqtrade.strategy.strategy_wrapper import strategy_safe_wrapper
+from freqtrade.util import dt_now
 from freqtrade.wallets import Wallets


@@ -938,7 +938,7 @@ class IStrategy(ABC, HyperStrategyMixin):
         pair: str,
         timeframe: str,
         dataframe: DataFrame,
-    ) -> Tuple[Optional[DataFrame], Optional[arrow.Arrow]]:
+    ) -> Tuple[Optional[DataFrame], Optional[datetime]]:
         """
         Calculates current signal based on the entry order or exit order
         columns of the dataframe.
@@ -954,16 +954,16 @@ class IStrategy(ABC, HyperStrategyMixin):

         latest_date = dataframe['date'].max()
         latest = dataframe.loc[dataframe['date'] == latest_date].iloc[-1]
-        # Explicitly convert to arrow object to ensure the below comparison does not fail
-        latest_date = arrow.get(latest_date)
+        # Explicitly convert to datetime object to ensure the below comparison does not fail
+        latest_date = latest_date.to_pydatetime()

         # Check if dataframe is out of date
         timeframe_minutes = timeframe_to_minutes(timeframe)
         offset = self.config.get('exchange', {}).get('outdated_offset', 5)
-        if latest_date < (arrow.utcnow().shift(minutes=-(timeframe_minutes * 2 + offset))):
+        if latest_date < (dt_now() - timedelta(minutes=timeframe_minutes * 2 + offset)):
             logger.warning(
                 'Outdated history for pair %s. Last tick is %s minutes old',
-                pair, int((arrow.utcnow() - latest_date).total_seconds() // 60)
+                pair, int((dt_now() - latest_date).total_seconds() // 60)
             )
             return None, None
         return latest, latest_date
@@ -1046,8 +1046,8 @@ class IStrategy(ABC, HyperStrategyMixin):
         timeframe_seconds = timeframe_to_seconds(timeframe)

         if self.ignore_expired_candle(
-            latest_date=latest_date.datetime,
-            current_time=datetime.now(timezone.utc),
+            latest_date=latest_date,
+            current_time=dt_now(),
             timeframe_seconds=timeframe_seconds,
             enter=bool(enter_signal)
         ):
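The staleness guard is now plain `datetime` arithmetic: candles older than `timeframe_minutes * 2 + offset` minutes are rejected. A sketch with a 5m timeframe and the default `outdated_offset` of 5:

```python
from datetime import timedelta

from freqtrade.util import dt_now

timeframe_minutes = 5   # 5m timeframe
offset = 5              # default exchange.outdated_offset

threshold = dt_now() - timedelta(minutes=timeframe_minutes * 2 + offset)
latest_date = dt_now() - timedelta(minutes=20)   # example candle age

if latest_date < threshold:
    print("Outdated history - signal is discarded")  # 20 min exceeds the 15 min window
```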
@@ -15,12 +15,15 @@ logger = logging.getLogger(__name__)
 class FreqaiExampleStrategy(IStrategy):
     """
     Example strategy showing how the user connects their own
-    IFreqaiModel to the strategy. Namely, the user uses:
-    self.freqai.start(dataframe, metadata)
+    IFreqaiModel to the strategy.

-    to make predictions on their data. feature_engineering_*() automatically
-    generate the variety of features indicated by the user in the
-    canonical freqtrade configuration file under config['freqai'].
+    Warning! This is a showcase of functionality,
+    which means that it is designed to show various functions of FreqAI
+    and it runs on all computers. We use this showcase to help users
+    understand how to build a strategy, and we use it as a benchmark
+    to help debug possible problems.
+
+    This means this is *not* meant to be run live in production.
     """

     minimal_roi = {"0": 0.1, "240": -1}
@@ -1,2 +1,17 @@
-from freqtrade.util.ft_precise import FtPrecise  # noqa: F401
-from freqtrade.util.periodic_cache import PeriodicCache  # noqa: F401
+from freqtrade.util.datetime_helpers import (dt_floor_day, dt_from_ts, dt_humanize, dt_now, dt_ts,
+                                             dt_utc, shorten_date)
+from freqtrade.util.ft_precise import FtPrecise
+from freqtrade.util.periodic_cache import PeriodicCache
+
+
+__all__ = [
+    'dt_floor_day',
+    'dt_from_ts',
+    'dt_now',
+    'dt_ts',
+    'dt_utc',
+    'dt_humanize',
+    'shorten_date',
+    'FtPrecise',
+    'PeriodicCache',
+]
freqtrade/util/datetime_helpers.py (new file, 63 lines)

@@ -0,0 +1,63 @@
+import re
+from datetime import datetime, timezone
+from typing import Optional
+
+import arrow
+
+
+def dt_now() -> datetime:
+    """Return the current datetime in UTC."""
+    return datetime.now(timezone.utc)
+
+
+def dt_utc(year: int, month: int, day: int, hour: int = 0, minute: int = 0, second: int = 0,
+           microsecond: int = 0) -> datetime:
+    """Return a datetime in UTC."""
+    return datetime(year, month, day, hour, minute, second, microsecond, tzinfo=timezone.utc)
+
+
+def dt_ts(dt: Optional[datetime] = None) -> int:
+    """
+    Return dt in ms as a timestamp in UTC.
+    If dt is None, return the current datetime in UTC.
+    """
+    if dt:
+        return int(dt.timestamp() * 1000)
+    return int(dt_now().timestamp() * 1000)
+
+
+def dt_floor_day(dt: datetime) -> datetime:
+    """Return the floor of the day for the given datetime."""
+    return dt.replace(hour=0, minute=0, second=0, microsecond=0)
+
+
+def dt_from_ts(timestamp: float) -> datetime:
+    """
+    Return a datetime from a timestamp.
+    :param timestamp: timestamp in seconds or milliseconds
+    """
+    if timestamp > 1e10:
+        # Timestamp in ms - convert to seconds
+        timestamp /= 1000
+    return datetime.fromtimestamp(timestamp, tz=timezone.utc)
+
+
+def shorten_date(_date: str) -> str:
+    """
+    Trim the date so it fits on small screens
+    """
+    new_date = re.sub('seconds?', 'sec', _date)
+    new_date = re.sub('minutes?', 'min', new_date)
+    new_date = re.sub('hours?', 'h', new_date)
+    new_date = re.sub('days?', 'd', new_date)
+    new_date = re.sub('^an?', '1', new_date)
+    return new_date
+
+
+def dt_humanize(dt: datetime, **kwargs) -> str:
+    """
+    Return a humanized string for the given datetime.
+    :param dt: datetime to humanize
+    :param kwargs: kwargs to pass to arrow's humanize()
+    """
+    return arrow.get(dt).humanize(**kwargs)
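A quick usage sketch of the new module (illustrative only; the printed values depend on the wall clock):

from freqtrade.util.datetime_helpers import (dt_floor_day, dt_from_ts,
                                             dt_now, dt_ts, dt_utc)

now = dt_now()                     # timezone-aware UTC "now"
ms = dt_ts(now)                    # millisecond timestamp
# dt_from_ts accepts either milliseconds or seconds:
assert dt_from_ts(ms) == dt_from_ts(ms / 1000)
day = dt_floor_day(dt_utc(2023, 5, 21, 13, 37))
print(day)                         # 2023-05-21 00:00:00+00:00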
@@ -3,16 +3,16 @@

 import logging
 from copy import deepcopy
+from datetime import datetime, timedelta
 from typing import Dict, NamedTuple, Optional

-import arrow

 from freqtrade.constants import UNLIMITED_STAKE_AMOUNT, Config
 from freqtrade.enums import RunMode, TradingMode
 from freqtrade.exceptions import DependencyException
 from freqtrade.exchange import Exchange
 from freqtrade.misc import safe_value_fallback
 from freqtrade.persistence import LocalTrade, Trade
+from freqtrade.util.datetime_helpers import dt_now


 logger = logging.getLogger(__name__)
@@ -43,7 +43,7 @@ class Wallets:
         self._wallets: Dict[str, Wallet] = {}
         self._positions: Dict[str, PositionWallet] = {}
         self.start_cap = config['dry_run_wallet']
-        self._last_wallet_refresh = 0
+        self._last_wallet_refresh: Optional[datetime] = None
         self.update()

     def get_free(self, currency: str) -> float:
@@ -166,14 +166,19 @@ class Wallets:
         for trading operations, the latest balance is needed.
         :param require_update: Allow skipping an update if balances were recently refreshed
         """
-        if (require_update or (self._last_wallet_refresh + 3600 < arrow.utcnow().int_timestamp)):
+        now = dt_now()
+        if (
+            require_update
+            or self._last_wallet_refresh is None
+            or (self._last_wallet_refresh + timedelta(seconds=3600) < now)
+        ):
             if (not self._config['dry_run'] or self._config.get('runmode') == RunMode.LIVE):
                 self._update_live()
             else:
                 self._update_dry()
             if self._log:
                 logger.info('Wallets synced.')
-            self._last_wallet_refresh = arrow.utcnow().int_timestamp
+            self._last_wallet_refresh = dt_now()

     def get_all_balances(self) -> Dict[str, Wallet]:
         return self._wallets
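The refresh throttle now compares datetimes instead of integer epoch seconds, and initializing the marker to None guarantees that the very first update() always queries balances. A condensed, standalone sketch of the pattern (not from the diff):

from datetime import datetime, timedelta, timezone
from typing import Optional

_last_refresh: Optional[datetime] = None

def should_refresh(require_update: bool = False) -> bool:
    # Refresh when forced, when never refreshed, or when older than an hour.
    now = datetime.now(timezone.utc)
    return (require_update
            or _last_refresh is None
            or _last_refresh + timedelta(seconds=3600) < now)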
@@ -181,6 +186,35 @@ class Wallets:
     def get_all_positions(self) -> Dict[str, PositionWallet]:
         return self._positions

+    def _check_exit_amount(self, trade: Trade) -> bool:
+        if trade.trading_mode != TradingMode.FUTURES:
+            # Slightly higher offset than in safe_exit_amount.
+            wallet_amount: float = self.get_total(trade.safe_base_currency) * (2 - 0.981)
+        else:
+            # wallet_amount: float = self.wallets.get_free(trade.safe_base_currency)
+            position = self._positions.get(trade.pair)
+            if position is None:
+                # We don't own anything :O
+                return False
+            wallet_amount = position.position
+
+        if wallet_amount >= trade.amount:
+            return True
+        return False
+
+    def check_exit_amount(self, trade: Trade) -> bool:
+        """
+        Checks if the exit amount is available in the wallet.
+        :param trade: Trade to check
+        :return: True if the exit amount is available, False otherwise
+        """
+        if not self._check_exit_amount(trade):
+            # Update wallets just to make sure
+            self.update()
+            return self._check_exit_amount(trade)
+
+        return True
+
     def get_starting_balance(self) -> float:
         """
         Retrieves starting balance - based on either available capital,
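For spot trades, _check_exit_amount() scales the total balance up by the 2 - 0.981 (about 1.019) factor before comparing, so a shortfall of up to roughly 1.9% (fees, dust) still passes; for futures it requires an open position covering the trade amount. The public wrapper retries once after a forced wallet refresh so a momentarily stale balance does not block an exit. A hypothetical call site (names illustrative, not from this diff):

# Hypothetical exit-handling snippet:
if not wallets.check_exit_amount(trade):
    # Even after refreshing, the wallet cannot cover trade.amount.
    logger.warning("Not enough %s to exit %s",
                   trade.safe_base_currency, trade.pair)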
@@ -1,5 +1,5 @@
 [build-system]
-requires = ["setuptools >= 46.4.0", "wheel"]
+requires = ["setuptools >= 64.0.0", "wheel"]
 build-backend = "setuptools.build_meta"

 [tool.black]
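A likely motivation for the new setuptools floor (an assumption, not stated in the diff): 64.0.0 is the first release with PEP 660 support, which allows standards-based editable installs without the legacy setup.py develop path:

pip install -e .  # served by the PEP 660 build-backend hooks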
@@ -7,9 +7,9 @@
 -r docs/requirements-docs.txt

 coveralls==3.3.1
-ruff==0.0.262
-mypy==1.2.0
-pre-commit==3.2.2
+ruff==0.0.269
+mypy==1.3.0
+pre-commit==3.3.2
 pytest==7.3.1
 pytest-asyncio==0.21.0
 pytest-cov==4.0.0
@@ -18,15 +18,13 @@ pytest-random-order==1.1.0
 isort==5.12.0
 # For datetime mocking
 time-machine==2.9.0
-# fastapi testing
-httpx==0.24.0

 # Convert jupyter notebooks to markdown documents
-nbconvert==7.3.1
+nbconvert==7.4.0

 # mypy types
 types-cachetools==5.3.0.5
 types-filelock==3.2.7
-types-requests==2.28.11.17
+types-requests==2.30.0.0
 types-tabulate==0.9.0.2
-types-python-dateutil==2.8.19.12
+types-python-dateutil==2.8.19.13
@@ -2,11 +2,10 @@
 -r requirements-freqai.txt

 # Required for freqai-rl
-torch==1.13.1; python_version < '3.11'
-stable-baselines3==1.7.0; python_version < '3.11'
-sb3-contrib==1.7.0; python_version < '3.11'
-# Gym is forced to this version by stable-baselines3.
-setuptools==65.5.1 # Should be removed when gym is fixed.
-gym==0.21; python_version < '3.11'
+torch==2.0.1
+# Until proper releases are available, we can use these pre-release versions.
+gymnasium==0.28.1
+stable_baselines3==2.0.0a10
+sb3_contrib>=2.0.0a9
 # Progress bar for stable-baselines3 and sb3-contrib
-tqdm==4.65.0; python_version < '3.11'
+tqdm==4.65.0
@@ -5,7 +5,8 @@
 # Required for freqai
 scikit-learn==1.1.3
 joblib==1.2.0
-catboost==1.1.1; platform_machine != 'aarch64' and 'arm' not in platform_machine and python_version < '3.11'
+catboost==1.1.1; sys_platform == 'darwin' and python_version < '3.9'
+catboost==1.2; 'arm' not in platform_machine and (sys_platform != 'darwin' or python_version >= '3.9')
 lightgbm==3.3.5
 xgboost==1.7.5
-tensorboard==2.12.2
+tensorboard==2.13.0
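The catboost pin now splits into two mutually exclusive PEP 508 environment markers: 1.1.1 remains only for macOS on Python < 3.9, while 1.2 covers non-ARM machines everywhere else. Markers can be checked against the running interpreter with the packaging library (pinned elsewhere in this commit); a small sketch:

from packaging.markers import Marker

# Evaluates against the current interpreter and platform.
legacy = Marker("sys_platform == 'darwin' and python_version < '3.9'")
print(legacy.evaluate())  # True only on macOS running Python < 3.9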
@@ -1,16 +1,19 @@
 numpy==1.24.3
-pandas==1.5.3
+pandas==2.0.1
 pandas-ta==0.3.14b

-ccxt==3.0.75
-cryptography==40.0.2
+ccxt==3.1.5
+cryptography==40.0.2; platform_machine != 'armv7l'
+cryptography==40.0.1; platform_machine == 'armv7l'
 aiohttp==3.8.4
-SQLAlchemy==2.0.10
-python-telegram-bot==13.15
+SQLAlchemy==2.0.15
+python-telegram-bot==20.3
+# can't be hard-pinned due to telegram-bot pinning httpx with ~
+httpx>=0.23.3
 arrow==1.2.3
-cachetools==4.2.2
-requests==2.28.2
-urllib3==1.26.15
+cachetools==5.3.0
+requests==2.31.0
+urllib3==2.0.2
 jsonschema==4.17.3
 TA-Lib==0.4.26
 technical==1.4.0
@@ -20,8 +23,8 @@ jinja2==3.1.2
 tables==3.8.0
 blosc==1.11.1
 joblib==1.2.0
-rich==13.3.4
-pyarrow==11.0.0; platform_machine != 'armv7l'
+rich==13.3.5
+pyarrow==12.0.0; platform_machine != 'armv7l'

 # find first, C search in arrays
 py_find_1st==1.1.5
@@ -29,16 +32,16 @@ py_find_1st==1.1.5
 # Load ticker files 30% faster
 python-rapidjson==1.10
 # Properly format api responses
-orjson==3.8.10
+orjson==3.8.12

 # Notify systemd
 sdnotify==0.3.2

 # API Server
-fastapi==0.95.1
+fastapi==0.95.2
 pydantic==1.10.7
-uvicorn==0.21.1
-pyjwt==2.6.0
+uvicorn==0.22.0
+pyjwt==2.7.0
 aiofiles==23.1.0
 psutil==5.9.5
@@ -54,7 +57,8 @@ python-dateutil==2.8.2
 schedule==1.2.0

 #WS Messages
-websockets==11.0.2
+websockets==11.0.3
 janus==1.0.0

 ast-comments==1.0.1
+packaging==23.1
@@ -279,8 +279,9 @@ class FtRestClient():
         """
         data = {"pair": pair,
                 "side": side,
-                "price": price,
                 }
+        if price:
+            data['price'] = price
         return self._post("forceenter", data=data)

     def forceexit(self, tradeid, ordertype=None, amount=None):
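Building the payload conditionally means the client no longer posts "price": null when no price was given; the key is only sent when set. A hypothetical invocation (server URL and credentials illustrative):

client = FtRestClient("http://127.0.0.1:8080", "user", "pass")
client.forceenter("BTC/USDT", "long")                 # no price key in the payload
client.forceenter("BTC/USDT", "long", price=25000.0)  # price included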
@@ -348,12 +349,13 @@ class FtRestClient():
             params['limit'] = limit
         return self._get("pair_candles", params=params)

-    def pair_history(self, pair, timeframe, strategy, timerange=None):
+    def pair_history(self, pair, timeframe, strategy, timerange=None, freqaimodel=None):
         """Return historic, analyzed dataframe

         :param pair: Pair to get data for
         :param timeframe: Only pairs with this timeframe available.
         :param strategy: Strategy to analyze and get values for
+        :param freqaimodel: FreqAI model to use for analysis
         :param timerange: Timerange to get data for (same format as --timerange endpoints)
         :return: json object
         """

@@ -361,6 +363,7 @@ class FtRestClient():
             "pair": pair,
             "timeframe": timeframe,
             "strategy": strategy,
+            "freqaimodel": freqaimodel,
             "timerange": timerange if timerange else '',
         })
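With the new optional freqaimodel argument, the analyzed-dataframe endpoint can be driven with a specific FreqAI model. A hypothetical call (strategy and model names illustrative, not from this diff):

df = client.pair_history("BTC/USDT", "5m", "FreqaiExampleStrategy",
                         timerange="20230101-20230201",
                         freqaimodel="LightGBMRegressor")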
Some files were not shown because too many files have changed in this diff.