Merge branch 'develop' into pr/richardjozsa/8336

Matthias 2023-04-19 19:37:51 +02:00
commit f30fc29da0
16 changed files with 82 additions and 43 deletions

View File

@@ -85,6 +85,7 @@ Mandatory parameters are marked as **Required** and have to be set in one of the
 | `net_arch` | Network architecture which is well described in [`stable_baselines3` doc](https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html#examples). In summary: `[<shared layers>, dict(vf=[<non-shared value network layers>], pi=[<non-shared policy network layers>])]`. By default this is set to `[128, 128]`, which defines 2 shared hidden layers with 128 units each.
 | `randomize_starting_position` | Randomize the starting point of each episode to avoid overfitting. <br> **Datatype:** bool. <br> Default: `False`.
 | `drop_ohlc_from_features` | Do not include the normalized ohlc data in the feature set passed to the agent during training (ohlc will still be used for driving the environment in all cases) <br> **Datatype:** Boolean. <br> **Default:** `False`
+| `progress_bar` | Display a progress bar with the current progress, elapsed time and estimated remaining time. <br> **Datatype:** Boolean. <br> Default: `False`.
 ### PyTorch parameters
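These options sit under `freqai.rl_config` in the bot configuration. A minimal sketch of that slice, written here as a Python dict purely for illustration (the real configuration file is JSON, and the values are examples, not recommendations):

import json

# Illustrative slice of a freqtrade configuration covering the parameters above.
rl_config_slice = {
    "freqai": {
        "rl_config": {
            "net_arch": [128, 128],
            "randomize_starting_position": False,
            "drop_ohlc_from_features": False,
            "progress_bar": True,  # the option added in this change
        }
    }
}

print(json.dumps(rl_config_slice, indent=2))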

View File

@@ -600,6 +600,7 @@ CONF_SCHEMA = {
             "policy_type": {"type": "string", "default": "MlpPolicy"},
             "net_arch": {"type": "array", "default": [128, 128]},
             "randomize_starting_position": {"type": "boolean", "default": False},
+            "progress_bar": {"type": "boolean", "default": True},
             "model_reward_parameters": {
                 "type": "object",
                 "properties": {

View File

@@ -6,6 +6,7 @@ from freqtrade.exchange.exchange import Exchange
 from freqtrade.exchange.binance import Binance
 from freqtrade.exchange.bitpanda import Bitpanda
 from freqtrade.exchange.bittrex import Bittrex
+from freqtrade.exchange.bitvavo import Bitvavo
 from freqtrade.exchange.bybit import Bybit
 from freqtrade.exchange.coinbasepro import Coinbasepro
 from freqtrade.exchange.exchange_utils import (ROUND_DOWN, ROUND_UP, amount_to_contract_precision,

View File

@@ -0,0 +1,23 @@
+"""Bitvavo exchange subclass."""
+import logging
+from typing import Dict
+
+from freqtrade.exchange import Exchange
+
+
+logger = logging.getLogger(__name__)
+
+
+class Bitvavo(Exchange):
+    """Bitvavo exchange class.
+
+    Contains adjustments needed for Freqtrade to work with this exchange.
+
+    Please note that this exchange is not included in the list of exchanges
+    officially supported by the Freqtrade development team. So some features
+    may still not work as expected.
+    """
+
+    _ft_has: Dict = {
+        "ohlcv_candle_limit": 1440,
+    }
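The only exchange-specific tweak here is the `_ft_has` override. A rough sketch of the pattern, not freqtrade's actual merge code (the `_ft_has_default` contents and the stub classes below are assumptions for illustration): the base class ships defaults and a subclass overrides individual keys such as `ohlcv_candle_limit`.

from typing import Dict


class BaseExchangeStub:
    # Stand-in defaults; the real Exchange base class keeps a similar mapping.
    _ft_has_default: Dict = {"ohlcv_candle_limit": 500, "ohlcv_partial_candle": True}
    _ft_has: Dict = {}

    def __init__(self) -> None:
        # Defaults first, exchange-specific overrides win.
        self._ft_has = {**self._ft_has_default, **self._ft_has}


class BitvavoStub(BaseExchangeStub):
    _ft_has: Dict = {"ohlcv_candle_limit": 1440}


print(BitvavoStub()._ft_has)  # {'ohlcv_candle_limit': 1440, 'ohlcv_partial_candle': True}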

View File

@@ -2371,12 +2371,12 @@ class Exchange:
                 # Must fetch the leverage tiers for each market separately
                 # * This is slow(~45s) on Okx, makes ~90 api calls to load all linear swap markets
                 markets = self.markets
-                symbols = []
-                for symbol, market in markets.items():
-                    if (self.market_is_future(market)
-                            and market['quote'] == self._config['stake_currency']):
-                        symbols.append(symbol)
+                symbols = [
+                    symbol for symbol, market in markets.items()
+                    if (self.market_is_future(market)
+                        and market['quote'] == self._config['stake_currency'])
+                ]
 
                 tiers: Dict[str, List[Dict]] = {}
@@ -2396,25 +2396,26 @@ class Exchange:
                 else:
                     logger.info("Using cached leverage_tiers.")
 
-                async def gather_results():
+                async def gather_results(input_coro):
                     return await asyncio.gather(*input_coro, return_exceptions=True)
 
                 for input_coro in chunks(coros, 100):
                     with self._loop_lock:
-                        results = self.loop.run_until_complete(gather_results())
-                    for symbol, res in results:
-                        tiers[symbol] = res
+                        results = self.loop.run_until_complete(gather_results(input_coro))
+                    for res in results:
+                        if isinstance(res, Exception):
+                            logger.warning(f"Leverage tier exception: {repr(res)}")
+                            continue
+                        symbol, tier = res
+                        tiers[symbol] = tier
                 if len(coros) > 0:
                     self.cache_leverage_tiers(tiers, self._config['stake_currency'])
                 logger.info(f"Done initializing {len(symbols)} markets.")
 
                 return tiers
-            else:
-                return {}
-        else:
-            return {}
+        return {}
 
     def cache_leverage_tiers(self, tiers: Dict[str, List[Dict]], stake_currency: str) -> None:
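The pattern being adjusted above, gathering per-symbol coroutines in chunks with return_exceptions=True and skipping failed calls, can be sketched in isolation. Everything below (the stub fetch function, symbol names, and loop handling) is illustrative, not freqtrade code:

import asyncio
from typing import Any, Dict, List


def chunks(lst: List[Any], n: int) -> List[List[Any]]:
    # Simplified stand-in for freqtrade's chunks() helper.
    return [lst[i:i + n] for i in range(0, len(lst), n)]


async def fetch_tiers(symbol: str) -> Any:
    # Placeholder for a per-market exchange call; one symbol fails on purpose.
    if symbol == "BAD/USDT:USDT":
        raise RuntimeError("exchange hiccup")
    return symbol, [{"tier": 1, "maxLeverage": 20}]


def load_all(symbols: List[str]) -> Dict[str, Any]:
    tiers: Dict[str, Any] = {}
    coros = [fetch_tiers(s) for s in symbols]

    async def gather_results(batch):
        # return_exceptions=True keeps one failing call from aborting the whole batch.
        return await asyncio.gather(*batch, return_exceptions=True)

    loop = asyncio.new_event_loop()
    try:
        for batch in chunks(coros, 100):
            for res in loop.run_until_complete(gather_results(batch)):
                if isinstance(res, Exception):
                    print(f"skipping failed call: {res!r}")
                    continue
                symbol, tier = res
                tiers[symbol] = tier
    finally:
        loop.close()
    return tiers


print(load_all(["BTC/USDT:USDT", "BAD/USDT:USDT", "ETH/USDT:USDT"]))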
@@ -2430,14 +2431,17 @@ class Exchange:
     def load_cached_leverage_tiers(self, stake_currency: str) -> Optional[Dict[str, List[Dict]]]:
         filename = self._config['datadir'] / "futures" / f"leverage_tiers_{stake_currency}.json"
         if filename.is_file():
-            tiers = file_load_json(filename)
-            updated = tiers.get('updated')
-            if updated:
-                updated_dt = parser.parse(updated)
-                if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4):
-                    logger.info("Cached leverage tiers are outdated. Will update.")
-                    return None
-            return tiers['data']
+            try:
+                tiers = file_load_json(filename)
+                updated = tiers.get('updated')
+                if updated:
+                    updated_dt = parser.parse(updated)
+                    if updated_dt < datetime.now(timezone.utc) - timedelta(weeks=4):
+                        logger.info("Cached leverage tiers are outdated. Will update.")
+                        return None
+                return tiers['data']
+            except Exception:
+                logger.exception("Error loading cached leverage tiers. Refreshing.")
         return None
 
     def fill_leverage_tiers(self) -> None:

View File

@@ -71,7 +71,8 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
         model.learn(
             total_timesteps=int(total_timesteps),
-            callback=[self.eval_callback, self.tensorboard_callback]
+            callback=[self.eval_callback, self.tensorboard_callback],
+            progress_bar=self.rl_config.get('progress_bar', False)
         )
 
         if Path(dk.data_path / "best_model.zip").is_file():
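For reference, the new argument is passed straight through to stable-baselines3's learn(). A minimal standalone sketch of the same flag (assuming stable_baselines3, tqdm and rich are installed; CartPole is only a placeholder environment, not something freqtrade uses):

from stable_baselines3 import PPO

# Any registered gymnasium environment id works here; CartPole keeps the example tiny.
model = PPO("MlpPolicy", "CartPole-v1", verbose=0)

# progress_bar=True is what rl_config['progress_bar'] enables in the ReinforcementLearner above:
# it renders a tqdm/rich progress bar while training runs.
model.learn(total_timesteps=10_000, progress_bar=True)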

View File

@@ -273,7 +273,8 @@ def _get_resample_from_period(period: str) -> str:
     if period == 'day':
         return '1d'
     if period == 'week':
-        return '1w'
+        # Weekly defaulting to Monday.
+        return '1W-MON'
     if period == 'month':
         return '1M'
     raise ValueError(f"Period {period} is not supported.")

View File

@@ -303,11 +303,11 @@ def get_strategy(strategy: str, config=Depends(get_config)):
 @router.get('/freqaimodels', response_model=FreqAIModelListResponse, tags=['freqai'])
 def list_freqaimodels(config=Depends(get_config)):
     from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
-    strategies = FreqaiModelResolver.search_all_objects(
+    models = FreqaiModelResolver.search_all_objects(
         config, False)
-    strategies = sorted(strategies, key=lambda x: x['name'])
-    return {'freqaimodels': [x['name'] for x in strategies]}
+    models = sorted(models, key=lambda x: x['name'])
+    return {'freqaimodels': [x['name'] for x in models]}
 
 @router.get('/available_pairs', response_model=AvailablePairs, tags=['candle data'])

View File

@@ -68,6 +68,9 @@ target-version = "py38"
 extend-select = [
   "C90", # mccabe
   # "N", # pep8-naming
+  "F", # pyflakes
+  "E", # pycodestyle
+  "W", # pycodestyle
   "UP", # pyupgrade
   "TID", # flake8-tidy-imports
   # "EXE", # flake8-executable

View File

@@ -10,7 +10,7 @@ coveralls==3.3.1
 ruff==0.0.261
 mypy==1.2.0
 pre-commit==3.2.2
-pytest==7.3.0
+pytest==7.3.1
 pytest-asyncio==0.21.0
 pytest-cov==4.0.0
 pytest-mock==3.10.0
@@ -19,7 +19,7 @@ isort==5.12.0
 # For datetime mocking
 time-machine==2.9.0
 # fastapi testing
-httpx==0.23.3
+httpx==0.24.0
 # Convert jupyter notebooks to markdown documents
 nbconvert==7.3.1

View File

@@ -5,8 +5,9 @@
 torch==1.13.1; python_version < '3.11'
 #until these branches will be released we can use this
 gymnasium==0.28.1
-stable_baselines3>=2.0.0a1
-sb3_contrib>=2.0.0a1
+stable_baselines3==2.0.0a5
+sb3_contrib>=2.0.0a4
 # Gym is forced to this version by stable-baselines3.
 setuptools==65.5.1 # Should be removed when gym is fixed.
+# Progress bar for stable-baselines3 and sb3-contrib
+tqdm==4.65.0; python_version < '3.11'

View File

@@ -8,4 +8,4 @@ joblib==1.2.0
 catboost==1.1.1; platform_machine != 'aarch64' and 'arm' not in platform_machine and python_version < '3.11'
 lightgbm==3.3.5
 xgboost==1.7.5
-tensorboard==2.12.1
+tensorboard==2.12.2

View File

@@ -2,8 +2,8 @@ numpy==1.24.2
 pandas==1.5.3
 pandas-ta==0.3.14b
-ccxt==3.0.59
-cryptography==40.0.1
+ccxt==3.0.69
+cryptography==40.0.2
 aiohttp==3.8.4
 SQLAlchemy==2.0.9
 python-telegram-bot==13.15
@@ -20,7 +20,7 @@ jinja2==3.1.2
 tables==3.8.0
 blosc==1.11.1
 joblib==1.2.0
-rich==13.3.3
+rich==13.3.4
 pyarrow==11.0.0; platform_machine != 'armv7l'
 # find first, C search in arrays
@@ -35,7 +35,7 @@ orjson==3.8.10
 sdnotify==0.3.2
 # API Server
-fastapi==0.95.0
+fastapi==0.95.1
 pydantic==1.10.7
 uvicorn==0.21.1
 pyjwt==2.6.0

View File

@@ -528,9 +528,11 @@ class TestCCXTExchange():
         assert res[1] == timeframe
         assert res[2] == candle_type
         candles = res[3]
-        candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * 0.9
-        candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms
-        assert len(candles) >= min(candle_count, candle_count1)
+        factor = 0.9
+        candle_count = exchange.ohlcv_candle_limit(timeframe, candle_type, since_ms) * factor
+        candle_count1 = (now.timestamp() * 1000 - since_ms) // timeframe_ms * factor
+        assert len(candles) >= min(candle_count, candle_count1), \
+            f"{len(candles)} < {candle_count} in {timeframe}, Offset: {offset} {factor}"
         assert candles[0][0] == since_ms or (since_ms + timeframe_ms)
 
     def test_ccxt__async_get_candle_history(self, exchange: EXCHANGE_FIXTURE_TYPE):
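As a rough worked example of the candle-count arithmetic in the tightened assertion (all numbers below are made up):

# Hypothetical numbers for the expected-candle calculation used in the assertion above.
timeframe_ms = 5 * 60 * 1000              # '5m' expressed in milliseconds
now_ms = 1_700_000_000_000                # pretend "now"
since_ms = now_ms - 6 * 60 * 60 * 1000    # fetching started six hours earlier
factor = 0.9                              # 10% tolerance for exchange quirks

candle_count1 = (now_ms - since_ms) // timeframe_ms * factor
print(candle_count1)  # 6h of 5m candles is 72; with the 0.9 factor the floor becomes 64.8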

View File

@@ -1,14 +1,14 @@
 from datetime import datetime, timedelta, timezone
 from pathlib import Path
-from unittest.mock import MagicMock, PropertyMock
+from unittest.mock import AsyncMock, MagicMock, PropertyMock
 
 import ccxt
 import pytest
 
 from freqtrade.enums import CandleType, MarginMode, TradingMode
-from freqtrade.exceptions import RetryableOrderError
+from freqtrade.exceptions import RetryableOrderError, TemporaryError
 from freqtrade.exchange.exchange import timeframe_to_minutes
-from tests.conftest import EXMS, get_mock_coro, get_patched_exchange, log_has
+from tests.conftest import EXMS, get_patched_exchange, log_has
 from tests.exchange.test_exchange import ccxt_exceptionhandlers
@@ -278,7 +278,7 @@ def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmpdir, caplog,
         'fetchLeverageTiers': False,
         'fetchMarketLeverageTiers': True,
     })
-    api_mock.fetch_market_leverage_tiers = get_mock_coro(side_effect=[
+    api_mock.fetch_market_leverage_tiers = AsyncMock(side_effect=[
         [
             {
                 'tier': 1,
@@ -341,6 +341,7 @@ def test_load_leverage_tiers_okx(default_conf, mocker, markets, tmpdir, caplog,
                 }
             },
         ],
+        TemporaryError("this Failed"),
         [
             {
                 'tier': 1,
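The switch from get_mock_coro to AsyncMock leans on standard unittest.mock behaviour: with a side_effect iterable, each await consumes the next item, and an exception instance in the list is raised instead of returned. A self-contained sketch (the TemporaryError stub and tier payloads are illustrative only):

import asyncio
from unittest.mock import AsyncMock


class TemporaryError(Exception):
    # Stand-in for freqtrade.exceptions.TemporaryError, just for this sketch.
    pass


async def main() -> None:
    mock = AsyncMock(side_effect=[
        [{'tier': 1}],                   # first await returns a value
        TemporaryError("this Failed"),   # second await raises
        [{'tier': 1}],                   # third await returns again
    ])

    print(await mock("ADA/USDT:USDT"))
    try:
        await mock("BAD/USDT:USDT")
    except TemporaryError as err:
        print(f"raised as expected: {err}")
    print(await mock("ETH/USDT:USDT"))


asyncio.run(main())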

View File

@@ -465,7 +465,7 @@ def test_generate_periodic_breakdown_stats(testdatadir):
 def test__get_resample_from_period():
     assert _get_resample_from_period('day') == '1d'
-    assert _get_resample_from_period('week') == '1w'
+    assert _get_resample_from_period('week') == '1W-MON'
     assert _get_resample_from_period('month') == '1M'
     with pytest.raises(ValueError, match=r"Period noooo is not supported."):
         _get_resample_from_period('noooo')