mirror of https://github.com/freqtrade/freqtrade.git (synced 2024-11-10 02:12:01 +00:00)

Fix odd formatting by ruff format

This commit is contained in:
parent 876a8f9e3e
commit a9732c6195
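Almost every hunk below merges Python's implicit concatenation of adjacent string literals, the seams ruff format left behind when it joined previously wrapped lines. The two forms are equivalent at compile time; a standalone sketch (illustrative only, not part of the diff):

message = "Versions do not match: " "see details above"       # two adjacent literals
assert message == "Versions do not match: see details above"  # same single string

Merging the literals changes no behaviour; it only removes the leftover line-wrap artifacts.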
@@ -1,11 +1,12 @@
 #!/usr/bin/env python3
-from freqtrade import __version__ as ft_version
 from freqtrade_client import __version__ as client_version
 
+from freqtrade import __version__ as ft_version
+
 
 def main():
     if ft_version != client_version:
-        print(f"Versions do not match: \n" f"ft: {ft_version} \n" f"client: {client_version}")
+        print(f"Versions do not match: \nft: {ft_version} \nclient: {client_version}")
         exit(1)
     print(f"Versions match: ft: {ft_version}, client: {client_version}")
     exit(0)
@@ -369,7 +369,7 @@ AVAILABLE_CLI_OPTIONS = {
     "list_pairs_all": Arg(
         "-a",
         "--all",
-        help="Print all pairs or market symbols. By default only active " "ones are shown.",
+        help="Print all pairs or market symbols. By default only active ones are shown.",
         action="store_true",
     ),
     "print_list": Arg(
@@ -490,7 +490,7 @@ AVAILABLE_CLI_OPTIONS = {
     "timeframes": Arg(
         "-t",
         "--timeframes",
-        help="Specify which tickers to download. Space-separated list. " "Default: `1m 5m`.",
+        help="Specify which tickers to download. Space-separated list. Default: `1m 5m`.",
         nargs="+",
     ),
     "prepend_data": Arg(
@@ -89,7 +89,7 @@ def start_new_strategy(args: Dict[str, Any]) -> None:
 
    if new_path.exists():
        raise OperationalException(
-            f"`{new_path}` already exists. " "Please choose another Strategy Name."
+            f"`{new_path}` already exists. Please choose another Strategy Name."
        )
 
    deploy_new_strategy(args["strategy"], new_path, args["template"])
@@ -218,7 +218,7 @@ class Configuration:
         self._args_to_config(
             config,
             argname="timeframe",
-            logstring="Parameter -i/--timeframe detected ... " "Using timeframe: {} ...",
+            logstring="Parameter -i/--timeframe detected ... Using timeframe: {} ...",
         )
 
         self._args_to_config(
@@ -240,7 +240,7 @@ class Configuration:
         elif "max_open_trades" in self.args and self.args["max_open_trades"]:
             config.update({"max_open_trades": self.args["max_open_trades"]})
             logger.info(
-                "Parameter --max-open-trades detected, " "overriding max_open_trades to: %s ...",
+                "Parameter --max-open-trades detected, overriding max_open_trades to: %s ...",
                 config.get("max_open_trades"),
             )
         elif config["runmode"] in NON_UTIL_MODES:
@@ -417,7 +417,7 @@ class Configuration:
         self._args_to_config(
             config,
             argname="dry_run",
-            logstring="Parameter --dry-run detected, " "overriding dry_run to: {} ...",
+            logstring="Parameter --dry-run detected, overriding dry_run to: {} ...",
         )
 
         if not self.runmode:
@@ -69,7 +69,7 @@ def load_config_file(path: str) -> Dict[str, Any]:
    except rapidjson.JSONDecodeError as e:
        err_range = log_config_error_range(path, str(e))
        raise ConfigurationError(
-            f"{e}\n" f"Please verify the following segment of your configuration:\n{err_range}"
+            f"{e}\nPlease verify the following segment of your configuration:\n{err_range}"
            if err_range
            else "Please verify your configuration file for syntax errors."
        )
@@ -98,7 +98,7 @@ def clean_ohlcv_dataframe(
 def ohlcv_fill_up_missing_data(dataframe: DataFrame, timeframe: str, pair: str) -> DataFrame:
     """
     Fills up missing data with 0 volume rows,
-    using the previous close as price for "open", "high" "low" and "close", volume is set to 0
+    using the previous close as price for "open", "high", "low" and "close", volume is set to 0
 
     """
     from freqtrade.exchange import timeframe_to_resample_freq
@@ -175,7 +175,7 @@ def trim_dataframes(
             processed[pair] = trimed_df
         else:
             logger.warning(
-                f"{pair} has no data left after adjusting for startup candles, " f"skipping."
+                f"{pair} has no data left after adjusting for startup candles, skipping."
             )
     return processed
 
@@ -285,7 +285,7 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:
     :return: Dataframe converted to float/int 32s
     """
 
-    logger.debug(f"Memory usage of dataframe is " f"{df.memory_usage().sum() / 1024**2:.2f} MB")
+    logger.debug(f"Memory usage of dataframe is {df.memory_usage().sum() / 1024**2:.2f} MB")
 
     df_dtypes = df.dtypes
     for column, dtype in df_dtypes.items():
@@ -297,8 +297,6 @@ def reduce_dataframe_footprint(df: DataFrame) -> DataFrame:
             df_dtypes[column] = np.int32
     df = df.astype(df_dtypes)
 
-    logger.debug(
-        f"Memory usage after optimization is: " f"{df.memory_usage().sum() / 1024**2:.2f} MB"
-    )
+    logger.debug(f"Memory usage after optimization is: {df.memory_usage().sum() / 1024**2:.2f} MB")
 
     return df
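The function touched by the two hunks above shrinks a DataFrame by downcasting 64-bit columns to 32-bit. A minimal self-contained sketch of the same idea, with invented sample data rather than freqtrade's own:

import numpy as np
import pandas as pd

# Build a small float64/int64 frame, then downcast it column by column.
df = pd.DataFrame({"close": np.random.rand(1_000), "volume": np.random.randint(0, 100, 1_000)})
df_dtypes = df.dtypes
for column, dtype in df_dtypes.items():
    if dtype == np.float64:
        df_dtypes[column] = np.float32
    elif dtype == np.int64:
        df_dtypes[column] = np.int32
df = df.astype(df_dtypes)
print(f"Memory usage after optimization is: {df.memory_usage().sum() / 1024**2:.2f} MB")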
@@ -849,7 +849,7 @@ class Exchange:
         if max_stake_amount is None:
             # * Should never be executed
             raise OperationalException(
-                f"{self.name}.get_max_pair_stake_amount should" "never set max_stake_amount to None"
+                f"{self.name}.get_max_pair_stake_amount should never set max_stake_amount to None"
             )
         return max_stake_amount
 
@@ -1375,7 +1375,7 @@ class Exchange:
             raise DDosProtection(e) from e
         except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
             raise TemporaryError(
-                f"Could not place stoploss order due to {e.__class__.__name__}. " f"Message: {e}"
+                f"Could not place stoploss order due to {e.__class__.__name__}. Message: {e}"
             ) from e
         except ccxt.BaseError as e:
             raise OperationalException(e) from e
@@ -1800,7 +1800,7 @@ class Exchange:
             return self._api.fetch_l2_order_book(pair, limit1)
         except ccxt.NotSupported as e:
             raise OperationalException(
-                f"Exchange {self._api.name} does not support fetching order book." f"Message: {e}"
+                f"Exchange {self._api.name} does not support fetching order book. Message: {e}"
             ) from e
         except ccxt.DDoSProtection as e:
             raise DDosProtection(e) from e
@@ -2498,7 +2498,7 @@ class Exchange:
             ) from e
         except ccxt.BaseError as e:
             raise OperationalException(
-                f"Could not fetch historical candle (OHLCV) data " f"for pair {pair}. Message: {e}"
+                f"Could not fetch historical candle (OHLCV) data for pair {pair}. Message: {e}"
             ) from e
 
     async def _fetch_funding_rate_history(
@@ -2555,7 +2555,7 @@ class Exchange:
             raise DDosProtection(e) from e
         except (ccxt.OperationFailed, ccxt.ExchangeError) as e:
             raise TemporaryError(
-                f"Could not load trade history due to {e.__class__.__name__}. " f"Message: {e}"
+                f"Could not load trade history due to {e.__class__.__name__}. Message: {e}"
             ) from e
         except ccxt.BaseError as e:
             raise OperationalException(f"Could not fetch trade data. Msg: {e}") from e
@@ -2701,7 +2701,7 @@ class Exchange:
             )
         else:
             raise OperationalException(
-                f"Exchange {self.name} does use neither time, " f"nor id based pagination"
+                f"Exchange {self.name} does use neither time, nor id based pagination"
             )
 
     def get_historic_trades(
@@ -105,7 +105,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
         :model: Trained model which can be used to inference (self.predict)
         """
 
-        logger.info("--------------------Starting training " f"{pair} --------------------")
+        logger.info(f"--------------------Starting training {pair} --------------------")
 
         features_filtered, labels_filtered = dk.filter_features(
             unfiltered_df,
@@ -430,7 +430,7 @@ class BaseReinforcementLearningModel(IFreqaiModel):
 
             # you can use feature values from dataframe
             rsi_now = self.raw_features[
-                f"%-rsi-period-10_shift-1_{self.pair}_" f"{self.config['timeframe']}"
+                f"%-rsi-period-10_shift-1_{self.pair}_{self.config['timeframe']}"
             ].iloc[self._current_tick]
 
             # reward agent for entering trades
@@ -59,7 +59,7 @@ class BasePyTorchClassifier(BasePyTorchModel):
         class_names = self.model.model_meta_data.get("class_names", None)
         if not class_names:
             raise ValueError(
-                "Missing class names. " "self.model.model_meta_data['class_names'] is None."
+                "Missing class names. self.model.model_meta_data['class_names'] is None."
             )
 
         if not self.class_name_to_index:
@@ -63,7 +63,7 @@ class FreqaiMultiOutputClassifier(MultiOutputClassifier):
             self.classes_.extend(estimator.classes_)
         if len(set(self.classes_)) != len(self.classes_):
             raise OperationalException(
-                f"Class labels must be unique across targets: " f"{self.classes_}"
+                f"Class labels must be unique across targets: {self.classes_}"
             )
 
         if hasattr(self.estimators_[0], "n_features_in_"):
@@ -618,7 +618,7 @@ class FreqaiDataDrawer:
 
         if not model:
             raise OperationalException(
-                f"Unable to load model, ensure model exists at " f"{dk.data_path} "
+                f"Unable to load model, ensure model exists at {dk.data_path} "
             )
 
         # load it into ram if it was loaded from disk
@@ -763,7 +763,7 @@ class IFreqaiModel(ABC):
         """
         current_pairlist = self.config.get("exchange", {}).get("pair_whitelist")
         if not self.dd.pair_dict:
-            logger.info("Set fresh train queue from whitelist. " f"Queue: {current_pairlist}")
+            logger.info(f"Set fresh train queue from whitelist. Queue: {current_pairlist}")
             return deque(current_pairlist)
 
         best_queue = deque()
@@ -779,7 +779,7 @@ class IFreqaiModel(ABC):
             best_queue.appendleft(pair)
 
         logger.info(
-            "Set existing queue from trained timestamps. " f"Best approximation queue: {best_queue}"
+            f"Set existing queue from trained timestamps. Best approximation queue: {best_queue}"
         )
         return best_queue
 
@@ -73,7 +73,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel):
             )
         else:
             logger.info(
-                "Continual training activated - starting training from previously " "trained agent."
+                "Continual training activated - starting training from previously trained agent."
             )
             model = self.dd.model_dictionary[dk.pair]
             model.set_env(self.train_env)
@@ -1015,9 +1015,7 @@ class FreqtradeBot(LoggingMixin):
         # First cancelling stoploss on exchange ...
         for oslo in trade.open_sl_orders:
             try:
-                logger.info(
-                    f"Cancelling stoploss on exchange for {trade} order: {oslo.order_id}"
-                )
+                logger.info(f"Cancelling stoploss on exchange for {trade} order: {oslo.order_id}")
                 co = self.exchange.cancel_stoploss_order_with_result(
                     oslo.order_id, trade.pair, trade.amount
                 )
@@ -2285,7 +2283,7 @@ class FreqtradeBot(LoggingMixin):
         if fee_abs != 0 and self.wallets.get_free(trade_base_currency) >= amount_:
             # Eat into dust if we own more than base currency
             logger.info(
-                f"Fee amount for {trade} was in base currency - " f"Eating Fee {fee_abs} into dust."
+                f"Fee amount for {trade} was in base currency - Eating Fee {fee_abs} into dust."
             )
         elif fee_abs != 0:
             logger.info(f"Applying fee on amount for {trade}, fee={fee_abs}.")
@@ -215,7 +215,7 @@ class Backtesting:
     def _validate_pairlists_for_backtesting(self):
         if "VolumePairList" in self.pairlists.name_list:
             raise OperationalException(
-                "VolumePairList not allowed for backtesting. " "Please use StaticPairList instead."
+                "VolumePairList not allowed for backtesting. Please use StaticPairList instead."
             )
         if "PerformanceFilter" in self.pairlists.name_list:
             raise OperationalException("PerformanceFilter not allowed for backtesting.")
@@ -316,7 +316,7 @@ def text_table_add_metrics(strat_results: Dict) -> str:
            f"{strat_results['worst_pair']['profit_total']:.2%}",
        ),
        ("Best trade", f"{best_trade['pair']} {best_trade['profit_ratio']:.2%}"),
-        ("Worst trade", f"{worst_trade['pair']} " f"{worst_trade['profit_ratio']:.2%}"),
+        ("Worst trade", f"{worst_trade['pair']} {worst_trade['profit_ratio']:.2%}"),
        (
            "Best day",
            fmt_coin(strat_results["backtest_best_day_abs"], strat_results["stake_currency"]),
@@ -361,9 +361,7 @@ def check_migrate(engine, decl_base, previous_tables) -> None:
 
    if not has_column(cols_pairlocks, "side"):
        migrating = True
-        logger.info(
-            f"Running database migration for pairlocks - " f"backup: {pairlock_table_bak_name}"
-        )
+        logger.info(f"Running database migration for pairlocks - backup: {pairlock_table_bak_name}")
 
        migrate_pairlocks_table(
            decl_base, inspector, engine, pairlock_table_bak_name, cols_pairlocks
@@ -73,7 +73,7 @@ class AgeFilter(IPairList):
            f"{self.name} - Filtering pairs with age less than "
            f"{self._min_days_listed} {plural(self._min_days_listed, 'day')}"
        ) + (
-            (" or more than " f"{self._max_days_listed} {plural(self._max_days_listed, 'day')}")
+            (f" or more than {self._max_days_listed} {plural(self._max_days_listed, 'day')}")
            if self._max_days_listed
            else ""
        )
@@ -236,7 +236,7 @@ class IPairList(LoggingMixin, ABC):
 
            if not self._exchange.market_is_tradable(markets[pair]):
                self.log_once(
-                    f"Pair {pair} is not tradable with Freqtrade." "Removing it from whitelist..",
+                    f"Pair {pair} is not tradable with Freqtrade. Removing it from whitelist..",
                    logger.warning,
                )
                continue
@@ -84,7 +84,7 @@ class PriceFilter(IPairList):
                "default": 0,
                "description": "Low price ratio",
                "help": (
-                    "Remove pairs where a price move of 1 price unit (pip) " "is above this ratio."
+                    "Remove pairs where a price move of 1 price unit (pip) is above this ratio."
                ),
            },
            "min_price": {
@@ -130,7 +130,7 @@ class PriceFilter(IPairList):
            changeperc = compare / price
            if changeperc > self._low_price_ratio:
                self.log_once(
-                    f"Removed {pair} from whitelist, " f"because 1 unit is {changeperc:.3%}",
+                    f"Removed {pair} from whitelist, because 1 unit is {changeperc:.3%}",
                    logger.info,
                )
                return False
@@ -201,12 +201,12 @@ class RemotePairList(IPairList):
                    pairlist = self._handle_error(f"Failed processing JSON data: {type(e)}")
            else:
                pairlist = self._handle_error(
-                    f"RemotePairList is not of type JSON." f" {self._pairlist_url}"
+                    f"RemotePairList is not of type JSON. {self._pairlist_url}"
                )
 
        except requests.exceptions.RequestException:
            pairlist = self._handle_error(
-                f"Was not able to fetch pairlist from:" f" {self._pairlist_url}"
+                f"Was not able to fetch pairlist from: {self._pairlist_url}"
            )
 
            time_elapsed = 0
@@ -65,7 +65,7 @@ class IProtection(LoggingMixin, ABC):
                f"{plural(self._stop_duration_candles, 'candle', 'candles')}"
            )
        else:
-            return f"{self._stop_duration} " f"{plural(self._stop_duration, 'minute', 'minutes')}"
+            return f"{self._stop_duration} {plural(self._stop_duration, 'minute', 'minutes')}"
 
    @property
    def lookback_period_str(self) -> str:
@@ -78,9 +78,7 @@ class IProtection(LoggingMixin, ABC):
                f"{plural(self._lookback_period_candles, 'candle', 'candles')}"
            )
        else:
-            return (
-                f"{self._lookback_period} " f"{plural(self._lookback_period, 'minute', 'minutes')}"
-            )
+            return f"{self._lookback_period} {plural(self._lookback_period, 'minute', 'minutes')}"
 
    @abstractmethod
    def short_desc(self) -> str:
@@ -44,7 +44,7 @@ class StrategyResolver(IResolver):
 
        if not config.get("strategy"):
            raise OperationalException(
-                "No strategy set. Please use `--strategy` to specify " "the strategy class to use."
+                "No strategy set. Please use `--strategy` to specify the strategy class to use."
            )
 
        strategy_name = config["strategy"]
@@ -219,7 +219,7 @@ class Telegram(RPCHandler):
                raise OperationalException(err_msg)
            else:
                self._keyboard = cust_keyboard
-                logger.info("using custom keyboard from " f"config.json: {self._keyboard}")
+                logger.info(f"using custom keyboard from config.json: {self._keyboard}")
 
    def _init_telegram_app(self):
        return Application.builder().token(self._config["telegram"]["token"]).build()
@@ -1749,9 +1749,7 @@ class Telegram(RPCHandler):
 
        for chunk in chunks(edge_pairs, 25):
            edge_pairs_tab = tabulate(chunk, headers="keys", tablefmt="simple")
-            message = (
-                f"<b>Edge only validated following pairs:</b>\n" f"<pre>{edge_pairs_tab}</pre>"
-            )
+            message = f"<b>Edge only validated following pairs:</b>\n<pre>{edge_pairs_tab}</pre>"
 
            await self._send_msg(message, parse_mode=ParseMode.HTML)
 
@@ -1201,7 +1201,7 @@ class IStrategy(ABC, HyperStrategyMixin):
        # Tags can be None, which does not resolve to False.
        exit_tag = exit_tag if isinstance(exit_tag, str) and exit_tag != "nan" else None
 
-        logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) " f"enter={enter} exit={exit_}")
+        logger.debug(f"exit-trigger: {latest['date']} (pair={pair}) enter={enter} exit={exit_}")
 
        return enter, exit_, exit_tag
 
@@ -27,12 +27,12 @@ def strategy_safe_wrapper(f: F, message: str = "", default_retval=None, supress_
                kwargs["trade"] = deepcopy(kwargs["trade"])
            return f(*args, **kwargs)
        except ValueError as error:
-            logger.warning(f"{message}" f"Strategy caused the following exception: {error}" f"{f}")
+            logger.warning(f"{message}Strategy caused the following exception: {error}{f}")
            if default_retval is None and not supress_error:
                raise StrategyError(str(error)) from error
            return default_retval
        except Exception as error:
-            logger.exception(f"{message}" f"Unexpected error {error} calling {f}")
+            logger.exception(f"{message}Unexpected error {error} calling {f}")
            if default_retval is None and not supress_error:
                raise StrategyError(str(error)) from error
            return default_retval
freqtrade/vendor/qtpylib/indicators.py (vendored, 251 lines changed)
@@ -42,7 +42,7 @@ def numpy_rolling_series(func):
 
        new_series = np.empty(len(series)) * np.nan
        calculated = func(series, window)
-        new_series[-len(calculated):] = calculated
+        new_series[-len(calculated) :] = calculated
 
        if as_source and isinstance(data, pd.Series):
            return pd.Series(index=data.index, data=new_series)
@@ -65,97 +65,103 @@ def numpy_rolling_std(data, window, as_source=False):
 # ---------------------------------------------
 
 
-def session(df, start='17:00', end='16:00'):
-    """ remove previous globex day from df """
+def session(df, start="17:00", end="16:00"):
+    """remove previous globex day from df"""
     if df.empty:
         return df
 
     # get start/end/now as decimals
-    int_start = list(map(int, start.split(':')))
+    int_start = list(map(int, start.split(":")))
     int_start = (int_start[0] + int_start[1] - 1 / 100) - 0.0001
-    int_end = list(map(int, end.split(':')))
+    int_end = list(map(int, end.split(":")))
     int_end = int_end[0] + int_end[1] / 100
-    int_now = (df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100)
+    int_now = df[-1:].index.hour[0] + (df[:1].index.minute[0]) / 100
 
     # same-dat session?
     is_same_day = int_end > int_start
 
     # set pointers
-    curr = prev = df[-1:].index[0].strftime('%Y-%m-%d')
+    curr = prev = df[-1:].index[0].strftime("%Y-%m-%d")
 
     # globex/forex session
     if not is_same_day:
-        prev = (datetime.strptime(curr, '%Y-%m-%d') -
-                timedelta(1)).strftime('%Y-%m-%d')
+        prev = (datetime.strptime(curr, "%Y-%m-%d") - timedelta(1)).strftime("%Y-%m-%d")
 
     # slice
     if int_now >= int_start:
-        df = df[df.index >= curr + ' ' + start]
+        df = df[df.index >= curr + " " + start]
     else:
-        df = df[df.index >= prev + ' ' + start]
+        df = df[df.index >= prev + " " + start]
 
     return df.copy()
 
 
 # ---------------------------------------------
 
+
 def heikinashi(bars):
     bars = bars.copy()
-    bars['ha_close'] = (bars['open'] + bars['high'] +
-                        bars['low'] + bars['close']) / 4
+    bars["ha_close"] = (bars["open"] + bars["high"] + bars["low"] + bars["close"]) / 4
 
     # ha open
-    bars.at[0, 'ha_open'] = (bars.at[0, 'open'] + bars.at[0, 'close']) / 2
+    bars.at[0, "ha_open"] = (bars.at[0, "open"] + bars.at[0, "close"]) / 2
     for i in range(1, len(bars)):
-        bars.at[i, 'ha_open'] = (bars.at[i - 1, 'ha_open'] + bars.at[i - 1, 'ha_close']) / 2
+        bars.at[i, "ha_open"] = (bars.at[i - 1, "ha_open"] + bars.at[i - 1, "ha_close"]) / 2
 
-    bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)
-    bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)
+    bars["ha_high"] = bars.loc[:, ["high", "ha_open", "ha_close"]].max(axis=1)
+    bars["ha_low"] = bars.loc[:, ["low", "ha_open", "ha_close"]].min(axis=1)
 
-    return pd.DataFrame(index=bars.index,
-                        data={'open': bars['ha_open'],
-                              'high': bars['ha_high'],
-                              'low': bars['ha_low'],
-                              'close': bars['ha_close']})
+    return pd.DataFrame(
+        index=bars.index,
+        data={
+            "open": bars["ha_open"],
+            "high": bars["ha_high"],
+            "low": bars["ha_low"],
+            "close": bars["ha_close"],
+        },
+    )
 
 
 # ---------------------------------------------
 
-def tdi(series, rsi_lookback=13, rsi_smooth_len=2,
-        rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):
+
+def tdi(series, rsi_lookback=13, rsi_smooth_len=2, rsi_signal_len=7, bb_lookback=34, bb_std=1.6185):
 
     rsi_data = rsi(series, rsi_lookback)
     rsi_smooth = sma(rsi_data, rsi_smooth_len)
     rsi_signal = sma(rsi_data, rsi_signal_len)
 
     bb_series = bollinger_bands(rsi_data, bb_lookback, bb_std)
 
-    return pd.DataFrame(index=series.index, data={
-        "rsi": rsi_data,
-        "rsi_signal": rsi_signal,
-        "rsi_smooth": rsi_smooth,
-        "rsi_bb_upper": bb_series['upper'],
-        "rsi_bb_lower": bb_series['lower'],
-        "rsi_bb_mid": bb_series['mid']
-    })
+    return pd.DataFrame(
+        index=series.index,
+        data={
+            "rsi": rsi_data,
+            "rsi_signal": rsi_signal,
+            "rsi_smooth": rsi_smooth,
+            "rsi_bb_upper": bb_series["upper"],
+            "rsi_bb_lower": bb_series["lower"],
+            "rsi_bb_mid": bb_series["mid"],
+        },
+    )
 
 
 # ---------------------------------------------
 
+
 def awesome_oscillator(df, weighted=False, fast=5, slow=34):
-    midprice = (df['high'] + df['low']) / 2
+    midprice = (df["high"] + df["low"]) / 2
 
     if weighted:
         ao = (midprice.ewm(fast).mean() - midprice.ewm(slow).mean()).values
     else:
-        ao = numpy_rolling_mean(midprice, fast) - \
-            numpy_rolling_mean(midprice, slow)
+        ao = numpy_rolling_mean(midprice, fast) - numpy_rolling_mean(midprice, slow)
 
     return pd.Series(index=df.index, data=ao)
 
 
 # ---------------------------------------------
 
 
 def nans(length=1):
     mtx = np.empty(length)
     mtx[:] = np.nan
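The heikinashi reflow above leaves the recurrence intact: the HA close is the plain OHLC mean and each HA open is the midpoint of the previous bar's HA open and close. A small usage sketch with invented prices, assuming the vendored module path:

import pandas as pd

from freqtrade.vendor.qtpylib.indicators import heikinashi

bars = pd.DataFrame(
    {
        "open": [10.0, 10.4, 10.1],
        "high": [10.6, 10.8, 10.5],
        "low": [9.8, 10.0, 9.9],
        "close": [10.4, 10.1, 10.3],
    }
)
ha = heikinashi(bars)
# First HA close is (10.0 + 10.6 + 9.8 + 10.4) / 4; the second HA open is the
# midpoint of the first HA open and HA close.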
@@ -164,39 +170,45 @@ def nans(length=1):
 
 # ---------------------------------------------
 
+
 def typical_price(bars):
-    res = (bars['high'] + bars['low'] + bars['close']) / 3.
+    res = (bars["high"] + bars["low"] + bars["close"]) / 3.0
     return pd.Series(index=bars.index, data=res)
 
 
 # ---------------------------------------------
 
+
 def mid_price(bars):
-    res = (bars['high'] + bars['low']) / 2.
+    res = (bars["high"] + bars["low"]) / 2.0
     return pd.Series(index=bars.index, data=res)
 
 
 # ---------------------------------------------
 
+
 def ibs(bars):
-    """ Internal bar strength """
-    res = np.round((bars['close'] - bars['low']) /
-                   (bars['high'] - bars['low']), 2)
+    """Internal bar strength"""
+    res = np.round((bars["close"] - bars["low"]) / (bars["high"] - bars["low"]), 2)
     return pd.Series(index=bars.index, data=res)
 
 
 # ---------------------------------------------
 
+
 def true_range(bars):
-    return pd.DataFrame({
-        "hl": bars['high'] - bars['low'],
-        "hc": abs(bars['high'] - bars['close'].shift(1)),
-        "lc": abs(bars['low'] - bars['close'].shift(1))
-    }).max(axis=1)
+    return pd.DataFrame(
+        {
+            "hl": bars["high"] - bars["low"],
+            "hc": abs(bars["high"] - bars["close"].shift(1)),
+            "lc": abs(bars["low"] - bars["close"].shift(1)),
+        }
+    ).max(axis=1)
 
 
 # ---------------------------------------------
 
+
 def atr(bars, window=14, exp=False):
     tr = true_range(bars)
 
@@ -210,6 +222,7 @@ def atr(bars, window=14, exp=False):
 
 # ---------------------------------------------
 
+
 def crossed(series1, series2, direction=None):
     if isinstance(series1, np.ndarray):
         series1 = pd.Series(series1)
@@ -218,12 +231,10 @@ def crossed(series1, series2, direction=None):
         series2 = pd.Series(index=series1.index, data=series2)
 
     if direction is None or direction == "above":
-        above = pd.Series((series1 > series2) & (
-            series1.shift(1) <= series2.shift(1)))
+        above = pd.Series((series1 > series2) & (series1.shift(1) <= series2.shift(1)))
 
     if direction is None or direction == "below":
-        below = pd.Series((series1 < series2) & (
-            series1.shift(1) >= series2.shift(1)))
+        below = pd.Series((series1 < series2) & (series1.shift(1) >= series2.shift(1)))
 
     if direction is None:
         return above | below
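For readers unfamiliar with the helper being reflowed here: crossed flags the bar on which series1 moves from one side of series2 to the other between consecutive rows. A tiny illustration with hypothetical values, assuming the vendored module path:

import pandas as pd

from freqtrade.vendor.qtpylib.indicators import crossed_above

fast = pd.Series([1, 2, 4])
slow = pd.Series([3, 3, 3])
# 2 <= 3 on the prior bar and 4 > 3 now, so only the last bar is a cross-above.
print(crossed_above(fast, slow).tolist())  # [False, False, True]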
@@ -238,6 +249,7 @@ def crossed_above(series1, series2):
 def crossed_below(series1, series2):
     return crossed(series1, series2, "below")
 
 
 # ---------------------------------------------
 
+
@@ -251,6 +263,7 @@ def rolling_std(series, window=200, min_periods=None):
     except Exception as e:  # noqa: F841
         return pd.Series(series).rolling(window=window, min_periods=min_periods).std()
 
 
 # ---------------------------------------------
 
+
@@ -264,6 +277,7 @@ def rolling_mean(series, window=200, min_periods=None):
     except Exception as e:  # noqa: F841
         return pd.Series(series).rolling(window=window, min_periods=min_periods).mean()
 
 
 # ---------------------------------------------
 
+
@@ -277,6 +291,7 @@ def rolling_min(series, window=14, min_periods=None):
 
 # ---------------------------------------------
 
+
 def rolling_max(series, window=14, min_periods=None):
     min_periods = window if min_periods is None else min_periods
     try:
@@ -287,6 +302,7 @@ def rolling_max(series, window=14, min_periods=None):
 
 # ---------------------------------------------
 
+
 def rolling_weighted_mean(series, window=200, min_periods=None):
     min_periods = window if min_periods is None else min_periods
     try:
@@ -297,41 +313,49 @@ def rolling_weighted_mean(series, window=200, min_periods=None):
 
 # ---------------------------------------------
 
+
 def hull_moving_average(series, window=200, min_periods=None):
     min_periods = window if min_periods is None else min_periods
-    ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - \
-        rolling_weighted_mean(series, window, min_periods)
+    ma = (2 * rolling_weighted_mean(series, window / 2, min_periods)) - rolling_weighted_mean(
+        series, window, min_periods
+    )
     return rolling_weighted_mean(ma, np.sqrt(window), min_periods)
 
 
 # ---------------------------------------------
 
+
 def sma(series, window=200, min_periods=None):
     return rolling_mean(series, window=window, min_periods=min_periods)
 
 
 # ---------------------------------------------
 
+
 def wma(series, window=200, min_periods=None):
     return rolling_weighted_mean(series, window=window, min_periods=min_periods)
 
 
 # ---------------------------------------------
 
+
 def hma(series, window=200, min_periods=None):
     return hull_moving_average(series, window=window, min_periods=min_periods)
 
 
 # ---------------------------------------------
 
+
 def vwap(bars):
     """
     calculate vwap of entire time series
     (input can be pandas series or numpy array)
     bars are usually mid [ (h+l)/2 ] or typical [ (h+l+c)/3 ]
     """
-    raise ValueError("using `qtpylib.vwap` facilitates lookahead bias. Please use "
-                     "`qtpylib.rolling_vwap` instead, which calculates vwap in a rolling manner.")
+    raise ValueError(
+        "using `qtpylib.vwap` facilitates lookahead bias. Please use "
+        "`qtpylib.rolling_vwap` instead, which calculates vwap in a rolling manner."
+    )
     # typical = ((bars['high'] + bars['low'] + bars['close']) / 3).values
     # volume = bars['volume'].values
 
@@ -341,6 +365,7 @@ def vwap(bars):
 
 # ---------------------------------------------
 
+
 def rolling_vwap(bars, window=200, min_periods=None):
     """
     calculate vwap using moving window
@@ -349,19 +374,22 @@ def rolling_vwap(bars, window=200, min_periods=None):
     """
     min_periods = window if min_periods is None else min_periods
 
-    typical = ((bars['high'] + bars['low'] + bars['close']) / 3)
-    volume = bars['volume']
+    typical = (bars["high"] + bars["low"] + bars["close"]) / 3
+    volume = bars["volume"]
 
-    left = (volume * typical).rolling(window=window,
-                                      min_periods=min_periods).sum()
+    left = (volume * typical).rolling(window=window, min_periods=min_periods).sum()
     right = volume.rolling(window=window, min_periods=min_periods).sum()
 
-    return pd.Series(index=bars.index, data=(left / right)
-                     ).replace([np.inf, -np.inf], float('NaN')).ffill()
+    return (
+        pd.Series(index=bars.index, data=(left / right))
+        .replace([np.inf, -np.inf], float("NaN"))
+        .ffill()
+    )
 
 
 # ---------------------------------------------
 
+
 def rsi(series, window=14):
     """
     compute the n period relative strength indicator
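A hypothetical call of the reshaped helper, assuming bars is an OHLCV DataFrame with high/low/close/volume columns:

vwap200 = rolling_vwap(bars, window=200, min_periods=1)  # rolling volume-weighted typical price; inf becomes NaN and is forward-filled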
@@ -369,13 +397,13 @@ def rsi(series, window=14):
 
     # 100-(100/relative_strength)
     deltas = np.diff(series)
-    seed = deltas[:window + 1]
+    seed = deltas[: window + 1]
 
     # default values
     ups = seed[seed > 0].sum() / window
     downs = -seed[seed < 0].sum() / window
     rsival = np.zeros_like(series)
-    rsival[:window] = 100. - 100. / (1. + ups / downs)
+    rsival[:window] = 100.0 - 100.0 / (1.0 + ups / downs)
 
     # period values
     for i in range(window, len(series)):
@@ -388,8 +416,8 @@ def rsi(series, window=14):
             downval = -delta
 
         ups = (ups * (window - 1) + upval) / window
-        downs = (downs * (window - 1.) + downval) / window
-        rsival[i] = 100. - 100. / (1. + ups / downs)
+        downs = (downs * (window - 1.0) + downval) / window
+        rsival[i] = 100.0 - 100.0 / (1.0 + ups / downs)
 
     # return rsival
     return pd.Series(index=series.index, data=rsival)
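For reference, the loop in the two rsi hunks implements Wilder's smoothing: each period updates ups = (ups * (window - 1) + upval) / window and downs likewise, and the indicator is the classic RSI = 100 - 100 / (1 + RS) with RS = ups / downs. The reformatting only normalizes float literals (100. to 100.0) and slice spacing; the arithmetic is unchanged.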
@@ -397,60 +425,57 @@ def rsi(series, window=14):
 
 
 # ---------------------------------------------
 
+
 def macd(series, fast=3, slow=10, smooth=16):
     """
     compute the MACD (Moving Average Convergence/Divergence)
     using a fast and slow exponential moving avg'
     return value is emaslow, emafast, macd which are len(x) arrays
     """
-    macd_line = rolling_weighted_mean(series, window=fast) - \
-        rolling_weighted_mean(series, window=slow)
+    macd_line = rolling_weighted_mean(series, window=fast) - rolling_weighted_mean(
+        series, window=slow
+    )
     signal = rolling_weighted_mean(macd_line, window=smooth)
     histogram = macd_line - signal
     # return macd_line, signal, histogram
-    return pd.DataFrame(index=series.index, data={
-        'macd': macd_line.values,
-        'signal': signal.values,
-        'histogram': histogram.values
-    })
+    return pd.DataFrame(
+        index=series.index,
+        data={"macd": macd_line.values, "signal": signal.values, "histogram": histogram.values},
+    )
 
 
 # ---------------------------------------------
 
+
 def bollinger_bands(series, window=20, stds=2):
     ma = rolling_mean(series, window=window, min_periods=1)
     std = rolling_std(series, window=window, min_periods=1)
     upper = ma + std * stds
     lower = ma - std * stds
 
-    return pd.DataFrame(index=series.index, data={
-        'upper': upper,
-        'mid': ma,
-        'lower': lower
-    })
+    return pd.DataFrame(index=series.index, data={"upper": upper, "mid": ma, "lower": lower})
 
 
 # ---------------------------------------------
 
+
 def weighted_bollinger_bands(series, window=20, stds=2):
     ema = rolling_weighted_mean(series, window=window)
     std = rolling_std(series, window=window)
     upper = ema + std * stds
     lower = ema - std * stds
 
-    return pd.DataFrame(index=series.index, data={
-        'upper': upper.values,
-        'mid': ema.values,
-        'lower': lower.values
-    })
+    return pd.DataFrame(
+        index=series.index, data={"upper": upper.values, "mid": ema.values, "lower": lower.values}
+    )
 
 
 # ---------------------------------------------
 
+
 def returns(series):
     try:
-        res = (series / series.shift(1) -
-               1).replace([np.inf, -np.inf], float('NaN'))
+        res = (series / series.shift(1) - 1).replace([np.inf, -np.inf], float("NaN"))
     except Exception as e:  # noqa: F841
         res = nans(len(series))
 
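The band constructors above share one shape: the mid band is a rolling (or weighted) mean, and upper/lower are offset by stds standard deviations, so the bands are symmetric around mid. A quick sanity sketch of that symmetry, with invented data and the assumed vendored import path:

import numpy as np
import pandas as pd

from freqtrade.vendor.qtpylib.indicators import bollinger_bands

close = pd.Series(np.random.rand(100))
bands = bollinger_bands(close, window=20, stds=2)
# upper - mid and mid - lower are both stds * rolling std, so their difference is ~0.
assert ((bands["upper"] - bands["mid"]) - (bands["mid"] - bands["lower"])).abs().max() < 1e-9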
@@ -459,10 +484,10 @@ def returns(series):
 
 # ---------------------------------------------
 
+
 def log_returns(series):
     try:
-        res = np.log(series / series.shift(1)
-                     ).replace([np.inf, -np.inf], float('NaN'))
+        res = np.log(series / series.shift(1)).replace([np.inf, -np.inf], float("NaN"))
     except Exception as e:  # noqa: F841
         res = nans(len(series))
 
@@ -471,10 +496,10 @@ def log_returns(series):
 
 # ---------------------------------------------
 
+
 def implied_volatility(series, window=252):
     try:
-        logret = np.log(series / series.shift(1)
-                        ).replace([np.inf, -np.inf], float('NaN'))
+        logret = np.log(series / series.shift(1)).replace([np.inf, -np.inf], float("NaN"))
         res = numpy_rolling_std(logret, window) * np.sqrt(window)
     except Exception as e:  # noqa: F841
         res = nans(len(series))
 
@@ -484,6 +509,7 @@ def implied_volatility(series, window=252):
 
 # ---------------------------------------------
 
+
 def keltner_channel(bars, window=14, atrs=2):
     typical_mean = rolling_mean(typical_price(bars), window)
     atrval = atr(bars, window) * atrs
@@ -491,15 +517,15 @@ def keltner_channel(bars, window=14, atrs=2):
     upper = typical_mean + atrval
     lower = typical_mean - atrval
 
-    return pd.DataFrame(index=bars.index, data={
-        'upper': upper.values,
-        'mid': typical_mean.values,
-        'lower': lower.values
-    })
+    return pd.DataFrame(
+        index=bars.index,
+        data={"upper": upper.values, "mid": typical_mean.values, "lower": lower.values},
+    )
 
 
 # ---------------------------------------------
 
+
 def roc(series, window=14):
     """
     compute rate of change
@@ -510,18 +536,20 @@ def roc(series, window=14):
 
 # ---------------------------------------------
 
+
 def cci(series, window=14):
     """
     compute commodity channel index
     """
     price = typical_price(series)
     typical_mean = rolling_mean(price, window)
-    res = (price - typical_mean) / (.015 * np.std(typical_mean))
+    res = (price - typical_mean) / (0.015 * np.std(typical_mean))
     return pd.Series(index=series.index, data=res)
 
 
 # ---------------------------------------------
 
+
 def stoch(df, window=14, d=3, k=3, fast=False):
     """
     compute the n period relative strength indicator
@@ -530,22 +558,22 @@ def stoch(df, window=14, d=3, k=3, fast=False):
 
     my_df = pd.DataFrame(index=df.index)
 
-    my_df['rolling_max'] = df['high'].rolling(window).max()
-    my_df['rolling_min'] = df['low'].rolling(window).min()
+    my_df["rolling_max"] = df["high"].rolling(window).max()
+    my_df["rolling_min"] = df["low"].rolling(window).min()
 
-    my_df['fast_k'] = (
-        100 * (df['close'] - my_df['rolling_min']) /
-        (my_df['rolling_max'] - my_df['rolling_min'])
-    )
-    my_df['fast_d'] = my_df['fast_k'].rolling(d).mean()
+    my_df["fast_k"] = (
+        100 * (df["close"] - my_df["rolling_min"]) / (my_df["rolling_max"] - my_df["rolling_min"])
+    )
+    my_df["fast_d"] = my_df["fast_k"].rolling(d).mean()
 
     if fast:
-        return my_df.loc[:, ['fast_k', 'fast_d']]
+        return my_df.loc[:, ["fast_k", "fast_d"]]
 
-    my_df['slow_k'] = my_df['fast_k'].rolling(k).mean()
-    my_df['slow_d'] = my_df['slow_k'].rolling(d).mean()
+    my_df["slow_k"] = my_df["fast_k"].rolling(k).mean()
+    my_df["slow_d"] = my_df["slow_k"].rolling(d).mean()
 
-    return my_df.loc[:, ['slow_k', 'slow_d']]
+    return my_df.loc[:, ["slow_k", "slow_d"]]
 
 
 # ---------------------------------------------
+
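Usage of the reformatted stoch is unchanged: with fast=True it returns the fast_k/fast_d columns, otherwise the additionally smoothed slow_k/slow_d. A sketch, assuming ohlc_df carries high/low/close columns:

st_slow = stoch(ohlc_df, window=14, d=3, k=3)   # columns: slow_k, slow_d
st_fast = stoch(ohlc_df, window=14, fast=True)  # columns: fast_k, fast_d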
@@ -559,7 +587,7 @@ def zlma(series, window=20, min_periods=None, kind="ema"):
 
     lag = (window - 1) // 2
     series = 2 * series - series.shift(lag)
-    if kind in ['ewm', 'ema']:
+    if kind in ["ewm", "ema"]:
         return wma(series, lag, min_periods)
     elif kind == "hma":
         return hma(series, lag, min_periods)
@@ -577,29 +605,30 @@ def zlsma(series, window, min_periods=None):

 def zlhma(series, window, min_periods=None):
     return zlma(series, window, min_periods, kind="hma")


 # ---------------------------------------------


-def zscore(bars, window=20, stds=1, col='close'):
-    """ get zscore of price """
+def zscore(bars, window=20, stds=1, col="close"):
+    """get zscore of price"""
     std = numpy_rolling_std(bars[col], window)
     mean = numpy_rolling_mean(bars[col], window)
     return (bars[col] - mean) / (std * stds)


 # ---------------------------------------------


 def pvt(bars):
-    """ Price Volume Trend """
-    trend = ((bars['close'] - bars['close'].shift(1)) /
-             bars['close'].shift(1)) * bars['volume']
+    """Price Volume Trend"""
+    trend = ((bars["close"] - bars["close"].shift(1)) / bars["close"].shift(1)) * bars["volume"]
     return trend.cumsum()


 def chopiness(bars, window=14):
     atrsum = true_range(bars).rolling(window).sum()
-    highs = bars['high'].rolling(window).max()
-    lows = bars['low'].rolling(window).min()
+    highs = bars["high"].rolling(window).max()
+    lows = bars["low"].rolling(window).min()
     return 100 * np.log10(atrsum / (highs - lows)) / np.log10(window)
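For context (not part of this diff): zscore() measures how many rolling standard deviations price sits from its rolling mean, and pvt() accumulates volume weighted by the fractional price change. A minimal pandas-only sketch of the same calculations, where pandas' rolling .std() (sample std) stands in for the vendored numpy rolling helpers and the bars values are made up:

    # Minimal sketch (toy data; equivalent pandas expressions, stds=1).
    import pandas as pd

    bars = pd.DataFrame(
        {
            "close": [100.0, 101.0, 99.5, 102.0, 103.0],
            "volume": [10, 12, 8, 15, 11],
        }
    )

    window = 3
    mean = bars["close"].rolling(window).mean()
    std = bars["close"].rolling(window).std()
    z = (bars["close"] - mean) / std               # rolling z-score of price

    pct = bars["close"].pct_change()               # (close_t - close_{t-1}) / close_{t-1}
    pvt = (pct * bars["volume"]).cumsum()          # Price Volume Trend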
@@ -131,7 +131,7 @@ class Worker:
             if strategy_version is not None:
                 version += ", strategy_version: " + strategy_version
             logger.info(
-                f"Bot heartbeat. PID={getpid()}, " f"version='{version}', state='{state.name}'"
+                f"Bot heartbeat. PID={getpid()}, version='{version}', state='{state.name}'"
             )
             self._heartbeat_msg = now
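For context (not part of this diff): this rewrite, like most hunks in the commit, is behavior-preserving because adjacent (f-)string literals are concatenated at compile time. A minimal sketch with made-up values showing the two forms produce the identical message:

    # Minimal sketch (illustrative values only).
    pid, version, state_name = 12345, "2024.4", "RUNNING"
    joined = f"Bot heartbeat. PID={pid}, " f"version='{version}', state='{state_name}'"
    merged = f"Bot heartbeat. PID={pid}, version='{version}', state='{state_name}'"
    assert joined == merged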
@@ -186,7 +186,7 @@ def test_list_timeframes(mocker, capsys):
     start_list_timeframes(get_args(args))
     captured = capsys.readouterr()
     assert re.match(
-        "Timeframes available for the exchange `Bybit`: " "1m, 5m, 30m, 1h, 1d", captured.out
+        "Timeframes available for the exchange `Bybit`: 1m, 5m, 30m, 1h, 1d", captured.out
     )

     # Test with --exchange bybit
@@ -198,7 +198,7 @@ def test_list_timeframes(mocker, capsys):
     start_list_timeframes(get_args(args))
     captured = capsys.readouterr()
     assert re.match(
-        "Timeframes available for the exchange `Bybit`: " "1m, 5m, 30m, 1h, 1d", captured.out
+        "Timeframes available for the exchange `Bybit`: 1m, 5m, 30m, 1h, 1d", captured.out
     )

     api_mock.timeframes = {
@@ -222,7 +222,7 @@ def test_list_timeframes(mocker, capsys):
     start_list_timeframes(get_args(args))
     captured = capsys.readouterr()
     assert re.match(
-        "Timeframes available for the exchange `Binance`: " "1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d",
+        "Timeframes available for the exchange `Binance`: 1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d",
         captured.out,
     )
@@ -415,7 +415,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     assert td != len(data["UNITTEST/BTC"])
     start_real = data["UNITTEST/BTC"].iloc[0, 0]
     assert log_has(
-        f"UNITTEST/BTC, spot, 5m, " f"data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}",
+        f"UNITTEST/BTC, spot, 5m, data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}",
         caplog,
     )
     # Make sure we start fresh - test missing data at end
@@ -435,7 +435,7 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
     # Shift endtime with +5
     end_real = data["UNITTEST/BTC"].iloc[-1, 0].to_pydatetime()
     assert log_has(
-        f"UNITTEST/BTC, spot, 5m, " f"data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}",
+        f"UNITTEST/BTC, spot, 5m, data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}",
         caplog,
     )
@@ -645,7 +645,7 @@ def test_validate_stakecurrency_error(default_conf, mocker, caplog):
     mocker.patch(f"{EXMS}._load_async_markets")
     with pytest.raises(
         ConfigurationError,
-        match=r"XRP is not available as stake on .*" "Available currencies are: BTC, ETH, USDT",
+        match=r"XRP is not available as stake on .*Available currencies are: BTC, ETH, USDT",
     ):
         Exchange(default_conf)
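Worth noting for this and the similar `match=` rewrites below (not part of this diff): pytest applies `match` with `re.search` against the exception message, and adjacent literals concatenate at compile time whether or not one is a raw string, so the merged pattern is character-for-character identical. A minimal standalone sketch with a hypothetical exception and message:

    # Minimal sketch (hypothetical error message, not from the freqtrade test suite).
    import pytest

    def boom():
        raise ValueError(
            "XRP is not available as stake on Foo. Available currencies are: BTC, ETH, USDT"
        )

    with pytest.raises(
        ValueError,
        match=r"XRP is not available as stake on .*Available currencies are: BTC, ETH, USDT",
    ):
        boom()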
@@ -2328,7 +2328,7 @@ def test_refresh_latest_ohlcv(mocker, default_conf, caplog, candle_type) -> None:

     assert exchange._api_async.fetch_ohlcv.call_count == 0
     assert log_has(
-        f"Using cached candle (OHLCV) data for {pairs[0][0]}, " f"{pairs[0][1]}, {candle_type} ...",
+        f"Using cached candle (OHLCV) data for {pairs[0][0]}, {pairs[0][1]}, {candle_type} ...",
         caplog,
     )
     caplog.clear()
@@ -3771,7 +3771,7 @@ def test_get_real_amount_quote_dust(
     assert freqtrade.get_real_amount(trade, buy_order_fee, order_obj) is None
     assert walletmock.call_count == 1
     assert log_has_re(
-        r"Fee amount for Trade.* was in base currency " "- Eating Fee 0.008 into dust", caplog
+        r"Fee amount for Trade.* was in base currency - Eating Fee 0.008 into dust", caplog
     )
@@ -393,9 +393,7 @@ def test_backtesting_start(default_conf, mocker, caplog) -> None:
     backtesting.strategy.bot_start = MagicMock()
     backtesting.start()
     # check the logs, that will contain the backtest result
-    exists = [
-        "Backtesting with data from 2017-11-14 21:17:00 " "up to 2017-11-14 22:59:00 (0 days)."
-    ]
+    exists = ["Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days)."]
     for line in exists:
         assert log_has(line, caplog)
     assert backtesting.strategy.dp._pairlists is not None
@@ -1574,8 +1572,8 @@ def test_backtest_start_timerange(default_conf, mocker, caplog, testdatadir):
         "Ignoring max_open_trades (--disable-max-market-positions was used) ...",
         "Parameter --timerange detected: 1510694220-1510700340 ...",
         f"Using data directory: {testdatadir} ...",
-        "Loading data from 2017-11-14 20:57:00 " "up to 2017-11-14 22:59:00 (0 days).",
-        "Backtesting with data from 2017-11-14 21:17:00 " "up to 2017-11-14 22:59:00 (0 days).",
+        "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).",
+        "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).",
         "Parameter --enable-position-stacking detected ...",
     ]
@@ -1665,8 +1663,8 @@ def test_backtest_start_multi_strat(default_conf, mocker, caplog, testdatadir):
         "Ignoring max_open_trades (--disable-max-market-positions was used) ...",
         "Parameter --timerange detected: 1510694220-1510700340 ...",
         f"Using data directory: {testdatadir} ...",
-        "Loading data from 2017-11-14 20:57:00 " "up to 2017-11-14 22:59:00 (0 days).",
-        "Backtesting with data from 2017-11-14 21:17:00 " "up to 2017-11-14 22:59:00 (0 days).",
+        "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).",
+        "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).",
         "Parameter --enable-position-stacking detected ...",
         f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}",
         "Running backtesting for Strategy StrategyTestV2",
@@ -1799,8 +1797,8 @@ def test_backtest_start_multi_strat_nomock(default_conf, mocker, caplog, testdatadir):
         "Ignoring max_open_trades (--disable-max-market-positions was used) ...",
         "Parameter --timerange detected: 1510694220-1510700340 ...",
         f"Using data directory: {testdatadir} ...",
-        "Loading data from 2017-11-14 20:57:00 " "up to 2017-11-14 22:59:00 (0 days).",
-        "Backtesting with data from 2017-11-14 21:17:00 " "up to 2017-11-14 22:59:00 (0 days).",
+        "Loading data from 2017-11-14 20:57:00 up to 2017-11-14 22:59:00 (0 days).",
+        "Backtesting with data from 2017-11-14 21:17:00 up to 2017-11-14 22:59:00 (0 days).",
         "Parameter --enable-position-stacking detected ...",
         f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}",
         "Running backtesting for Strategy StrategyTestV2",
@@ -1975,8 +1973,8 @@ def test_backtest_start_nomock_futures(default_conf_usdt, mocker, caplog, testdatadir):
     exists = [
         "Parameter -i/--timeframe detected ... Using timeframe: 1h ...",
         f"Using data directory: {testdatadir} ...",
-        "Loading data from 2021-11-17 01:00:00 " "up to 2021-11-21 04:00:00 (4 days).",
-        "Backtesting with data from 2021-11-17 21:00:00 " "up to 2021-11-21 04:00:00 (3 days).",
+        "Loading data from 2021-11-17 01:00:00 up to 2021-11-21 04:00:00 (4 days).",
+        "Backtesting with data from 2021-11-17 21:00:00 up to 2021-11-21 04:00:00 (3 days).",
         "XRP/USDT:USDT, funding_rate, 8h, data starts at 2021-11-18 00:00:00",
         "XRP/USDT:USDT, mark, 8h, data starts at 2021-11-18 00:00:00",
         f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}",
@@ -2112,8 +2110,8 @@ def test_backtest_start_multi_strat_nomock_detail(
         "Parameter -i/--timeframe detected ... Using timeframe: 5m ...",
         "Parameter --timeframe-detail detected, using 1m for intra-candle backtesting ...",
         f"Using data directory: {testdatadir} ...",
-        "Loading data from 2019-10-11 00:00:00 " "up to 2019-10-13 11:15:00 (2 days).",
-        "Backtesting with data from 2019-10-11 01:40:00 " "up to 2019-10-13 11:15:00 (2 days).",
+        "Loading data from 2019-10-11 00:00:00 up to 2019-10-13 11:15:00 (2 days).",
+        "Backtesting with data from 2019-10-11 01:40:00 up to 2019-10-13 11:15:00 (2 days).",
         f"Running backtesting for Strategy {CURRENT_TEST_STRATEGY}",
     ]
@@ -111,7 +111,7 @@ def test_load_strategy_noname(default_conf):
     default_conf["strategy"] = ""
     with pytest.raises(
         OperationalException,
-        match="No strategy set. Please use `--strategy` to specify " "the strategy class to use.",
+        match="No strategy set. Please use `--strategy` to specify the strategy class to use.",
     ):
         StrategyResolver.load_strategy(default_conf)
@@ -664,7 +664,7 @@ def test_validate_max_open_trades(default_conf):
     default_conf["stake_amount"] = "unlimited"
     with pytest.raises(
         OperationalException,
-        match="`max_open_trades` and `stake_amount` " "cannot both be unlimited.",
+        match="`max_open_trades` and `stake_amount` cannot both be unlimited.",
     ):
         validate_config_consistency(default_conf)
@@ -767,7 +767,7 @@ def test_validate_edge2(edge_conf):
     )
     with pytest.raises(
         OperationalException,
-        match="Edge requires `use_exit_signal` to be True, " "otherwise no sells will happen.",
+        match="Edge requires `use_exit_signal` to be True, otherwise no sells will happen.",
     ):
         validate_config_consistency(edge_conf)