ruff format: commands

Matthias 2024-05-12 16:27:03 +02:00
parent 5eb4ad2208
commit 3c9be47236
14 changed files with 1064 additions and 812 deletions
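The diff below applies ruff's code formatter to the freqtrade.commands package, changes of the kind produced by running `ruff format` over the module: string literals are normalized to double quotes, long argument lists are exploded to one element per line with trailing commas, and wrapped calls are reflowed to the configured line length. A minimal before/after sketch of the pattern (hypothetical snippet, not taken from this commit):

# Before formatting: single quotes, hanging indent, no trailing comma
ARGS_EXAMPLE = ["timeframe", "timerange",
                "pairs"]
logger.info('Starting freqtrade in example mode')

# After `ruff format`: double quotes, one element per line, trailing comma
ARGS_EXAMPLE = [
    "timeframe",
    "timerange",
    "pairs",
]
logger.info("Starting freqtrade in example mode")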


@ -6,6 +6,7 @@ Contains all start-commands, subcommands and CLI Interface creation.
Note: Be careful with file-scoped imports in these subfiles.
as they are parsed on startup, nothing containing optional modules should be loaded.
"""
from freqtrade.commands.analyze_commands import start_analysis_entries_exits
from freqtrade.commands.arguments import Arguments
from freqtrade.commands.build_config_commands import start_new_config, start_show_config


@ -20,25 +20,25 @@ def setup_analyze_configuration(args: Dict[str, Any], method: RunMode) -> Dict[s
config = setup_utils_configuration(args, method)
no_unlimited_runmodes = {
RunMode.BACKTEST: 'backtesting',
RunMode.BACKTEST: "backtesting",
}
if method in no_unlimited_runmodes.keys():
from freqtrade.data.btanalysis import get_latest_backtest_filename
if 'exportfilename' in config:
if config['exportfilename'].is_dir():
btfile = Path(get_latest_backtest_filename(config['exportfilename']))
if "exportfilename" in config:
if config["exportfilename"].is_dir():
btfile = Path(get_latest_backtest_filename(config["exportfilename"]))
signals_file = f"{config['exportfilename']}/{btfile.stem}_signals.pkl"
else:
if config['exportfilename'].exists():
btfile = Path(config['exportfilename'])
if config["exportfilename"].exists():
btfile = Path(config["exportfilename"])
signals_file = f"{btfile.parent}/{btfile.stem}_signals.pkl"
else:
raise ConfigurationError(f"{config['exportfilename']} does not exist.")
else:
raise ConfigurationError('exportfilename not in config.')
raise ConfigurationError("exportfilename not in config.")
if (not Path(signals_file).exists()):
if not Path(signals_file).exists():
raise OperationalException(
f"Cannot find latest backtest signals file: {signals_file}."
"Run backtesting with `--export signals`."
@ -58,6 +58,6 @@ def start_analysis_entries_exits(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_analyze_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in analysis mode')
logger.info("Starting freqtrade in analysis mode")
process_entry_exit_reasons(config)


@ -1,6 +1,7 @@
"""
This module contains the argument manager class
"""
import argparse
from functools import partial
from pathlib import Path
@ -12,35 +13,72 @@ from freqtrade.constants import DEFAULT_CONFIG
ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
ARGS_STRATEGY = ["strategy", "strategy_path", "recursive_strategy_search", "freqaimodel",
"freqaimodel_path"]
ARGS_STRATEGY = [
"strategy",
"strategy_path",
"recursive_strategy_search",
"freqaimodel",
"freqaimodel_path",
]
ARGS_TRADE = ["db_url", "sd_notify", "dry_run", "dry_run_wallet", "fee"]
ARGS_WEBSERVER: List[str] = []
ARGS_COMMON_OPTIMIZE = ["timeframe", "timerange", "dataformat_ohlcv",
"max_open_trades", "stake_amount", "fee", "pairs"]
ARGS_COMMON_OPTIMIZE = [
"timeframe",
"timerange",
"dataformat_ohlcv",
"max_open_trades",
"stake_amount",
"fee",
"pairs",
]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions",
"enable_protections", "dry_run_wallet", "timeframe_detail",
"strategy_list", "export", "exportfilename",
"backtest_breakdown", "backtest_cache",
"freqai_backtest_live_models"]
ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
"strategy_list",
"export",
"exportfilename",
"backtest_breakdown",
"backtest_cache",
"freqai_backtest_live_models",
]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
"position_stacking", "use_max_market_positions",
"enable_protections", "dry_run_wallet", "timeframe_detail",
"epochs", "spaces", "print_all",
"print_colorized", "print_json", "hyperopt_jobs",
"hyperopt_random_state", "hyperopt_min_trades",
"hyperopt_loss", "disableparamexport",
"hyperopt_ignore_missing_space", "analyze_per_epoch"]
ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [
"hyperopt",
"hyperopt_path",
"position_stacking",
"use_max_market_positions",
"enable_protections",
"dry_run_wallet",
"timeframe_detail",
"epochs",
"spaces",
"print_all",
"print_colorized",
"print_json",
"hyperopt_jobs",
"hyperopt_random_state",
"hyperopt_min_trades",
"hyperopt_loss",
"disableparamexport",
"hyperopt_ignore_missing_space",
"analyze_per_epoch",
]
ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
ARGS_LIST_STRATEGIES = ["strategy_path", "print_one_column", "print_colorized",
"recursive_strategy_search"]
ARGS_LIST_STRATEGIES = [
"strategy_path",
"print_one_column",
"print_colorized",
"recursive_strategy_search",
]
ARGS_LIST_FREQAIMODELS = ["freqaimodel_path", "print_one_column", "print_colorized"]
@ -52,12 +90,27 @@ ARGS_LIST_EXCHANGES = ["print_one_column", "list_exchanges_all"]
ARGS_LIST_TIMEFRAMES = ["exchange", "print_one_column"]
ARGS_LIST_PAIRS = ["exchange", "print_list", "list_pairs_print_json", "print_one_column",
"print_csv", "base_currencies", "quote_currencies", "list_pairs_all",
"trading_mode"]
ARGS_LIST_PAIRS = [
"exchange",
"print_list",
"list_pairs_print_json",
"print_one_column",
"print_csv",
"base_currencies",
"quote_currencies",
"list_pairs_all",
"trading_mode",
]
ARGS_TEST_PAIRLIST = ["user_data_dir", "verbosity", "config", "quote_currencies",
"print_one_column", "list_pairs_print_json", "exchange"]
ARGS_TEST_PAIRLIST = [
"user_data_dir",
"verbosity",
"config",
"quote_currencies",
"print_one_column",
"list_pairs_print_json",
"exchange",
]
ARGS_CREATE_USERDIR = ["user_data_dir", "reset"]
@ -70,22 +123,58 @@ ARGS_CONVERT_DATA_TRADES = ["pairs", "format_from_trades", "format_to", "erase",
ARGS_CONVERT_DATA = ["pairs", "format_from", "format_to", "erase", "exchange"]
ARGS_CONVERT_DATA_OHLCV = ARGS_CONVERT_DATA + ["timeframes", "trading_mode", "candle_types"]
ARGS_CONVERT_TRADES = ["pairs", "timeframes", "exchange", "dataformat_ohlcv", "dataformat_trades",
"trading_mode"]
ARGS_CONVERT_TRADES = [
"pairs",
"timeframes",
"exchange",
"dataformat_ohlcv",
"dataformat_trades",
"trading_mode",
]
ARGS_LIST_DATA = ["exchange", "dataformat_ohlcv", "pairs", "trading_mode", "show_timerange"]
ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "new_pairs_days", "include_inactive",
"timerange", "download_trades", "exchange", "timeframes",
"erase", "dataformat_ohlcv", "dataformat_trades", "trading_mode",
"prepend_data"]
ARGS_DOWNLOAD_DATA = [
"pairs",
"pairs_file",
"days",
"new_pairs_days",
"include_inactive",
"timerange",
"download_trades",
"exchange",
"timeframes",
"erase",
"dataformat_ohlcv",
"dataformat_trades",
"trading_mode",
"prepend_data",
]
ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit",
"db_url", "trade_source", "export", "exportfilename",
"timerange", "timeframe", "no_trades"]
ARGS_PLOT_DATAFRAME = [
"pairs",
"indicators1",
"indicators2",
"plot_limit",
"db_url",
"trade_source",
"export",
"exportfilename",
"timerange",
"timeframe",
"no_trades",
]
ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
"trade_source", "timeframe", "plot_auto_open", ]
ARGS_PLOT_PROFIT = [
"pairs",
"timerange",
"export",
"exportfilename",
"db_url",
"trade_source",
"timeframe",
"plot_auto_open",
]
ARGS_CONVERT_DB = ["db_url", "db_url_from"]
@ -93,36 +182,76 @@ ARGS_INSTALL_UI = ["erase_ui_only", "ui_version"]
ARGS_SHOW_TRADES = ["db_url", "trade_ids", "print_json"]
ARGS_HYPEROPT_LIST = ["hyperopt_list_best", "hyperopt_list_profitable",
"hyperopt_list_min_trades", "hyperopt_list_max_trades",
"hyperopt_list_min_avg_time", "hyperopt_list_max_avg_time",
"hyperopt_list_min_avg_profit", "hyperopt_list_max_avg_profit",
"hyperopt_list_min_total_profit", "hyperopt_list_max_total_profit",
"hyperopt_list_min_objective", "hyperopt_list_max_objective",
"print_colorized", "print_json", "hyperopt_list_no_details",
"hyperoptexportfilename", "export_csv"]
ARGS_HYPEROPT_LIST = [
"hyperopt_list_best",
"hyperopt_list_profitable",
"hyperopt_list_min_trades",
"hyperopt_list_max_trades",
"hyperopt_list_min_avg_time",
"hyperopt_list_max_avg_time",
"hyperopt_list_min_avg_profit",
"hyperopt_list_max_avg_profit",
"hyperopt_list_min_total_profit",
"hyperopt_list_max_total_profit",
"hyperopt_list_min_objective",
"hyperopt_list_max_objective",
"print_colorized",
"print_json",
"hyperopt_list_no_details",
"hyperoptexportfilename",
"export_csv",
]
ARGS_HYPEROPT_SHOW = ["hyperopt_list_best", "hyperopt_list_profitable", "hyperopt_show_index",
"print_json", "hyperoptexportfilename", "hyperopt_show_no_header",
"disableparamexport", "backtest_breakdown"]
ARGS_HYPEROPT_SHOW = [
"hyperopt_list_best",
"hyperopt_list_profitable",
"hyperopt_show_index",
"print_json",
"hyperoptexportfilename",
"hyperopt_show_no_header",
"disableparamexport",
"backtest_breakdown",
]
ARGS_ANALYZE_ENTRIES_EXITS = ["exportfilename", "analysis_groups", "enter_reason_list",
"exit_reason_list", "indicator_list", "timerange",
"analysis_rejected", "analysis_to_csv", "analysis_csv_path"]
ARGS_ANALYZE_ENTRIES_EXITS = [
"exportfilename",
"analysis_groups",
"enter_reason_list",
"exit_reason_list",
"indicator_list",
"timerange",
"analysis_rejected",
"analysis_to_csv",
"analysis_csv_path",
]
NO_CONF_REQURIED = ["convert-data", "convert-trade-data", "download-data", "list-timeframes",
"list-markets", "list-pairs", "list-strategies", "list-freqaimodels",
"list-data", "hyperopt-list", "hyperopt-show", "backtest-filter",
"plot-dataframe", "plot-profit", "show-trades", "trades-to-ohlcv",
"strategy-updater"]
NO_CONF_REQURIED = [
"convert-data",
"convert-trade-data",
"download-data",
"list-timeframes",
"list-markets",
"list-pairs",
"list-strategies",
"list-freqaimodels",
"list-data",
"hyperopt-list",
"hyperopt-show",
"backtest-filter",
"plot-dataframe",
"plot-profit",
"show-trades",
"trades-to-ohlcv",
"strategy-updater",
]
NO_CONF_ALLOWED = ["create-userdir", "list-exchanges", "new-strategy"]
ARGS_STRATEGY_UPDATER = ["strategy_list", "strategy_path", "recursive_strategy_search"]
ARGS_LOOKAHEAD_ANALYSIS = [
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", 'cache')
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
a for a in ARGS_BACKTEST if a not in ("position_stacking", "use_max_market_positions", "cache")
] + ["minimum_trade_amount", "targeted_trade_amount", "lookahead_analysis_exportfilename"]
ARGS_RECURSIVE_ANALYSIS = ["timeframe", "timerange", "dataformat_ohlcv", "pairs", "startup_candle"]
@ -156,14 +285,14 @@ class Arguments:
# Workaround issue in argparse with action='append' and default value
# (see https://bugs.python.org/issue16399)
# Allow no-config for certain commands (like downloading / plotting)
if ('config' in parsed_arg and parsed_arg.config is None):
conf_required = ('command' in parsed_arg and parsed_arg.command in NO_CONF_REQURIED)
if "config" in parsed_arg and parsed_arg.config is None:
conf_required = "command" in parsed_arg and parsed_arg.command in NO_CONF_REQURIED
if 'user_data_dir' in parsed_arg and parsed_arg.user_data_dir is not None:
if "user_data_dir" in parsed_arg and parsed_arg.user_data_dir is not None:
user_dir = parsed_arg.user_data_dir
else:
# Default case
user_dir = 'user_data'
user_dir = "user_data"
# Try loading from "user_data/config.json"
cfgfile = Path(user_dir) / DEFAULT_CONFIG
if cfgfile.is_file():
@ -177,7 +306,6 @@ class Arguments:
return parsed_arg
def _build_args(self, optionlist, parser):
for val in optionlist:
opt = AVAILABLE_CLI_OPTIONS[val]
parser.add_argument(*opt.cli, dest=val, **opt.kwargs)
@ -198,10 +326,9 @@ class Arguments:
# Build main command
self.parser = argparse.ArgumentParser(
prog="freqtrade",
description='Free, open source crypto trading bot'
prog="freqtrade", description="Free, open source crypto trading bot"
)
self._build_args(optionlist=['version'], parser=self.parser)
self._build_args(optionlist=["version"], parser=self.parser)
from freqtrade.commands import (
start_analysis_entries_exits,
@ -237,24 +364,23 @@ class Arguments:
start_webserver,
)
subparsers = self.parser.add_subparsers(dest='command',
# Use custom message when no subhandler is added
# shown from `main.py`
# required=True
)
subparsers = self.parser.add_subparsers(
dest="command",
# Use custom message when no subhandler is added
# shown from `main.py`
# required=True
)
# Add trade subcommand
trade_cmd = subparsers.add_parser(
'trade',
help='Trade module.',
parents=[_common_parser, _strategy_parser]
"trade", help="Trade module.", parents=[_common_parser, _strategy_parser]
)
trade_cmd.set_defaults(func=start_trading)
self._build_args(optionlist=ARGS_TRADE, parser=trade_cmd)
# add create-userdir subcommand
create_userdir_cmd = subparsers.add_parser(
'create-userdir',
"create-userdir",
help="Create user-data directory.",
)
create_userdir_cmd.set_defaults(func=start_create_userdir)
@ -262,7 +388,7 @@ class Arguments:
# add new-config subcommand
build_config_cmd = subparsers.add_parser(
'new-config',
"new-config",
help="Create new config",
)
build_config_cmd.set_defaults(func=start_new_config)
@ -270,7 +396,7 @@ class Arguments:
# add show-config subcommand
show_config_cmd = subparsers.add_parser(
'show-config',
"show-config",
help="Show resolved config",
)
show_config_cmd.set_defaults(func=start_show_config)
@ -278,7 +404,7 @@ class Arguments:
# add new-strategy subcommand
build_strategy_cmd = subparsers.add_parser(
'new-strategy',
"new-strategy",
help="Create new strategy",
)
build_strategy_cmd.set_defaults(func=start_new_strategy)
@ -286,8 +412,8 @@ class Arguments:
# Add download-data subcommand
download_data_cmd = subparsers.add_parser(
'download-data',
help='Download backtesting data.',
"download-data",
help="Download backtesting data.",
parents=[_common_parser],
)
download_data_cmd.set_defaults(func=start_download_data)
@ -295,8 +421,8 @@ class Arguments:
# Add convert-data subcommand
convert_data_cmd = subparsers.add_parser(
'convert-data',
help='Convert candle (OHLCV) data from one format to another.',
"convert-data",
help="Convert candle (OHLCV) data from one format to another.",
parents=[_common_parser],
)
convert_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=True))
@ -304,8 +430,8 @@ class Arguments:
# Add convert-trade-data subcommand
convert_trade_data_cmd = subparsers.add_parser(
'convert-trade-data',
help='Convert trade data from one format to another.',
"convert-trade-data",
help="Convert trade data from one format to another.",
parents=[_common_parser],
)
convert_trade_data_cmd.set_defaults(func=partial(start_convert_data, ohlcv=False))
@ -313,8 +439,8 @@ class Arguments:
# Add trades-to-ohlcv subcommand
convert_trade_data_cmd = subparsers.add_parser(
'trades-to-ohlcv',
help='Convert trade data to OHLCV data.',
"trades-to-ohlcv",
help="Convert trade data to OHLCV data.",
parents=[_common_parser],
)
convert_trade_data_cmd.set_defaults(func=start_convert_trades)
@ -322,8 +448,8 @@ class Arguments:
# Add list-data subcommand
list_data_cmd = subparsers.add_parser(
'list-data',
help='List downloaded data.',
"list-data",
help="List downloaded data.",
parents=[_common_parser],
)
list_data_cmd.set_defaults(func=start_list_data)
@ -331,17 +457,15 @@ class Arguments:
# Add backtesting subcommand
backtesting_cmd = subparsers.add_parser(
'backtesting',
help='Backtesting module.',
parents=[_common_parser, _strategy_parser]
"backtesting", help="Backtesting module.", parents=[_common_parser, _strategy_parser]
)
backtesting_cmd.set_defaults(func=start_backtesting)
self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)
# Add backtesting-show subcommand
backtesting_show_cmd = subparsers.add_parser(
'backtesting-show',
help='Show past Backtest results',
"backtesting-show",
help="Show past Backtest results",
parents=[_common_parser],
)
backtesting_show_cmd.set_defaults(func=start_backtesting_show)
@ -349,26 +473,22 @@ class Arguments:
# Add backtesting analysis subcommand
analysis_cmd = subparsers.add_parser(
'backtesting-analysis',
help='Backtest Analysis module.',
parents=[_common_parser]
"backtesting-analysis", help="Backtest Analysis module.", parents=[_common_parser]
)
analysis_cmd.set_defaults(func=start_analysis_entries_exits)
self._build_args(optionlist=ARGS_ANALYZE_ENTRIES_EXITS, parser=analysis_cmd)
# Add edge subcommand
edge_cmd = subparsers.add_parser(
'edge',
help='Edge module.',
parents=[_common_parser, _strategy_parser]
"edge", help="Edge module.", parents=[_common_parser, _strategy_parser]
)
edge_cmd.set_defaults(func=start_edge)
self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)
# Add hyperopt subcommand
hyperopt_cmd = subparsers.add_parser(
'hyperopt',
help='Hyperopt module.',
"hyperopt",
help="Hyperopt module.",
parents=[_common_parser, _strategy_parser],
)
hyperopt_cmd.set_defaults(func=start_hyperopt)
@ -376,8 +496,8 @@ class Arguments:
# Add hyperopt-list subcommand
hyperopt_list_cmd = subparsers.add_parser(
'hyperopt-list',
help='List Hyperopt results',
"hyperopt-list",
help="List Hyperopt results",
parents=[_common_parser],
)
hyperopt_list_cmd.set_defaults(func=start_hyperopt_list)
@ -385,8 +505,8 @@ class Arguments:
# Add hyperopt-show subcommand
hyperopt_show_cmd = subparsers.add_parser(
'hyperopt-show',
help='Show details of Hyperopt results',
"hyperopt-show",
help="Show details of Hyperopt results",
parents=[_common_parser],
)
hyperopt_show_cmd.set_defaults(func=start_hyperopt_show)
@ -394,8 +514,8 @@ class Arguments:
# Add list-exchanges subcommand
list_exchanges_cmd = subparsers.add_parser(
'list-exchanges',
help='Print available exchanges.',
"list-exchanges",
help="Print available exchanges.",
parents=[_common_parser],
)
list_exchanges_cmd.set_defaults(func=start_list_exchanges)
@ -403,8 +523,8 @@ class Arguments:
# Add list-markets subcommand
list_markets_cmd = subparsers.add_parser(
'list-markets',
help='Print markets on exchange.',
"list-markets",
help="Print markets on exchange.",
parents=[_common_parser],
)
list_markets_cmd.set_defaults(func=partial(start_list_markets, pairs_only=False))
@ -412,8 +532,8 @@ class Arguments:
# Add list-pairs subcommand
list_pairs_cmd = subparsers.add_parser(
'list-pairs',
help='Print pairs on exchange.',
"list-pairs",
help="Print pairs on exchange.",
parents=[_common_parser],
)
list_pairs_cmd.set_defaults(func=partial(start_list_markets, pairs_only=True))
@ -421,8 +541,8 @@ class Arguments:
# Add list-strategies subcommand
list_strategies_cmd = subparsers.add_parser(
'list-strategies',
help='Print available strategies.',
"list-strategies",
help="Print available strategies.",
parents=[_common_parser],
)
list_strategies_cmd.set_defaults(func=start_list_strategies)
@ -430,8 +550,8 @@ class Arguments:
# Add list-freqAI Models subcommand
list_freqaimodels_cmd = subparsers.add_parser(
'list-freqaimodels',
help='Print available freqAI models.',
"list-freqaimodels",
help="Print available freqAI models.",
parents=[_common_parser],
)
list_freqaimodels_cmd.set_defaults(func=start_list_freqAI_models)
@ -439,8 +559,8 @@ class Arguments:
# Add list-timeframes subcommand
list_timeframes_cmd = subparsers.add_parser(
'list-timeframes',
help='Print available timeframes for the exchange.',
"list-timeframes",
help="Print available timeframes for the exchange.",
parents=[_common_parser],
)
list_timeframes_cmd.set_defaults(func=start_list_timeframes)
@ -448,8 +568,8 @@ class Arguments:
# Add show-trades subcommand
show_trades = subparsers.add_parser(
'show-trades',
help='Show trades.',
"show-trades",
help="Show trades.",
parents=[_common_parser],
)
show_trades.set_defaults(func=start_show_trades)
@ -457,8 +577,8 @@ class Arguments:
# Add test-pairlist subcommand
test_pairlist_cmd = subparsers.add_parser(
'test-pairlist',
help='Test your pairlist configuration.',
"test-pairlist",
help="Test your pairlist configuration.",
)
test_pairlist_cmd.set_defaults(func=start_test_pairlist)
self._build_args(optionlist=ARGS_TEST_PAIRLIST, parser=test_pairlist_cmd)
@ -473,16 +593,16 @@ class Arguments:
# Add install-ui subcommand
install_ui_cmd = subparsers.add_parser(
'install-ui',
help='Install FreqUI',
"install-ui",
help="Install FreqUI",
)
install_ui_cmd.set_defaults(func=start_install_ui)
self._build_args(optionlist=ARGS_INSTALL_UI, parser=install_ui_cmd)
# Add Plotting subcommand
plot_dataframe_cmd = subparsers.add_parser(
'plot-dataframe',
help='Plot candles with indicators.',
"plot-dataframe",
help="Plot candles with indicators.",
parents=[_common_parser, _strategy_parser],
)
plot_dataframe_cmd.set_defaults(func=start_plot_dataframe)
@ -490,8 +610,8 @@ class Arguments:
# Plot profit
plot_profit_cmd = subparsers.add_parser(
'plot-profit',
help='Generate plot showing profits.',
"plot-profit",
help="Generate plot showing profits.",
parents=[_common_parser, _strategy_parser],
)
plot_profit_cmd.set_defaults(func=start_plot_profit)
@ -499,40 +619,36 @@ class Arguments:
# Add webserver subcommand
webserver_cmd = subparsers.add_parser(
'webserver',
help='Webserver module.',
parents=[_common_parser]
"webserver", help="Webserver module.", parents=[_common_parser]
)
webserver_cmd.set_defaults(func=start_webserver)
self._build_args(optionlist=ARGS_WEBSERVER, parser=webserver_cmd)
# Add strategy_updater subcommand
strategy_updater_cmd = subparsers.add_parser(
'strategy-updater',
help='updates outdated strategy files to the current version',
parents=[_common_parser]
"strategy-updater",
help="updates outdated strategy files to the current version",
parents=[_common_parser],
)
strategy_updater_cmd.set_defaults(func=start_strategy_update)
self._build_args(optionlist=ARGS_STRATEGY_UPDATER, parser=strategy_updater_cmd)
# Add lookahead_analysis subcommand
lookahead_analayis_cmd = subparsers.add_parser(
'lookahead-analysis',
"lookahead-analysis",
help="Check for potential look ahead bias.",
parents=[_common_parser, _strategy_parser]
parents=[_common_parser, _strategy_parser],
)
lookahead_analayis_cmd.set_defaults(func=start_lookahead_analysis)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS,
parser=lookahead_analayis_cmd)
self._build_args(optionlist=ARGS_LOOKAHEAD_ANALYSIS, parser=lookahead_analayis_cmd)
# Add recursive_analysis subcommand
recursive_analayis_cmd = subparsers.add_parser(
'recursive-analysis',
"recursive-analysis",
help="Check for potential recursive formula issue.",
parents=[_common_parser, _strategy_parser]
parents=[_common_parser, _strategy_parser],
)
recursive_analayis_cmd.set_defaults(func=start_recursive_analysis)
self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS,
parser=recursive_analayis_cmd)
self._build_args(optionlist=ARGS_RECURSIVE_ANALYSIS, parser=recursive_analayis_cmd)


@ -45,7 +45,7 @@ def ask_user_overwrite(config_path: Path) -> bool:
},
]
answers = prompt(questions)
return answers['overwrite']
return answers["overwrite"]
def ask_user_config() -> Dict[str, Any]:
@ -65,7 +65,7 @@ def ask_user_config() -> Dict[str, Any]:
"type": "text",
"name": "stake_currency",
"message": "Please insert your stake currency:",
"default": 'USDT',
"default": "USDT",
},
{
"type": "text",
@ -75,34 +75,33 @@ def ask_user_config() -> Dict[str, Any]:
"validate": lambda val: val == UNLIMITED_STAKE_AMOUNT or validate_is_float(val),
"filter": lambda val: '"' + UNLIMITED_STAKE_AMOUNT + '"'
if val == UNLIMITED_STAKE_AMOUNT
else val
else val,
},
{
"type": "text",
"name": "max_open_trades",
"message": "Please insert max_open_trades (Integer or -1 for unlimited open trades):",
"default": "3",
"validate": lambda val: validate_is_int(val)
"validate": lambda val: validate_is_int(val),
},
{
"type": "select",
"name": "timeframe_in_config",
"message": "Time",
"choices": ["Have the strategy define timeframe.", "Override in configuration."]
"choices": ["Have the strategy define timeframe.", "Override in configuration."],
},
{
"type": "text",
"name": "timeframe",
"message": "Please insert your desired timeframe (e.g. 5m):",
"default": "5m",
"when": lambda x: x["timeframe_in_config"] == 'Override in configuration.'
"when": lambda x: x["timeframe_in_config"] == "Override in configuration.",
},
{
"type": "text",
"name": "fiat_display_currency",
"message": "Please insert your display Currency (for reporting):",
"default": 'USD',
"default": "USD",
},
{
"type": "select",
@ -125,33 +124,33 @@ def ask_user_config() -> Dict[str, Any]:
"name": "trading_mode",
"message": "Do you want to trade Perpetual Swaps (perpetual futures)?",
"default": False,
"filter": lambda val: 'futures' if val else 'spot',
"when": lambda x: x["exchange_name"] in ['binance', 'gate', 'okx'],
"filter": lambda val: "futures" if val else "spot",
"when": lambda x: x["exchange_name"] in ["binance", "gate", "okx"],
},
{
"type": "autocomplete",
"name": "exchange_name",
"message": "Type your exchange name (Must be supported by ccxt)",
"choices": available_exchanges(),
"when": lambda x: x["exchange_name"] == 'other'
"when": lambda x: x["exchange_name"] == "other",
},
{
"type": "password",
"name": "exchange_key",
"message": "Insert Exchange Key",
"when": lambda x: not x['dry_run']
"when": lambda x: not x["dry_run"],
},
{
"type": "password",
"name": "exchange_secret",
"message": "Insert Exchange Secret",
"when": lambda x: not x['dry_run']
"when": lambda x: not x["dry_run"],
},
{
"type": "password",
"name": "exchange_key_password",
"message": "Insert Exchange API Key password",
"when": lambda x: not x['dry_run'] and x['exchange_name'] in ('kucoin', 'okx')
"when": lambda x: not x["dry_run"] and x["exchange_name"] in ("kucoin", "okx"),
},
{
"type": "confirm",
@ -163,13 +162,13 @@ def ask_user_config() -> Dict[str, Any]:
"type": "password",
"name": "telegram_token",
"message": "Insert Telegram token",
"when": lambda x: x['telegram']
"when": lambda x: x["telegram"],
},
{
"type": "password",
"name": "telegram_chat_id",
"message": "Insert Telegram chat id",
"when": lambda x: x['telegram']
"when": lambda x: x["telegram"],
},
{
"type": "confirm",
@ -180,23 +179,25 @@ def ask_user_config() -> Dict[str, Any]:
{
"type": "text",
"name": "api_server_listen_addr",
"message": ("Insert Api server Listen Address (0.0.0.0 for docker, "
"otherwise best left untouched)"),
"message": (
"Insert Api server Listen Address (0.0.0.0 for docker, "
"otherwise best left untouched)"
),
"default": "127.0.0.1" if not running_in_docker() else "0.0.0.0",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
{
"type": "text",
"name": "api_server_username",
"message": "Insert api-server username",
"default": "freqtrader",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
{
"type": "password",
"name": "api_server_password",
"message": "Insert api-server password",
"when": lambda x: x['api_server']
"when": lambda x: x["api_server"],
},
]
answers = prompt(questions)
@ -205,15 +206,11 @@ def ask_user_config() -> Dict[str, Any]:
# Interrupted questionary sessions return an empty dict.
raise OperationalException("User interrupted interactive questions.")
# Ensure default is set for non-futures exchanges
answers['trading_mode'] = answers.get('trading_mode', "spot")
answers['margin_mode'] = (
'isolated'
if answers.get('trading_mode') == 'futures'
else ''
)
answers["trading_mode"] = answers.get("trading_mode", "spot")
answers["margin_mode"] = "isolated" if answers.get("trading_mode") == "futures" else ""
# Force JWT token to be a random string
answers['api_server_jwt_key'] = secrets.token_hex()
answers['api_server_ws_token'] = secrets.token_urlsafe(25)
answers["api_server_jwt_key"] = secrets.token_hex()
answers["api_server_ws_token"] = secrets.token_urlsafe(25)
return answers
@ -225,26 +222,26 @@ def deploy_new_config(config_path: Path, selections: Dict[str, Any]) -> None:
:param selections: Dict containing selections taken by the user.
"""
from jinja2.exceptions import TemplateNotFound
try:
exchange_template = MAP_EXCHANGE_CHILDCLASS.get(
selections['exchange_name'], selections['exchange_name'])
selections["exchange_name"], selections["exchange_name"]
)
selections['exchange'] = render_template(
templatefile=f"subtemplates/exchange_{exchange_template}.j2",
arguments=selections
selections["exchange"] = render_template(
templatefile=f"subtemplates/exchange_{exchange_template}.j2", arguments=selections
)
except TemplateNotFound:
selections['exchange'] = render_template(
templatefile="subtemplates/exchange_generic.j2",
arguments=selections
selections["exchange"] = render_template(
templatefile="subtemplates/exchange_generic.j2", arguments=selections
)
config_text = render_template(templatefile='base_config.json.j2',
arguments=selections)
config_text = render_template(templatefile="base_config.json.j2", arguments=selections)
logger.info(f"Writing config to `{config_path}`.")
logger.info(
"Please make sure to check the configuration contents and adjust settings to your needs.")
"Please make sure to check the configuration contents and adjust settings to your needs."
)
config_path.write_text(config_text)
@ -255,7 +252,7 @@ def start_new_config(args: Dict[str, Any]) -> None:
Asking the user questions to fill out the template accordingly.
"""
config_path = Path(args['config'][0])
config_path = Path(args["config"][0])
chown_user_directory(config_path.parent)
if config_path.exists():
overwrite = ask_user_overwrite(config_path)
@ -264,22 +261,22 @@ def start_new_config(args: Dict[str, Any]) -> None:
else:
raise OperationalException(
f"Configuration file `{config_path}` already exists. "
"Please delete it or use a different configuration file name.")
"Please delete it or use a different configuration file name."
)
selections = ask_user_config()
deploy_new_config(config_path, selections)
def start_show_config(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE, set_dry=False)
# TODO: Sanitize from sensitive info before printing
print("Your combined configuration is:")
config_sanitized = sanitize_config(
config['original_config'],
show_sensitive=args.get('show_sensitive', False)
config["original_config"], show_sensitive=args.get("show_sensitive", False)
)
from rich import print_json
print_json(data=config_sanitized)

File diff suppressed because it is too large.


@ -23,14 +23,17 @@ logger = logging.getLogger(__name__)
def _check_data_config_download_sanity(config: Config) -> None:
if 'days' in config and 'timerange' in config:
raise ConfigurationError("--days and --timerange are mutually exclusive. "
"You can only specify one or the other.")
if "days" in config and "timerange" in config:
raise ConfigurationError(
"--days and --timerange are mutually exclusive. "
"You can only specify one or the other."
)
if 'pairs' not in config:
if "pairs" not in config:
raise ConfigurationError(
"Downloading data requires a list of pairs. "
"Please check the documentation on how to configure this.")
"Please check the documentation on how to configure this."
)
def start_download_data(args: Dict[str, Any]) -> None:
@ -49,38 +52,41 @@ def start_download_data(args: Dict[str, Any]) -> None:
def start_convert_trades(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
timerange = TimeRange()
# Remove stake-currency to skip checks which are not relevant for datadownload
config['stake_currency'] = ''
config["stake_currency"] = ""
if 'timeframes' not in config:
config['timeframes'] = DL_DATA_TIMEFRAMES
if "timeframes" not in config:
config["timeframes"] = DL_DATA_TIMEFRAMES
# Init exchange
exchange = ExchangeResolver.load_exchange(config, validate=False)
# Manual validations of relevant settings
for timeframe in config['timeframes']:
for timeframe in config["timeframes"]:
exchange.validate_timeframes(timeframe)
available_pairs = [
p for p in exchange.get_markets(
tradable_only=True, active_only=not config.get('include_inactive')
).keys()
p
for p in exchange.get_markets(
tradable_only=True, active_only=not config.get("include_inactive")
).keys()
]
expanded_pairs = dynamic_expand_pairlist(config, available_pairs)
# Convert downloaded trade data to different timeframes
convert_trades_to_ohlcv(
pairs=expanded_pairs, timeframes=config['timeframes'],
datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),
data_format_ohlcv=config['dataformat_ohlcv'],
data_format_trades=config['dataformat_trades'],
candle_type=config.get('candle_type_def', CandleType.SPOT)
pairs=expanded_pairs,
timeframes=config["timeframes"],
datadir=config["datadir"],
timerange=timerange,
erase=bool(config.get("erase")),
data_format_ohlcv=config["dataformat_ohlcv"],
data_format_trades=config["dataformat_trades"],
candle_type=config.get("candle_type_def", CandleType.SPOT),
)
@ -91,14 +97,19 @@ def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if ohlcv:
migrate_data(config)
convert_ohlcv_format(config,
convert_from=args['format_from'],
convert_to=args['format_to'],
erase=args['erase'])
convert_ohlcv_format(
config,
convert_from=args["format_from"],
convert_to=args["format_to"],
erase=args["erase"],
)
else:
convert_trades_format(config,
convert_from=args['format_from_trades'], convert_to=args['format_to'],
erase=args['erase'])
convert_trades_format(
config,
convert_from=args["format_from_trades"],
convert_to=args["format_to"],
erase=args["erase"],
)
def start_list_data(args: Dict[str, Any]) -> None:
@ -111,45 +122,59 @@ def start_list_data(args: Dict[str, Any]) -> None:
from tabulate import tabulate
from freqtrade.data.history import get_datahandler
dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])
dhc = get_datahandler(config["datadir"], config["dataformat_ohlcv"])
paircombs = dhc.ohlcv_get_available_data(
config['datadir'],
config.get('trading_mode', TradingMode.SPOT)
)
config["datadir"], config.get("trading_mode", TradingMode.SPOT)
)
if args['pairs']:
paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]
if args["pairs"]:
paircombs = [comb for comb in paircombs if comb[0] in args["pairs"]]
print(f"Found {len(paircombs)} pair / timeframe combinations.")
if not config.get('show_timerange'):
if not config.get("show_timerange"):
groupedpair = defaultdict(list)
for pair, timeframe, candle_type in sorted(
paircombs,
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
paircombs, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
):
groupedpair[(pair, candle_type)].append(timeframe)
if groupedpair:
print(tabulate([
(pair, ', '.join(timeframes), candle_type)
for (pair, candle_type), timeframes in groupedpair.items()
],
headers=("Pair", "Timeframe", "Type"),
tablefmt='psql', stralign='right'))
print(
tabulate(
[
(pair, ", ".join(timeframes), candle_type)
for (pair, candle_type), timeframes in groupedpair.items()
],
headers=("Pair", "Timeframe", "Type"),
tablefmt="psql",
stralign="right",
)
)
else:
paircombs1 = [(
pair, timeframe, candle_type,
*dhc.ohlcv_data_min_max(pair, timeframe, candle_type)
) for pair, timeframe, candle_type in paircombs]
paircombs1 = [
(pair, timeframe, candle_type, *dhc.ohlcv_data_min_max(pair, timeframe, candle_type))
for pair, timeframe, candle_type in paircombs
]
print(tabulate([
(pair, timeframe, candle_type,
start.strftime(DATETIME_PRINT_FORMAT),
end.strftime(DATETIME_PRINT_FORMAT), length)
for pair, timeframe, candle_type, start, end, length in sorted(
paircombs1,
key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]))
],
headers=("Pair", "Timeframe", "Type", 'From', 'To', 'Candles'),
tablefmt='psql', stralign='right'))
print(
tabulate(
[
(
pair,
timeframe,
candle_type,
start.strftime(DATETIME_PRINT_FORMAT),
end.strftime(DATETIME_PRINT_FORMAT),
length,
)
for pair, timeframe, candle_type, start, end, length in sorted(
paircombs1, key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])
)
],
headers=("Pair", "Timeframe", "Type", "From", "To", "Candles"),
tablefmt="psql",
stralign="right",
)
)


@ -19,9 +19,9 @@ def start_convert_db(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
init_db(config['db_url'])
init_db(config["db_url"])
session_target = Trade.session
init_db(config['db_url_from'])
init_db(config["db_url_from"])
logger.info("Starting db migration.")
trade_count = 0
@ -47,9 +47,11 @@ def start_convert_db(args: Dict[str, Any]) -> None:
max_order_id = session_target.scalar(select(func.max(Order.id)))
max_pairlock_id = session_target.scalar(select(func.max(PairLock.id)))
set_sequence_ids(session_target.get_bind(),
trade_id=max_trade_id,
order_id=max_order_id,
pairlock_id=max_pairlock_id)
set_sequence_ids(
session_target.get_bind(),
trade_id=max_trade_id,
order_id=max_order_id,
pairlock_id=max_pairlock_id,
)
logger.info(f"Migrated {trade_count} Trades, and {pairlock_count} Pairlocks.")


@ -38,7 +38,7 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st
"""
Deploy new strategy from template to strategy_path
"""
fallback = 'full'
fallback = "full"
attributes = render_template_with_fallback(
templatefile=f"strategy_subtemplates/strategy_attributes_{subtemplate}.j2",
templatefallbackfile=f"strategy_subtemplates/strategy_attributes_{fallback}.j2",
@ -64,33 +64,35 @@ def deploy_new_strategy(strategy_name: str, strategy_path: Path, subtemplate: st
templatefallbackfile="strategy_subtemplates/strategy_methods_empty.j2",
)
strategy_text = render_template(templatefile='base_strategy.py.j2',
arguments={"strategy": strategy_name,
"attributes": attributes,
"indicators": indicators,
"buy_trend": buy_trend,
"sell_trend": sell_trend,
"plot_config": plot_config,
"additional_methods": additional_methods,
})
strategy_text = render_template(
templatefile="base_strategy.py.j2",
arguments={
"strategy": strategy_name,
"attributes": attributes,
"indicators": indicators,
"buy_trend": buy_trend,
"sell_trend": sell_trend,
"plot_config": plot_config,
"additional_methods": additional_methods,
},
)
logger.info(f"Writing strategy to `{strategy_path}`.")
strategy_path.write_text(strategy_text)
def start_new_strategy(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if "strategy" in args and args["strategy"]:
new_path = config['user_data_dir'] / USERPATH_STRATEGIES / (args['strategy'] + '.py')
new_path = config["user_data_dir"] / USERPATH_STRATEGIES / (args["strategy"] + ".py")
if new_path.exists():
raise OperationalException(f"`{new_path}` already exists. "
"Please choose another Strategy Name.")
raise OperationalException(
f"`{new_path}` already exists. " "Please choose another Strategy Name."
)
deploy_new_strategy(args['strategy'], new_path, args['template'])
deploy_new_strategy(args["strategy"], new_path, args["template"])
else:
raise ConfigurationError("`new-strategy` requires --strategy to be set.")
@ -100,8 +102,8 @@ def clean_ui_subdir(directory: Path):
if directory.is_dir():
logger.info("Removing UI directory content.")
for p in reversed(list(directory.glob('**/*'))): # iterate contents from leaves to root
if p.name in ('.gitkeep', 'fallback_file.html'):
for p in reversed(list(directory.glob("**/*"))): # iterate contents from leaves to root
if p.name in (".gitkeep", "fallback_file.html"):
continue
if p.is_file():
p.unlink()
@ -110,11 +112,11 @@ def clean_ui_subdir(directory: Path):
def read_ui_version(dest_folder: Path) -> Optional[str]:
file = dest_folder / '.uiversion'
file = dest_folder / ".uiversion"
if not file.is_file():
return None
with file.open('r') as f:
with file.open("r") as f:
return f.read()
@ -133,12 +135,12 @@ def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):
destfile.mkdir(exist_ok=True)
else:
destfile.write_bytes(x.read())
with (dest_folder / '.uiversion').open('w') as f:
with (dest_folder / ".uiversion").open("w") as f:
f.write(version)
def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]:
base_url = 'https://api.github.com/repos/freqtrade/frequi/'
base_url = "https://api.github.com/repos/freqtrade/frequi/"
# Get base UI Repo path
resp = requests.get(f"{base_url}releases", timeout=req_timeout)
@ -146,42 +148,41 @@ def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]:
r = resp.json()
if version:
tmp = [x for x in r if x['name'] == version]
tmp = [x for x in r if x["name"] == version]
if tmp:
latest_version = tmp[0]['name']
assets = tmp[0].get('assets', [])
latest_version = tmp[0]["name"]
assets = tmp[0].get("assets", [])
else:
raise ValueError("UI-Version not found.")
else:
latest_version = r[0]['name']
assets = r[0].get('assets', [])
dl_url = ''
latest_version = r[0]["name"]
assets = r[0].get("assets", [])
dl_url = ""
if assets and len(assets) > 0:
dl_url = assets[0]['browser_download_url']
dl_url = assets[0]["browser_download_url"]
# URL not found - try assets url
if not dl_url:
assets = r[0]['assets_url']
assets = r[0]["assets_url"]
resp = requests.get(assets, timeout=req_timeout)
r = resp.json()
dl_url = r[0]['browser_download_url']
dl_url = r[0]["browser_download_url"]
return dl_url, latest_version
def start_install_ui(args: Dict[str, Any]) -> None:
dest_folder = Path(__file__).parents[1] / 'rpc/api_server/ui/installed/'
dest_folder = Path(__file__).parents[1] / "rpc/api_server/ui/installed/"
# First make sure the assets are removed.
dl_url, latest_version = get_ui_download_url(args.get('ui_version'))
dl_url, latest_version = get_ui_download_url(args.get("ui_version"))
curr_version = read_ui_version(dest_folder)
if curr_version == latest_version and not args.get('erase_ui_only'):
if curr_version == latest_version and not args.get("erase_ui_only"):
logger.info(f"UI already up-to-date, FreqUI Version {curr_version}.")
return
clean_ui_subdir(dest_folder)
if args.get('erase_ui_only'):
if args.get("erase_ui_only"):
logger.info("Erased UI directory content. Not downloading new version.")
else:
# Download a new version


@ -22,15 +22,15 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_colorized = config.get('print_colorized', False)
print_json = config.get('print_json', False)
export_csv = config.get('export_csv')
no_details = config.get('hyperopt_list_no_details', False)
print_colorized = config.get("print_colorized", False)
print_json = config.get("print_json", False)
export_csv = config.get("export_csv")
no_details = config.get("hyperopt_list_no_details", False)
no_header = False
results_file = get_latest_hyperopt_file(
config['user_data_dir'] / 'hyperopt_results',
config.get('hyperoptexportfilename'))
config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename")
)
# Previous evaluations
epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)
@ -40,21 +40,26 @@ def start_hyperopt_list(args: Dict[str, Any]) -> None:
if not export_csv:
try:
print(HyperoptTools.get_result_table(config, epochs, total_epochs,
not config.get('hyperopt_list_best', False),
print_colorized, 0))
print(
HyperoptTools.get_result_table(
config,
epochs,
total_epochs,
not config.get("hyperopt_list_best", False),
print_colorized,
0,
)
)
except KeyboardInterrupt:
print('User interrupted..')
print("User interrupted..")
if epochs and not no_details:
sorted_epochs = sorted(epochs, key=itemgetter('loss'))
sorted_epochs = sorted(epochs, key=itemgetter("loss"))
results = sorted_epochs[0]
HyperoptTools.show_epoch_details(results, total_epochs, print_json, no_header)
if epochs and export_csv:
HyperoptTools.export_csv_file(
config, epochs, export_csv
)
HyperoptTools.export_csv_file(config, epochs, export_csv)
def start_hyperopt_show(args: Dict[str, Any]) -> None:
@ -65,13 +70,13 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
print_json = config.get('print_json', False)
no_header = config.get('hyperopt_show_no_header', False)
print_json = config.get("print_json", False)
no_header = config.get("hyperopt_show_no_header", False)
results_file = get_latest_hyperopt_file(
config['user_data_dir'] / 'hyperopt_results',
config.get('hyperoptexportfilename'))
config["user_data_dir"] / "hyperopt_results", config.get("hyperoptexportfilename")
)
n = config.get('hyperopt_show_index', -1)
n = config.get("hyperopt_show_index", -1)
# Previous evaluations
epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)
@ -80,10 +85,12 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
if n > filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be less than {filtered_epochs + 1}.")
f"The index of the epoch to show should be less than {filtered_epochs + 1}."
)
if n < -filtered_epochs:
raise OperationalException(
f"The index of the epoch to show should be greater than {-filtered_epochs - 1}.")
f"The index of the epoch to show should be greater than {-filtered_epochs - 1}."
)
# Translate epoch index from human-readable format to pythonic
if n > 0:
@ -92,13 +99,18 @@ def start_hyperopt_show(args: Dict[str, Any]) -> None:
if epochs:
val = epochs[n]
metrics = val['results_metrics']
if 'strategy_name' in metrics:
strategy_name = metrics['strategy_name']
show_backtest_result(strategy_name, metrics,
metrics['stake_currency'], config.get('backtest_breakdown', []))
metrics = val["results_metrics"]
if "strategy_name" in metrics:
strategy_name = metrics["strategy_name"]
show_backtest_result(
strategy_name,
metrics,
metrics["stake_currency"],
config.get("backtest_breakdown", []),
)
HyperoptTools.try_export_params(config, strategy_name, val)
HyperoptTools.show_epoch_details(val, total_epochs, print_json, no_header,
header_str="Epoch details")
HyperoptTools.show_epoch_details(
val, total_epochs, print_json, no_header, header_str="Epoch details"
)


@ -26,42 +26,47 @@ def start_list_exchanges(args: Dict[str, Any]) -> None:
:param args: Cli args from Arguments()
:return: None
"""
exchanges = list_available_exchanges(args['list_exchanges_all'])
exchanges = list_available_exchanges(args["list_exchanges_all"])
if args['print_one_column']:
print('\n'.join([e['name'] for e in exchanges]))
if args["print_one_column"]:
print("\n".join([e["name"] for e in exchanges]))
else:
headers = {
'name': 'Exchange name',
'supported': 'Supported',
'trade_modes': 'Markets',
'comment': 'Reason',
}
headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})
"name": "Exchange name",
"supported": "Supported",
"trade_modes": "Markets",
"comment": "Reason",
}
headers.update({"valid": "Valid"} if args["list_exchanges_all"] else {})
def build_entry(exchange: ValidExchangesType, valid: bool):
valid_entry = {'valid': exchange['valid']} if valid else {}
valid_entry = {"valid": exchange["valid"]} if valid else {}
result: Dict[str, Union[str, bool]] = {
'name': exchange['name'],
"name": exchange["name"],
**valid_entry,
'supported': 'Official' if exchange['supported'] else '',
'trade_modes': ', '.join(
(f"{a['margin_mode']} " if a['margin_mode'] else '') + a['trading_mode']
for a in exchange['trade_modes']
"supported": "Official" if exchange["supported"] else "",
"trade_modes": ", ".join(
(f"{a['margin_mode']} " if a["margin_mode"] else "") + a["trading_mode"]
for a in exchange["trade_modes"]
),
'comment': exchange['comment'],
"comment": exchange["comment"],
}
return result
if args['list_exchanges_all']:
if args["list_exchanges_all"]:
print("All exchanges supported by the ccxt library:")
exchanges = [build_entry(e, True) for e in exchanges]
else:
print("Exchanges available for Freqtrade:")
exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False]
exchanges = [build_entry(e, False) for e in exchanges if e["valid"] is not False]
print(tabulate(exchanges, headers=headers, ))
print(
tabulate(
exchanges,
headers=headers,
)
)
def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
@ -71,26 +76,35 @@ def _print_objs_tabular(objs: List, print_colorized: bool) -> None:
yellow = Fore.YELLOW
reset = Style.RESET_ALL
else:
red = ''
yellow = ''
reset = ''
red = ""
yellow = ""
reset = ""
names = [s['name'] for s in objs]
objs_to_print = [{
'name': s['name'] if s['name'] else "--",
'location': s['location_rel'],
'status': (red + "LOAD FAILED" + reset if s['class'] is None
else "OK" if names.count(s['name']) == 1
else yellow + "DUPLICATE NAME" + reset)
} for s in objs]
names = [s["name"] for s in objs]
objs_to_print = [
{
"name": s["name"] if s["name"] else "--",
"location": s["location_rel"],
"status": (
red + "LOAD FAILED" + reset
if s["class"] is None
else "OK"
if names.count(s["name"]) == 1
else yellow + "DUPLICATE NAME" + reset
),
}
for s in objs
]
for idx, s in enumerate(objs):
if 'hyperoptable' in s:
objs_to_print[idx].update({
'hyperoptable': "Yes" if s['hyperoptable']['count'] > 0 else "No",
'buy-Params': len(s['hyperoptable'].get('buy', [])),
'sell-Params': len(s['hyperoptable'].get('sell', [])),
})
print(tabulate(objs_to_print, headers='keys', tablefmt='psql', stralign='right'))
if "hyperoptable" in s:
objs_to_print[idx].update(
{
"hyperoptable": "Yes" if s["hyperoptable"]["count"] > 0 else "No",
"buy-Params": len(s["hyperoptable"].get("buy", [])),
"sell-Params": len(s["hyperoptable"].get("sell", [])),
}
)
print(tabulate(objs_to_print, headers="keys", tablefmt="psql", stralign="right"))
def start_list_strategies(args: Dict[str, Any]) -> None:
@ -100,19 +114,20 @@ def start_list_strategies(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
strategy_objs = StrategyResolver.search_all_objects(
config, not args['print_one_column'], config.get('recursive_strategy_search', False))
config, not args["print_one_column"], config.get("recursive_strategy_search", False)
)
# Sort alphabetically
strategy_objs = sorted(strategy_objs, key=lambda x: x['name'])
strategy_objs = sorted(strategy_objs, key=lambda x: x["name"])
for obj in strategy_objs:
if obj['class']:
obj['hyperoptable'] = obj['class'].detect_all_parameters()
if obj["class"]:
obj["hyperoptable"] = obj["class"].detect_all_parameters()
else:
obj['hyperoptable'] = {'count': 0}
obj["hyperoptable"] = {"count": 0}
if args['print_one_column']:
print('\n'.join([s['name'] for s in strategy_objs]))
if args["print_one_column"]:
print("\n".join([s["name"] for s in strategy_objs]))
else:
_print_objs_tabular(strategy_objs, config.get('print_colorized', False))
_print_objs_tabular(strategy_objs, config.get("print_colorized", False))
def start_list_freqAI_models(args: Dict[str, Any]) -> None:
@ -121,13 +136,14 @@ def start_list_freqAI_models(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver
model_objs = FreqaiModelResolver.search_all_objects(config, not args['print_one_column'])
model_objs = FreqaiModelResolver.search_all_objects(config, not args["print_one_column"])
# Sort alphabetically
model_objs = sorted(model_objs, key=lambda x: x['name'])
if args['print_one_column']:
print('\n'.join([s['name'] for s in model_objs]))
model_objs = sorted(model_objs, key=lambda x: x["name"])
if args["print_one_column"]:
print("\n".join([s["name"] for s in model_objs]))
else:
_print_objs_tabular(model_objs, config.get('print_colorized', False))
_print_objs_tabular(model_objs, config.get("print_colorized", False))
def start_list_timeframes(args: Dict[str, Any]) -> None:
@ -136,16 +152,18 @@ def start_list_timeframes(args: Dict[str, Any]) -> None:
"""
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
# Do not use timeframe set in the config
config['timeframe'] = None
config["timeframe"] = None
# Init exchange
exchange = ExchangeResolver.load_exchange(config, validate=False)
if args['print_one_column']:
print('\n'.join(exchange.timeframes))
if args["print_one_column"]:
print("\n".join(exchange.timeframes))
else:
print(f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}")
print(
f"Timeframes available for the exchange `{exchange.name}`: "
f"{', '.join(exchange.timeframes)}"
)
def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
@ -161,51 +179,75 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
exchange = ExchangeResolver.load_exchange(config, validate=False)
# By default only active pairs/markets are to be shown
active_only = not args.get('list_pairs_all', False)
active_only = not args.get("list_pairs_all", False)
base_currencies = args.get('base_currencies', [])
quote_currencies = args.get('quote_currencies', [])
base_currencies = args.get("base_currencies", [])
quote_currencies = args.get("quote_currencies", [])
try:
pairs = exchange.get_markets(base_currencies=base_currencies,
quote_currencies=quote_currencies,
tradable_only=pairs_only,
active_only=active_only)
pairs = exchange.get_markets(
base_currencies=base_currencies,
quote_currencies=quote_currencies,
tradable_only=pairs_only,
active_only=active_only,
)
# Sort the pairs/markets by symbol
pairs = dict(sorted(pairs.items()))
except Exception as e:
raise OperationalException(f"Cannot get markets. Reason: {e}") from e
else:
summary_str = ((f"Exchange {exchange.name} has {len(pairs)} ") +
("active " if active_only else "") +
(plural(len(pairs), "pair" if pairs_only else "market")) +
(f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies else "") +
(" and" if base_currencies and quote_currencies else "") +
(f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies else ""))
summary_str = (
(f"Exchange {exchange.name} has {len(pairs)} ")
+ ("active " if active_only else "")
+ (plural(len(pairs), "pair" if pairs_only else "market"))
+ (
f" with {', '.join(base_currencies)} as base "
f"{plural(len(base_currencies), 'currency', 'currencies')}"
if base_currencies
else ""
)
+ (" and" if base_currencies and quote_currencies else "")
+ (
f" with {', '.join(quote_currencies)} as quote "
f"{plural(len(quote_currencies), 'currency', 'currencies')}"
if quote_currencies
else ""
)
)
headers = ["Id", "Symbol", "Base", "Quote", "Active",
"Spot", "Margin", "Future", "Leverage"]
headers = [
"Id",
"Symbol",
"Base",
"Quote",
"Active",
"Spot",
"Margin",
"Future",
"Leverage",
]
tabular_data = [{
'Id': v['id'],
'Symbol': v['symbol'],
'Base': v['base'],
'Quote': v['quote'],
'Active': market_is_active(v),
'Spot': 'Spot' if exchange.market_is_spot(v) else '',
'Margin': 'Margin' if exchange.market_is_margin(v) else '',
'Future': 'Future' if exchange.market_is_future(v) else '',
'Leverage': exchange.get_max_leverage(v['symbol'], 20)
} for _, v in pairs.items()]
tabular_data = [
{
"Id": v["id"],
"Symbol": v["symbol"],
"Base": v["base"],
"Quote": v["quote"],
"Active": market_is_active(v),
"Spot": "Spot" if exchange.market_is_spot(v) else "",
"Margin": "Margin" if exchange.market_is_margin(v) else "",
"Future": "Future" if exchange.market_is_future(v) else "",
"Leverage": exchange.get_max_leverage(v["symbol"], 20),
}
for _, v in pairs.items()
]
if (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
if (
args.get("print_one_column", False)
or args.get("list_pairs_print_json", False)
or args.get("print_csv", False)
):
# Print summary string in the log in case of machine-readable
# regular formats.
logger.info(f"{summary_str}.")
@ -215,24 +257,26 @@ def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:
print()
if pairs:
if args.get('print_list', False):
if args.get("print_list", False):
# print data as a list, with human-readable summary
print(f"{summary_str}: {', '.join(pairs.keys())}.")
elif args.get('print_one_column', False):
print('\n'.join(pairs.keys()))
elif args.get('list_pairs_print_json', False):
elif args.get("print_one_column", False):
print("\n".join(pairs.keys()))
elif args.get("list_pairs_print_json", False):
print(rapidjson.dumps(list(pairs.keys()), default=str))
elif args.get('print_csv', False):
elif args.get("print_csv", False):
writer = csv.DictWriter(sys.stdout, fieldnames=headers)
writer.writeheader()
writer.writerows(tabular_data)
else:
# print data as a table, with the human-readable summary
print(f"{summary_str}:")
print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right'))
elif not (args.get('print_one_column', False) or
args.get('list_pairs_print_json', False) or
args.get('print_csv', False)):
print(tabulate(tabular_data, headers="keys", tablefmt="psql", stralign="right"))
elif not (
args.get("print_one_column", False)
or args.get("list_pairs_print_json", False)
or args.get("print_csv", False)
):
print(f"{summary_str}.")
@ -243,21 +287,22 @@ def start_show_trades(args: Dict[str, Any]) -> None:
import json
from freqtrade.persistence import Trade, init_db
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
if 'db_url' not in config:
if "db_url" not in config:
raise ConfigurationError("--db-url is required for this command.")
logger.info(f'Using DB: "{parse_db_uri_for_logging(config["db_url"])}"')
init_db(config['db_url'])
init_db(config["db_url"])
tfilter = []
if config.get('trade_ids'):
tfilter.append(Trade.id.in_(config['trade_ids']))
if config.get("trade_ids"):
tfilter.append(Trade.id.in_(config["trade_ids"]))
trades = Trade.get_trades(tfilter).all()
logger.info(f"Printing {len(trades)} Trades: ")
if config.get('print_json', False):
if config.get("print_json", False):
print(json.dumps([trade.to_json() for trade in trades], indent=4))
else:
for trade in trades:

View File

@@ -21,20 +21,22 @@ def setup_optimize_configuration(args: Dict[str, Any], method: RunMode) -> Dict[
config = setup_utils_configuration(args, method)
no_unlimited_runmodes = {
RunMode.BACKTEST: 'backtesting',
RunMode.HYPEROPT: 'hyperoptimization',
RunMode.BACKTEST: "backtesting",
RunMode.HYPEROPT: "hyperoptimization",
}
if method in no_unlimited_runmodes.keys():
wallet_size = config['dry_run_wallet'] * config['tradable_balance_ratio']
wallet_size = config["dry_run_wallet"] * config["tradable_balance_ratio"]
# tradable_balance_ratio
if (config['stake_amount'] != constants.UNLIMITED_STAKE_AMOUNT
and config['stake_amount'] > wallet_size):
wallet = fmt_coin(wallet_size, config['stake_currency'])
stake = fmt_coin(config['stake_amount'], config['stake_currency'])
if (
config["stake_amount"] != constants.UNLIMITED_STAKE_AMOUNT
and config["stake_amount"] > wallet_size
):
wallet = fmt_coin(wallet_size, config["stake_currency"])
stake = fmt_coin(config["stake_amount"], config["stake_currency"])
raise ConfigurationError(
f"Starting balance ({wallet}) is smaller than stake_amount {stake}. "
f"Wallet is calculated as `dry_run_wallet * tradable_balance_ratio`."
)
)
return config
@@ -51,7 +53,7 @@ def start_backtesting(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.BACKTEST)
logger.info('Starting freqtrade in Backtesting mode')
logger.info("Starting freqtrade in Backtesting mode")
# Initialize backtesting object
backtesting = Backtesting(config)
@@ -68,7 +70,7 @@ def start_backtesting_show(args: Dict[str, Any]) -> None:
from freqtrade.data.btanalysis import load_backtest_stats
from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist
results = load_backtest_stats(config['exportfilename'])
results = load_backtest_stats(config["exportfilename"])
show_backtest_results(config, results)
show_sorted_pairlist(config, results)
@@ -87,20 +89,20 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
from freqtrade.optimize.hyperopt import Hyperopt
except ImportError as e:
raise OperationalException(
f"{e}. Please ensure that the hyperopt dependencies are installed.") from e
f"{e}. Please ensure that the hyperopt dependencies are installed."
) from e
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.HYPEROPT)
logger.info('Starting freqtrade in Hyperopt mode')
logger.info("Starting freqtrade in Hyperopt mode")
lock = FileLock(Hyperopt.get_lock_filename(config))
try:
with lock.acquire(timeout=1):
# Remove noisy log messages
logging.getLogger('hyperopt.tpe').setLevel(logging.WARNING)
logging.getLogger('filelock').setLevel(logging.WARNING)
logging.getLogger("hyperopt.tpe").setLevel(logging.WARNING)
logging.getLogger("filelock").setLevel(logging.WARNING)
# Initialize backtesting object
hyperopt = Hyperopt(config)
@@ -108,9 +110,11 @@ def start_hyperopt(args: Dict[str, Any]) -> None:
except Timeout:
logger.info("Another running instance of freqtrade Hyperopt detected.")
logger.info("Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines.")
logger.info(
"Simultaneous execution of multiple Hyperopt commands is not supported. "
"Hyperopt module is resource hungry. Please run your Hyperopt sequentially "
"or on separate machines."
)
logger.info("Quitting now.")
# TODO: return False here in order to help freqtrade to exit
# with non-zero exit code...
@@ -127,7 +131,7 @@ def start_edge(args: Dict[str, Any]) -> None:
# Initialize configuration
config = setup_optimize_configuration(args, RunMode.EDGE)
logger.info('Starting freqtrade in Edge mode')
logger.info("Starting freqtrade in Edge mode")
# Initialize Edge object
edge_cli = EdgeCli(config)

View File

@@ -17,28 +17,29 @@ def start_test_pairlist(args: Dict[str, Any]) -> None:
"""
from freqtrade.persistence import FtNoDBContext
from freqtrade.plugins.pairlistmanager import PairListManager
config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)
exchange = ExchangeResolver.load_exchange(config, validate=False)
quote_currencies = args.get('quote_currencies')
quote_currencies = args.get("quote_currencies")
if not quote_currencies:
quote_currencies = [config.get('stake_currency')]
quote_currencies = [config.get("stake_currency")]
results = {}
with FtNoDBContext():
for curr in quote_currencies:
config['stake_currency'] = curr
config["stake_currency"] = curr
pairlists = PairListManager(exchange, config)
pairlists.refresh_pairlist()
results[curr] = pairlists.whitelist
for curr, pairlist in results.items():
if not args.get('print_one_column', False) and not args.get('list_pairs_print_json', False):
if not args.get("print_one_column", False) and not args.get("list_pairs_print_json", False):
print(f"Pairs for {curr}: ")
if args.get('print_one_column', False):
print('\n'.join(pairlist))
elif args.get('list_pairs_print_json', False):
if args.get("print_one_column", False):
print("\n".join(pairlist))
elif args.get("list_pairs_print_json", False):
print(rapidjson.dumps(list(pairlist), default=str))
else:
print(pairlist)

View File

@@ -6,10 +6,11 @@ from freqtrade.exceptions import ConfigurationError
def validate_plot_args(args: Dict[str, Any]) -> None:
if not args.get('datadir') and not args.get('config'):
if not args.get("datadir") and not args.get("config"):
raise ConfigurationError(
"You need to specify either `--datadir` or `--config` "
"for plot-profit and plot-dataframe.")
"for plot-profit and plot-dataframe."
)
def start_plot_dataframe(args: Dict[str, Any]) -> None:
@@ -18,6 +19,7 @@ def start_plot_dataframe(args: Dict[str, Any]) -> None:
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import load_and_plot_trades
validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)
@@ -30,6 +32,7 @@ def start_plot_profit(args: Dict[str, Any]) -> None:
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import plot_profit
validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)

View File

@@ -26,13 +26,15 @@ def start_strategy_update(args: Dict[str, Any]) -> None:
config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
strategy_objs = StrategyResolver.search_all_objects(
config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))
config, enum_failed=False, recursive=config.get("recursive_strategy_search", False)
)
filtered_strategy_objs = []
if args['strategy_list']:
if args["strategy_list"]:
filtered_strategy_objs = [
strategy_obj for strategy_obj in strategy_objs
if strategy_obj['name'] in args['strategy_list']
strategy_obj
for strategy_obj in strategy_objs
if strategy_obj["name"] in args["strategy_list"]
]
else:
@@ -41,8 +43,8 @@ def start_strategy_update(args: Dict[str, Any]) -> None:
processed_locations = set()
for strategy_obj in filtered_strategy_objs:
if strategy_obj['location'] not in processed_locations:
processed_locations.add(strategy_obj['location'])
if strategy_obj["location"] not in processed_locations:
processed_locations.add(strategy_obj["location"])
start_conversion(strategy_obj, config)