# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement
"""
This module contains the hyperopt logic
"""
import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import rapidjson
from colorama import init as colorama_init
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from pandas import DataFrame
from rich.progress import (
BarColumn,
MofNCompleteColumn,
Progress,
TaskProgressColumn,
TextColumn,
TimeElapsedColumn,
TimeRemainingColumn,
)

from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.optimize.backtesting import Backtesting

# Import HyperOptAuto and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import (
HyperoptStateContainer,
HyperoptTools,
hyperopt_serializer,
)
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver


# Suppress scikit-learn FutureWarnings from skopt
with warnings.catch_warnings():
    warnings.filterwarnings("ignore", category=FutureWarning)
    from skopt import Optimizer
    from skopt.space import Dimension


logger = logging.getLogger(__name__)

INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to limit memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

MAX_LOSS = 100000  # just a big enough number to be a bad result in loss optimization


class Hyperopt:
"""
Hyperopt class, this class contains all the logic to run a hyperopt simulation
2022-09-26 08:11:00 +00:00
To start a hyperopt run:
hyperopt = Hyperopt(config)
hyperopt.start()
"""
2020-01-31 21:37:05 +00:00
2022-09-18 11:20:36 +00:00
def __init__(self, config: Config) -> None:
self.buy_space: List[Dimension] = []
self.sell_space: List[Dimension] = []
self.protection_space: List[Dimension] = []
self.roi_space: List[Dimension] = []
self.stoploss_space: List[Dimension] = []
self.trailing_space: List[Dimension] = []
self.max_open_trades_space: List[Dimension] = []
self.dimensions: List[Dimension] = []
self.config = config
self.min_date: datetime
self.max_date: datetime
self.backtesting = Backtesting(self.config)
self.pairlist = self.backtesting.pairlists.whitelist
self.custom_hyperopt: HyperOptAuto
self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
HyperoptStateContainer.set_state(HyperoptState.STARTUP)
if not self.config.get("hyperopt"):
self.custom_hyperopt = HyperOptAuto(self.config)
else:
raise OperationalException(
"Using separate Hyperopt files has been removed in 2021.9. Please convert "
"your existing Hyperopt file to the new Hyperoptable strategy interface"
)
self.backtesting._set_strategy(self.backtesting.strategylist[0])
self.custom_hyperopt.strategy = self.backtesting.strategy
self.hyperopt_pickle_magic(self.backtesting.strategy.__class__.__bases__)
self.custom_hyperoptloss: IHyperOptLoss = HyperOptLossResolver.load_hyperoptloss(
self.config
)
self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
strategy = str(self.config["strategy"])
self.results_file: Path = (
self.config["user_data_dir"]
/ "hyperopt_results"
/ f"strategy_{strategy}_{time_now}.fthypt"
)
self.data_pickle_file = (
self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
)
self.total_epochs = config.get("epochs", 0)
self.current_best_loss = 100
self.clean_hyperopt()
self.market_change = 0.0
self.num_epochs_saved = 0
self.current_best_epoch: Optional[Dict[str, Any]] = None
        # Use max_open_trades for hyperopt as well, unless --disable-max-market-positions is set
if not self.config.get("use_max_market_positions", True):
logger.debug("Ignoring max_open_trades (--disable-max-market-positions was used) ...")
self.backtesting.strategy.max_open_trades = float("inf")
config.update({"max_open_trades": self.backtesting.strategy.max_open_trades})
if HyperoptTools.has_space(self.config, "sell"):
# Make sure use_exit_signal is enabled
self.config["use_exit_signal"] = True
self.print_all = self.config.get("print_all", False)
self.hyperopt_table_header = 0
self.print_colorized = self.config.get("print_colorized", False)
self.print_json = self.config.get("print_json", False)

    @staticmethod
def get_lock_filename(config: Config) -> str:
return str(config["user_data_dir"] / "hyperopt.lock")

    def clean_hyperopt(self) -> None:
"""
Remove hyperopt pickle files to restart hyperopt.
"""
for f in [self.data_pickle_file, self.results_file]:
p = Path(f)
if p.is_file():
logger.info(f"Removing `{p}`.")
p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
"""
Hyperopt magic to allow strategy inheritance across files.
        For this to properly work, we need to register the module of the imported class
        with pickle by value.
"""
for modules in bases:
if modules.__name__ != "IStrategy":
cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
self.hyperopt_pickle_magic(modules.__bases__)
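        # Illustrative (hypothetical) layout: for `class MyStrategy(BaseStrategy)` with
        # BaseStrategy defined in another user file, the recursion above registers that
        # file's module with cloudpickle for pickle-by-value, so joblib workers can
        # unpickle the strategy without importing that module themselves. IStrategy's
        # own module is skipped, since freqtrade itself is importable everywhere.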

    def _get_params_dict(self, dimensions: List[Dimension], raw_params: List[Any]) -> Dict:
        # Ensure the number of dimensions matches
        # the number of parameters in the list.
if len(raw_params) != len(dimensions):
raise ValueError("Mismatch in number of search-space dimensions.")
# Return a dict where the keys are the names of the dimensions
# and the values are taken from the list of parameters.
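        # Illustrative example with a hypothetical dimension: for
        #     dimensions = [skopt.space.Integer(5, 50, name="buy_rsi")]
        # and raw_params = [30], the result is {"buy_rsi": 30}.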
return {d.name: v for d, v in zip(dimensions, raw_params)}

    def _save_result(self, epoch: Dict) -> None:
"""
Save hyperopt results to file
        Store one line per epoch.
        While the file as a whole is not a valid json object - this allows appending easily.
:param epoch: result dictionary for this epoch.
"""
epoch[FTHYPT_FILEVERSION] = 2
with self.results_file.open("a") as f:
rapidjson.dump(
epoch,
f,
default=hyperopt_serializer,
number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
)
f.write("\n")
self.num_epochs_saved += 1
logger.debug(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
# Store hyperopt filename
latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def _get_params_details(self, params: Dict) -> Dict:
"""
Return the params for each space
"""
result: Dict = {}
if HyperoptTools.has_space(self.config, "buy"):
result["buy"] = {p.name: params.get(p.name) for p in self.buy_space}
if HyperoptTools.has_space(self.config, "sell"):
result["sell"] = {p.name: params.get(p.name) for p in self.sell_space}
if HyperoptTools.has_space(self.config, "protection"):
result["protection"] = {p.name: params.get(p.name) for p in self.protection_space}
if HyperoptTools.has_space(self.config, "roi"):
result["roi"] = {
str(k): v for k, v in self.custom_hyperopt.generate_roi_table(params).items()
}
if HyperoptTools.has_space(self.config, "stoploss"):
result["stoploss"] = {p.name: params.get(p.name) for p in self.stoploss_space}
if HyperoptTools.has_space(self.config, "trailing"):
result["trailing"] = self.custom_hyperopt.generate_trailing_params(params)
if HyperoptTools.has_space(self.config, "trades"):
result["max_open_trades"] = {
"max_open_trades": self.backtesting.strategy.max_open_trades
if self.backtesting.strategy.max_open_trades != float("inf")
else -1
}
return result

    def _get_no_optimize_details(self) -> Dict[str, Any]:
"""
Get non-optimized parameters
"""
result: Dict[str, Any] = {}
strategy = self.backtesting.strategy
if not HyperoptTools.has_space(self.config, "roi"):
result["roi"] = {str(k): v for k, v in strategy.minimal_roi.items()}
if not HyperoptTools.has_space(self.config, "stoploss"):
result["stoploss"] = {"stoploss": strategy.stoploss}
if not HyperoptTools.has_space(self.config, "trailing"):
result["trailing"] = {
"trailing_stop": strategy.trailing_stop,
"trailing_stop_positive": strategy.trailing_stop_positive,
"trailing_stop_positive_offset": strategy.trailing_stop_positive_offset,
"trailing_only_offset_is_reached": strategy.trailing_only_offset_is_reached,
}
if not HyperoptTools.has_space(self.config, "trades"):
result["max_open_trades"] = {"max_open_trades": strategy.max_open_trades}
return result

    def print_results(self, results) -> None:
"""
        Log results if they are better than any previous evaluation
TODO: this should be moved to HyperoptTools too
"""
is_best = results["is_best"]
if self.print_all or is_best:
print(
HyperoptTools.get_result_table(
self.config,
results,
self.total_epochs,
self.print_all,
self.print_colorized,
self.hyperopt_table_header,
)
)
self.hyperopt_table_header = 2

    def init_spaces(self):
"""
Assign the dimensions in the hyperoptimization space.
"""
if HyperoptTools.has_space(self.config, "protection"):
# Protections can only be optimized when using the Parameter interface
logger.debug("Hyperopt has 'protection' space")
# Enable Protections if protection space is selected.
self.config["enable_protections"] = True
self.backtesting.enable_protections = True
self.protection_space = self.custom_hyperopt.protection_space()
if HyperoptTools.has_space(self.config, "buy"):
logger.debug("Hyperopt has 'buy' space")
self.buy_space = self.custom_hyperopt.buy_indicator_space()
if HyperoptTools.has_space(self.config, "sell"):
logger.debug("Hyperopt has 'sell' space")
self.sell_space = self.custom_hyperopt.sell_indicator_space()
if HyperoptTools.has_space(self.config, "roi"):
logger.debug("Hyperopt has 'roi' space")
self.roi_space = self.custom_hyperopt.roi_space()
if HyperoptTools.has_space(self.config, "stoploss"):
logger.debug("Hyperopt has 'stoploss' space")
self.stoploss_space = self.custom_hyperopt.stoploss_space()
if HyperoptTools.has_space(self.config, "trailing"):
logger.debug("Hyperopt has 'trailing' space")
self.trailing_space = self.custom_hyperopt.trailing_space()
if HyperoptTools.has_space(self.config, "trades"):
logger.debug("Hyperopt has 'trades' space")
self.max_open_trades_space = self.custom_hyperopt.max_open_trades_space()
self.dimensions = (
self.buy_space
+ self.sell_space
+ self.protection_space
+ self.roi_space
+ self.stoploss_space
+ self.trailing_space
+ self.max_open_trades_space
)

    def assign_params(self, params_dict: Dict, category: str) -> None:
"""
Assign hyperoptable parameters
"""
for attr_name, attr in self.backtesting.strategy.enumerate_parameters(category):
if attr.optimize:
# noinspection PyProtectedMember
attr.value = params_dict[attr_name]

    def generate_optimizer(self, raw_params: List[Any]) -> Dict[str, Any]:
"""
        Objective function used by the optimizer.
        Called once per epoch to optimize whatever is configured.
        Keep this function as optimized as possible!
"""
HyperoptStateContainer.set_state(HyperoptState.OPTIMIZE)
backtest_start_time = datetime.now(timezone.utc)
params_dict = self._get_params_dict(self.dimensions, raw_params)
# Apply parameters
if HyperoptTools.has_space(self.config, "buy"):
self.assign_params(params_dict, "buy")
if HyperoptTools.has_space(self.config, "sell"):
self.assign_params(params_dict, "sell")
if HyperoptTools.has_space(self.config, "protection"):
self.assign_params(params_dict, "protection")
if HyperoptTools.has_space(self.config, "roi"):
self.backtesting.strategy.minimal_roi = self.custom_hyperopt.generate_roi_table(
params_dict
)
if HyperoptTools.has_space(self.config, "stoploss"):
self.backtesting.strategy.stoploss = params_dict["stoploss"]
if HyperoptTools.has_space(self.config, "trailing"):
d = self.custom_hyperopt.generate_trailing_params(params_dict)
self.backtesting.strategy.trailing_stop = d["trailing_stop"]
self.backtesting.strategy.trailing_stop_positive = d["trailing_stop_positive"]
self.backtesting.strategy.trailing_stop_positive_offset = d[
"trailing_stop_positive_offset"
]
self.backtesting.strategy.trailing_only_offset_is_reached = d[
"trailing_only_offset_is_reached"
]
if HyperoptTools.has_space(self.config, "trades"):
if self.config["stake_amount"] == "unlimited" and (
params_dict["max_open_trades"] == -1 or params_dict["max_open_trades"] == 0
):
# Ignore unlimited max open trades if stake amount is unlimited
params_dict.update({"max_open_trades": self.config["max_open_trades"]})
updated_max_open_trades = (
int(params_dict["max_open_trades"])
if (params_dict["max_open_trades"] != -1 and params_dict["max_open_trades"] != 0)
else float("inf")
)
self.config.update({"max_open_trades": updated_max_open_trades})
self.backtesting.strategy.max_open_trades = updated_max_open_trades
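        # Mapping above: max_open_trades of -1 or 0 normally means "unlimited"
        # (float("inf")); with stake_amount == "unlimited" the configured value is kept
        # instead, since the per-trade stake is derived by dividing the available
        # balance by max_open_trades.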
with self.data_pickle_file.open("rb") as f:
processed = load(f, mmap_mode="r")
if self.analyze_per_epoch:
# Data is not yet analyzed, rerun populate_indicators.
processed = self.advise_and_trim(processed)
bt_results = self.backtesting.backtest(
processed=processed, start_date=self.min_date, end_date=self.max_date
)
backtest_end_time = datetime.now(timezone.utc)
bt_results.update(
{
"backtest_start_time": int(backtest_start_time.timestamp()),
"backtest_end_time": int(backtest_end_time.timestamp()),
}
)
return self._get_results_dict(
bt_results, self.min_date, self.max_date, params_dict, processed=processed
)

    def _get_results_dict(
self, backtesting_results, min_date, max_date, params_dict, processed: Dict[str, DataFrame]
) -> Dict[str, Any]:
params_details = self._get_params_details(params_dict)
strat_stats = generate_strategy_stats(
self.pairlist,
self.backtesting.strategy.get_strategy_name(),
backtesting_results,
min_date,
max_date,
market_change=self.market_change,
is_hyperopt=True,
)
results_explanation = HyperoptTools.format_results_explanation_string(
strat_stats, self.config["stake_currency"]
)
not_optimized = self.backtesting.strategy.get_no_optimize_params()
not_optimized = deep_merge_dicts(not_optimized, self._get_no_optimize_details())
trade_count = strat_stats["total_trades"]
total_profit = strat_stats["profit_total"]
        # If this evaluation contains too few trades to be
        # interesting -- consider it as 'bad' (assign the max. loss value)
        # in order to cast this hyperspace point away from the optimization
        # path. We do not want to optimize 'hodl' strategies.
loss: float = MAX_LOSS
if trade_count >= self.config["hyperopt_min_trades"]:
loss = self.calculate_loss(
results=backtesting_results["results"],
trade_count=trade_count,
min_date=min_date,
max_date=max_date,
config=self.config,
processed=processed,
backtest_stats=strat_stats,
)
return {
"loss": loss,
"params_dict": params_dict,
"params_details": params_details,
"params_not_optimized": not_optimized,
"results_metrics": strat_stats,
"results_explanation": results_explanation,
"total_profit": total_profit,
}

    def get_optimizer(self, dimensions: List[Dimension], cpu_count) -> Optimizer:
estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)
acq_optimizer = "sampling"
if isinstance(estimator, str):
if estimator not in ("GP", "RF", "ET", "GBRT"):
raise OperationalException(f"Estimator {estimator} not supported.")
else:
acq_optimizer = "auto"
logger.info(f"Using estimator {estimator}.")
return Optimizer(
dimensions,
base_estimator=estimator,
acq_optimizer=acq_optimizer,
n_initial_points=INITIAL_POINTS,
acq_optimizer_kwargs={"n_jobs": cpu_count},
random_state=self.random_state,
model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
)

    def run_optimizer_parallel(self, parallel: Parallel, asked: List[List]) -> List[Dict[str, Any]]:
"""Start optimizer in a parallel way"""
return parallel(
delayed(wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked
)

    def _set_random_state(self, random_state: Optional[int]) -> int:
return random_state or random.randint(1, 2**16 - 1)
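        # Note: `or` also replaces an explicit 0 with a random seed; the chosen value is
        # logged at start(), so a run can be reproduced by passing it back via
        # --random-state.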

    def advise_and_trim(self, data: Dict[str, DataFrame]) -> Dict[str, DataFrame]:
preprocessed = self.backtesting.strategy.advise_all_indicators(data)
# Trim startup period from analyzed dataframe to get correct dates for output.
# This is only used to keep track of min/max date after trimming.
# The result is NOT returned from this method, actual trimming happens in backtesting.
trimmed = trim_dataframes(preprocessed, self.timerange, self.backtesting.required_startup)
self.min_date, self.max_date = get_timerange(trimmed)
if not self.market_change:
self.market_change = calculate_market_change(trimmed, "close")
# Real trimming will happen as part of backtesting.
return preprocessed

    def prepare_hyperopt_data(self) -> None:
HyperoptStateContainer.set_state(HyperoptState.DATALOAD)
data, self.timerange = self.backtesting.load_bt_data()
self.backtesting.load_bt_data_detail()
logger.info("Dataload complete. Calculating indicators")
if not self.analyze_per_epoch:
HyperoptStateContainer.set_state(HyperoptState.INDICATORS)
preprocessed = self.advise_and_trim(data)
logger.info(
f"Hyperopting with data from "
f"{self.min_date.strftime(DATETIME_PRINT_FORMAT)} "
f"up to {self.max_date.strftime(DATETIME_PRINT_FORMAT)} "
f"({(self.max_date - self.min_date).days} days).."
)
# Store non-trimmed data - will be trimmed after signal generation.
dump(preprocessed, self.data_pickle_file)
else:
dump(data, self.data_pickle_file)

    def get_asked_points(self, n_points: int) -> Tuple[List[List[Any]], List[bool]]:
"""
Enforce points returned from `self.opt.ask` have not been already evaluated
Steps:
1. Try to get points using `self.opt.ask` first
2. Discard the points that have already been evaluated
3. Retry using `self.opt.ask` up to 3 times
4. If still some points are missing in respect to `n_points`, random sample some points
5. Repeat until at least `n_points` points in the `asked_non_tried` list
2022-03-20 16:03:07 +00:00
6. Return a list with length truncated at `n_points`
2022-04-23 07:44:04 +00:00
"""

        def unique_list(a_list):
new_list = []
for item in a_list:
if item not in new_list:
new_list.append(item)
return new_list
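        # (Order-preserving de-duplication; a set() is not usable here because the
        # candidate points are unhashable lists.)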
i = 0
asked_non_tried: List[List[Any]] = []
is_random_non_tried: List[bool] = []
while i < 5 and len(asked_non_tried) < n_points:
if i < 3:
self.opt.cache_ = {}
asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
is_random = [False for _ in range(len(asked))]
else:
asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
is_random = [True for _ in range(len(asked))]
is_random_non_tried += [
rand
for x, rand in zip(asked, is_random)
if x not in self.opt.Xi and x not in asked_non_tried
]
asked_non_tried += [
x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
]
i += 1
if asked_non_tried:
return (
asked_non_tried[: min(len(asked_non_tried), n_points)],
is_random_non_tried[: min(len(asked_non_tried), n_points)],
)
else:
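            # Fallback: 5 rounds produced no untried points; return whatever the
            # optimizer suggests and accept possible duplicates rather than stalling.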
return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]

    def evaluate_result(self, val: Dict[str, Any], current: int, is_random: bool):
"""
Evaluate results returned from generate_optimizer
"""
val["current_epoch"] = current
val["is_initial_point"] = current <= INITIAL_POINTS
logger.debug("Optimizer epoch evaluated: %s", val)
is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
# This value is assigned here and not in the optimization method
# to keep proper order in the list of results. That's because
# evaluations can take different time. Here they are aligned in the
# order they will be shown to the user.
val["is_best"] = is_best
val["is_random"] = is_random
self.print_results(val)
if is_best:
self.current_best_loss = val["loss"]
self.current_best_epoch = val
self._save_result(val)

    def start(self) -> None:
self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
logger.info(f"Using optimizer random state: {self.random_state}")
self.hyperopt_table_header = -1
# Initialize spaces ...
self.init_spaces()
self.prepare_hyperopt_data()
# We don't need exchange instance anymore while running hyperopt
self.backtesting.exchange.close()
self.backtesting.exchange._api = None
self.backtesting.exchange._api_async = None
self.backtesting.exchange.loop = None # type: ignore
self.backtesting.exchange._loop_lock = None # type: ignore
self.backtesting.exchange._cache_lock = None # type: ignore
# self.backtesting.exchange = None # type: ignore
self.backtesting.pairlists = None # type: ignore
cpus = cpu_count()
logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
config_jobs = self.config.get("hyperopt_jobs", -1)
logger.info(f"Number of parallel jobs set as: {config_jobs}")
self.opt = self.get_optimizer(self.dimensions, config_jobs)
if self.print_colorized:
colorama_init(autoreset=True)
try:
with Parallel(n_jobs=config_jobs) as parallel:
jobs = parallel._effective_n_jobs()
logger.info(f"Effective number of parallel workers used: {jobs}")
# Define progressbar
with Progress(
TextColumn("[progress.description]{task.description}"),
BarColumn(bar_width=None),
MofNCompleteColumn(),
TaskProgressColumn(),
"",
TimeElapsedColumn(),
"",
TimeRemainingColumn(),
expand=True,
) as pbar:
task = pbar.add_task("Epochs", total=self.total_epochs)
start = 0
if self.analyze_per_epoch:
# First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load its informative cache.
asked, is_random = self.get_asked_points(n_points=1)
f_val0 = self.generate_optimizer(asked[0])
self.opt.tell(asked, [f_val0["loss"]])
self.evaluate_result(f_val0, 1, is_random[0])
pbar.update(task, advance=1)
start += 1
evals = ceil((self.total_epochs - start) / jobs)
for i in range(evals):
# Correct the number of epochs to be processed for the last
# iteration (should not exceed self.total_epochs in total)
n_rest = (i + 1) * jobs - (self.total_epochs - start)
current_jobs = jobs - n_rest if n_rest > 0 else jobs
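                        # e.g. total_epochs=100, jobs=8, start=0 -> evals=13 batches; in
                        # the last batch n_rest=4, so only 4 epochs run, landing exactly
                        # on 100.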
asked, is_random = self.get_asked_points(n_points=current_jobs)
f_val = self.run_optimizer_parallel(parallel, asked)
self.opt.tell(asked, [v["loss"] for v in f_val])
for j, val in enumerate(f_val):
# Use human-friendly indexes here (starting from 1)
current = i * jobs + j + 1 + start
self.evaluate_result(val, current, is_random[j])
pbar.update(task, advance=1)
except KeyboardInterrupt:
print("User interrupted..")
logger.info(
f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
f"saved to '{self.results_file}'."
)
if self.current_best_epoch:
HyperoptTools.try_export_params(
self.config, self.backtesting.strategy.get_strategy_name(), self.current_best_epoch
)
HyperoptTools.show_epoch_details(
self.current_best_epoch, self.total_epochs, self.print_json
)
elif self.num_epochs_saved > 0:
print(
f"No good result found for given optimization function in {self.num_epochs_saved} "
f"{plural(self.num_epochs_saved, 'epoch')}."
)
else:
# This is printed when Ctrl+C is pressed quickly, before first epochs have
# a chance to be evaluated.
print("No epochs evaluated yet, no best result.")