Merge pull request #10919 from freqtrade/refactor/hyperopt
Restore hyperopt logging, refactor hyperopt layout
commit 4e5ae0af84
@@ -913,6 +913,31 @@ Your epochs should therefore be aligned to the possible values - or you should b
After you run Hyperopt for the desired number of epochs, you can later list all results for analysis, select only the best or profitable ones, and show the details for any of the epochs previously evaluated. This can be done with the `hyperopt-list` and `hyperopt-show` sub-commands. The usage of these sub-commands is described in the [Utils](utils.md#list-hyperopt-results) chapter.
## Output debug messages from your strategy

If you want to output debug messages from your strategy, you can use the `logging` module. By default, Freqtrade will output all messages with a level of `INFO` or higher.
``` python
import logging

logger = logging.getLogger(__name__)


class MyAwesomeStrategy(IStrategy):
    ...

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        logger.info("This is a debug message")
        ...

```
!!! Note "using print"
    Messages printed via `print()` will not be shown in the hyperopt output unless parallelism is disabled (`-j 1`).
    It is recommended to use the `logging` module instead.
## Validate backtesting results

Once the optimized strategy has been implemented into your strategy, you should backtest this strategy to make sure everything is working as expected.
@@ -920,6 +945,7 @@ Once the optimized strategy has been implemented into your strategy, you should

To achieve the same results (number of trades, their durations, profit, etc.) as during Hyperopt, please use the same configuration and parameters (timerange, timeframe, ...) for backtesting that were used for hyperopt.
### Why do my backtest results not match my hyperopt results?

Should results not match, check the following factors:

* You may have added parameters to hyperopt in `populate_indicators()`, where they will be calculated only once **for all epochs**. If you are, for example, trying to optimise multiple SMA timeperiod values, the hyperoptable timeperiod parameter should be placed in `populate_entry_trend()`, which is calculated every epoch. See [Optimizing an indicator parameter](https://www.freqtrade.io/en/stable/hyperopt/#optimizing-an-indicator-parameter) and the sketch below.
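For illustration only, a hyperoptable SMA period placed in `populate_entry_trend()` might look like the following sketch. The class, parameter name, value range and entry condition are made up for this example and are not part of this commit; the point is only that the parameter is read inside `populate_entry_trend()`, so each epoch recalculates the indicator with the candidate value.

``` python
import talib.abstract as ta
from pandas import DataFrame

from freqtrade.strategy import IntParameter, IStrategy


class MyAwesomeStrategy(IStrategy):
    timeframe = "5m"
    stoploss = -0.10
    minimal_roi = {"0": 0.1}

    # Hypothetical hyperoptable parameter - part of the "buy" space.
    buy_sma_period = IntParameter(10, 50, default=20, space="buy")

    def populate_entry_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        # Recalculated on every epoch with the candidate value of buy_sma_period.
        sma = ta.SMA(dataframe, timeperiod=self.buy_sma_period.value)
        dataframe.loc[dataframe["close"] > sma, "enter_long"] = 1
        return dataframe
```

If the same parameter were instead read in `populate_indicators()`, the indicator would be computed once with a single value and every epoch would see the same column.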
@@ -15,7 +15,7 @@ def start_hyperopt_list(args: dict[str, Any]) -> None:
    """
    from freqtrade.configuration import setup_utils_configuration
    from freqtrade.data.btanalysis import get_latest_hyperopt_file
    from freqtrade.optimize.hyperopt_output import HyperoptOutput
    from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
    from freqtrade.optimize.hyperopt_tools import HyperoptTools

    config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)
freqtrade/optimize/hyperopt/__init__.py (new file, 3 lines)

@@ -0,0 +1,3 @@
# flake8: noqa: F401
from freqtrade.optimize.hyperopt.hyperopt import Hyperopt
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
freqtrade/optimize/hyperopt/hyperopt.py (new file, 352 lines)

@@ -0,0 +1,352 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
"""

import logging
import random
import sys
from datetime import datetime
from math import ceil
from multiprocessing import Manager
from pathlib import Path
from typing import Any

import rapidjson
from joblib import Parallel, cpu_count, delayed, wrap_non_picklable_objects
from joblib.externals import cloudpickle
from rich.console import Console

from freqtrade.constants import FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import file_dump_json, plural
from freqtrade.optimize.hyperopt.hyperopt_logger import logging_mp_handle, logging_mp_setup
from freqtrade.optimize.hyperopt.hyperopt_optimizer import HyperOptimizer
from freqtrade.optimize.hyperopt.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
    HyperoptStateContainer,
    HyperoptTools,
    hyperopt_serializer,
)
from freqtrade.util import get_progress_tracker


logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

log_queue: Any


class Hyperopt:
    """
    Hyperopt class, this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    """

    def __init__(self, config: Config) -> None:
        self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)

        self.config = config

        self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if self.config.get("hyperopt"):
            raise OperationalException(
                "Using separate Hyperopt files has been removed in 2021.9. Please convert "
                "your existing Hyperopt file to the new Hyperoptable strategy interface"
            )

        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config["strategy"])
        self.results_file: Path = (
            self.config["user_data_dir"]
            / "hyperopt_results"
            / f"strategy_{strategy}_{time_now}.fthypt"
        )
        self.data_pickle_file = (
            self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
        )
        self.total_epochs = config.get("epochs", 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.num_epochs_saved = 0
        self.current_best_epoch: dict[str, Any] | None = None

        if HyperoptTools.has_space(self.config, "sell"):
            # Make sure use_exit_signal is enabled
            self.config["use_exit_signal"] = True

        self.print_all = self.config.get("print_all", False)
        self.hyperopt_table_header = 0
        self.print_colorized = self.config.get("print_colorized", False)
        self.print_json = self.config.get("print_json", False)

        self.hyperopter = HyperOptimizer(self.config)

    @staticmethod
    def get_lock_filename(config: Config) -> str:
        return str(config["user_data_dir"] / "hyperopt.lock")

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()

    def hyperopt_pickle_magic(self, bases) -> None:
        """
        Hyperopt magic to allow strategy inheritance across files.
        For this to properly work, we need to register the module of the imported class
        to pickle as value.
        """
        for modules in bases:
            if modules.__name__ != "IStrategy":
                cloudpickle.register_pickle_by_value(sys.modules[modules.__module__])
                self.hyperopt_pickle_magic(modules.__bases__)

    def _save_result(self, epoch: dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open("a") as f:
            rapidjson.dump(
                epoch,
                f,
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
            )
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def print_results(self, results: dict[str, Any]) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results["is_best"]

        if self.print_all or is_best:
            self._hyper_out.add_data(
                self.config,
                [results],
                self.total_epochs,
                self.print_all,
            )

    def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
        """Start optimizer in a parallel way"""

        def optimizer_wrapper(*args, **kwargs):
            # global log queue. This must happen in the file that initializes Parallel
            logging_mp_setup(
                log_queue, logging.INFO if self.config["verbosity"] < 1 else logging.DEBUG
            )

            return self.hyperopter.generate_optimizer(*args, **kwargs)

        return parallel(delayed(wrap_non_picklable_objects(optimizer_wrapper))(v) for v in asked)

    def _set_random_state(self, random_state: int | None) -> int:
        return random_state or random.randint(1, 2**16 - 1)  # noqa: S311

    def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
        """
        Enforce points returned from `self.opt.ask` have not been already evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If still some points are missing in respect to `n_points`, random sample some points
        5. Repeat until at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """

        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list

        i = 0
        asked_non_tried: list[list[Any]] = []
        is_random_non_tried: list[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
                is_random = [True for _ in range(len(asked))]
            is_random_non_tried += [
                rand
                for x, rand in zip(asked, is_random, strict=False)
                if x not in self.opt.Xi and x not in asked_non_tried
            ]
            asked_non_tried += [
                x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
            ]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[: min(len(asked_non_tried), n_points)],
                is_random_non_tried[: min(len(asked_non_tried), n_points)],
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]

    def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val["current_epoch"] = current
        val["is_initial_point"] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val["is_best"] = is_best
        val["is_random"] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val["loss"]
            self.current_best_epoch = val

        self._save_result(val)

    def _setup_logging_mp_workaround(self) -> None:
        """
        Workaround for logging in child processes.
        local_queue must be a global in the file that initializes Parallel.
        """
        global log_queue
        m = Manager()
        log_queue = m.Queue()

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        self.hyperopter.prepare_hyperopt()

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get("hyperopt_jobs", -1)
        logger.info(f"Number of parallel jobs set as: {config_jobs}")

        self.opt = self.hyperopter.get_optimizer(
            config_jobs, self.random_state, INITIAL_POINTS, SKOPT_MODEL_QUEUE_SIZE
        )
        self._setup_logging_mp_workaround()
        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f"Effective number of parallel workers used: {jobs}")
                console = Console(
                    color_system="auto" if self.print_colorized else None,
                )

                # Define progressbar
                with get_progress_tracker(
                    console=console,
                    cust_callables=[self._hyper_out],
                ) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load it's informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.hyperopter.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0["loss"]])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v["loss"] for v in f_val])

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)
                        logging_mp_handle(log_queue)

        except KeyboardInterrupt:
            print("User interrupted..")

        logger.info(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config,
                self.hyperopter.get_strategy_name(),
                self.current_best_epoch,
            )

            HyperoptTools.show_epoch_details(
                self.current_best_epoch, self.total_epochs, self.print_json
            )
        elif self.num_epochs_saved > 0:
            print(
                f"No good result found for given optimization function in {self.num_epochs_saved} "
                f"{plural(self.num_epochs_saved, 'epoch')}."
            )
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")
@@ -14,7 +14,7 @@ from freqtrade.exceptions import OperationalException
with suppress(ImportError):
    from skopt.space import Dimension

    from freqtrade.optimize.hyperopt_interface import EstimatorType, IHyperOpt
    from freqtrade.optimize.hyperopt.hyperopt_interface import EstimatorType, IHyperOpt


logger = logging.getLogger(__name__)
freqtrade/optimize/hyperopt/hyperopt_logger.py (new file, 40 lines)

@@ -0,0 +1,40 @@
import logging
from logging.handlers import QueueHandler
from multiprocessing import Queue, current_process
from queue import Empty


logger = logging.getLogger(__name__)


def logging_mp_setup(log_queue: Queue, verbosity: int):
    """
    Setup logging in a child process.
    Must be called in the child process before logging.
    log_queue MUST be passed to the child process via inheritance
    Which essentially means that the log_queue must be a global, created in the same
    file as Parallel is initialized.
    """
    current_proc = current_process().name
    if current_proc != "MainProcess":
        h = QueueHandler(log_queue)
        root = logging.getLogger()
        root.setLevel(verbosity)
        root.addHandler(h)


def logging_mp_handle(q: Queue):
    """
    Handle logging from a child process.
    Must be called in the parent process to handle log messages from the child process.
    """

    try:
        while True:
            record = q.get(block=False)
            if record is None:
                break
            logger.handle(record)

    except Empty:
        pass
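To make the intent of the two helpers above easier to follow, here is a minimal, generic sketch of the same queue-based pattern using only the standard library. It is not freqtrade code and is not part of this commit; the names and log levels are illustrative. The child process routes its records into a shared queue via `QueueHandler`, and the parent drains that queue and re-emits the records through its own handlers.

``` python
import logging
from logging.handlers import QueueHandler
from multiprocessing import Manager, Process


def worker(log_queue) -> None:
    # Child process: attach a QueueHandler so every record ends up in the queue.
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(QueueHandler(log_queue))
    logging.getLogger("worker").info("hello from the child process")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    with Manager() as manager:
        queue = manager.Queue()  # created in the parent and passed to the child
        proc = Process(target=worker, args=(queue,))
        proc.start()
        proc.join()
        # Parent process: drain the queue and hand each record to the local logger.
        while not queue.empty():
            logging.getLogger("worker").handle(queue.get())
```

In the refactor above, `logging_mp_setup()` plays the role of the handler setup inside `worker()`, and `logging_mp_handle()` plays the role of the drain loop, with joblib workers standing in for the bare `Process`.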
@@ -1,45 +1,33 @@
# pragma pylint: disable=too-many-instance-attributes, pointless-string-statement

"""
This module contains the hyperopt logic
This module contains the hyperopt optimizer class, which needs to be pickled
and will be sent to the hyperopt worker processes.
"""

import logging
import random
import sys
import warnings
from datetime import datetime, timezone
from math import ceil
from pathlib import Path
from typing import Any

import rapidjson
from joblib import Parallel, cpu_count, delayed, dump, load, wrap_non_picklable_objects
from joblib import dump, load
from joblib.externals import cloudpickle
from pandas import DataFrame
from rich.console import Console

from freqtrade.constants import DATETIME_PRINT_FORMAT, FTHYPT_FILEVERSION, LAST_BT_RESULT_FN, Config
from freqtrade.constants import DATETIME_PRINT_FORMAT, Config
from freqtrade.data.converter import trim_dataframes
from freqtrade.data.history import get_timerange
from freqtrade.data.metrics import calculate_market_change
from freqtrade.enums import HyperoptState
from freqtrade.exceptions import OperationalException
from freqtrade.misc import deep_merge_dicts, file_dump_json, plural
from freqtrade.misc import deep_merge_dicts
from freqtrade.optimize.backtesting import Backtesting

# Import IHyperOpt and IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_output import HyperoptOutput
from freqtrade.optimize.hyperopt_tools import (
    HyperoptStateContainer,
    HyperoptTools,
    hyperopt_serializer,
)
# Import IHyperOptLoss to allow unpickling classes from these modules
from freqtrade.optimize.hyperopt.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_tools import HyperoptStateContainer, HyperoptTools
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.resolvers.hyperopt_resolver import HyperOptLossResolver
from freqtrade.util import get_progress_tracker


# Suppress scikit-learn FutureWarnings from skopt

@@ -51,22 +39,13 @@ with warnings.catch_warnings():
logger = logging.getLogger(__name__)


INITIAL_POINTS = 30

# Keep no more than SKOPT_MODEL_QUEUE_SIZE models
# in the skopt model queue, to optimize memory consumption
SKOPT_MODEL_QUEUE_SIZE = 10

MAX_LOSS = 100000  # just a big enough number to be bad result in loss optimization


class Hyperopt:
class HyperOptimizer:
    """
    Hyperopt class, this class contains all the logic to run a hyperopt simulation

    To start a hyperopt run:
    hyperopt = Hyperopt(config)
    hyperopt.start()
    HyperoptOptimizer class
    This class is sent to the hyperopt worker processes.
    """

    def __init__(self, config: Config) -> None:

@@ -79,8 +58,6 @@ class Hyperopt:
        self.max_open_trades_space: list[Dimension] = []
        self.dimensions: list[Dimension] = []

        self._hyper_out: HyperoptOutput = HyperoptOutput(streaming=True)

        self.config = config
        self.min_date: datetime
        self.max_date: datetime

@@ -89,7 +66,6 @@ class Hyperopt:
        self.pairlist = self.backtesting.pairlists.whitelist
        self.custom_hyperopt: HyperOptAuto
        self.analyze_per_epoch = self.config.get("analyze_per_epoch", False)
        HyperoptStateContainer.set_state(HyperoptState.STARTUP)

        if not self.config.get("hyperopt"):
            self.custom_hyperopt = HyperOptAuto(self.config)

@@ -107,48 +83,35 @@ class Hyperopt:
            self.config
        )
        self.calculate_loss = self.custom_hyperoptloss.hyperopt_loss_function
        time_now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        strategy = str(self.config["strategy"])
        self.results_file: Path = (
            self.config["user_data_dir"]
            / "hyperopt_results"
            / f"strategy_{strategy}_{time_now}.fthypt"
        )

        self.data_pickle_file = (
            self.config["user_data_dir"] / "hyperopt_results" / "hyperopt_tickerdata.pkl"
        )
        self.total_epochs = config.get("epochs", 0)

        self.current_best_loss = 100

        self.clean_hyperopt()

        self.market_change = 0.0
        self.num_epochs_saved = 0
        self.current_best_epoch: dict[str, Any] | None = None

        if HyperoptTools.has_space(self.config, "sell"):
            # Make sure use_exit_signal is enabled
            self.config["use_exit_signal"] = True

        self.print_all = self.config.get("print_all", False)
        self.hyperopt_table_header = 0
        self.print_colorized = self.config.get("print_colorized", False)
        self.print_json = self.config.get("print_json", False)
    def prepare_hyperopt(self) -> None:
        # Initialize spaces ...
        self.init_spaces()

    @staticmethod
    def get_lock_filename(config: Config) -> str:
        return str(config["user_data_dir"] / "hyperopt.lock")
        self.prepare_hyperopt_data()

    def clean_hyperopt(self) -> None:
        """
        Remove hyperopt pickle files to restart hyperopt.
        """
        for f in [self.data_pickle_file, self.results_file]:
            p = Path(f)
            if p.is_file():
                logger.info(f"Removing `{p}`.")
                p.unlink()
        # We don't need exchange instance anymore while running hyperopt
        self.backtesting.exchange.close()
        self.backtesting.exchange._api = None
        self.backtesting.exchange._api_async = None
        self.backtesting.exchange.loop = None  # type: ignore
        self.backtesting.exchange._loop_lock = None  # type: ignore
        self.backtesting.exchange._cache_lock = None  # type: ignore
        # self.backtesting.exchange = None  # type: ignore
        self.backtesting.pairlists = None  # type: ignore

    def get_strategy_name(self) -> str:
        return self.backtesting.strategy.get_strategy_name()

    def hyperopt_pickle_magic(self, bases) -> None:
        """

@@ -173,32 +136,6 @@ class Hyperopt:
        # and the values are taken from the list of parameters.
        return {d.name: v for d, v in zip(dimensions, raw_params, strict=False)}

    def _save_result(self, epoch: dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While not a valid json object - this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open("a") as f:
            rapidjson.dump(
                epoch,
                f,
                default=hyperopt_serializer,
                number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN,
            )
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
        file_dump_json(latest_filename, {"latest_hyperopt": str(self.results_file.name)}, log=False)

    def _get_params_details(self, params: dict) -> dict:
        """
        Return the params for each space

@@ -251,21 +188,6 @@ class Hyperopt:
        result["max_open_trades"] = {"max_open_trades": strategy.max_open_trades}
        return result

    def print_results(self, results: dict[str, Any]) -> None:
        """
        Log results if it is better than any previous evaluation
        TODO: this should be moved to HyperoptTools too
        """
        is_best = results["is_best"]

        if self.print_all or is_best:
            self._hyper_out.add_data(
                self.config,
                [results],
                self.total_epochs,
                self.print_all,
            )

    def init_spaces(self):
        """
        Assign the dimensions in the hyperoptimization space.

@@ -452,7 +374,14 @@ class Hyperopt:
            "total_profit": total_profit,
        }

    def get_optimizer(self, dimensions: list[Dimension], cpu_count) -> Optimizer:
    def get_optimizer(
        self,
        cpu_count: int,
        random_state: int,
        initial_points: int,
        model_queue_size: int,
    ) -> Optimizer:
        dimensions = self.dimensions
        estimator = self.custom_hyperopt.generate_estimator(dimensions=dimensions)

        acq_optimizer = "sampling"

@@ -467,21 +396,12 @@ class Hyperopt:
            dimensions,
            base_estimator=estimator,
            acq_optimizer=acq_optimizer,
            n_initial_points=INITIAL_POINTS,
            n_initial_points=initial_points,
            acq_optimizer_kwargs={"n_jobs": cpu_count},
            random_state=self.random_state,
            model_queue_size=SKOPT_MODEL_QUEUE_SIZE,
            random_state=random_state,
            model_queue_size=model_queue_size,
        )

    def run_optimizer_parallel(self, parallel: Parallel, asked: list[list]) -> list[dict[str, Any]]:
        """Start optimizer in a parallel way"""
        return parallel(
            delayed(wrap_non_picklable_objects(self.generate_optimizer))(v) for v in asked
        )

    def _set_random_state(self, random_state: int | None) -> int:
        return random_state or random.randint(1, 2**16 - 1)  # noqa: S311

    def advise_and_trim(self, data: dict[str, DataFrame]) -> dict[str, DataFrame]:
        preprocessed = self.backtesting.strategy.advise_all_indicators(data)

@@ -517,173 +437,3 @@ class Hyperopt:
            dump(preprocessed, self.data_pickle_file)
        else:
            dump(data, self.data_pickle_file)

    def get_asked_points(self, n_points: int) -> tuple[list[list[Any]], list[bool]]:
        """
        Enforce points returned from `self.opt.ask` have not been already evaluated

        Steps:
        1. Try to get points using `self.opt.ask` first
        2. Discard the points that have already been evaluated
        3. Retry using `self.opt.ask` up to 3 times
        4. If still some points are missing in respect to `n_points`, random sample some points
        5. Repeat until at least `n_points` points in the `asked_non_tried` list
        6. Return a list with length truncated at `n_points`
        """

        def unique_list(a_list):
            new_list = []
            for item in a_list:
                if item not in new_list:
                    new_list.append(item)
            return new_list

        i = 0
        asked_non_tried: list[list[Any]] = []
        is_random_non_tried: list[bool] = []
        while i < 5 and len(asked_non_tried) < n_points:
            if i < 3:
                self.opt.cache_ = {}
                asked = unique_list(self.opt.ask(n_points=n_points * 5 if i > 0 else n_points))
                is_random = [False for _ in range(len(asked))]
            else:
                asked = unique_list(self.opt.space.rvs(n_samples=n_points * 5))
                is_random = [True for _ in range(len(asked))]
            is_random_non_tried += [
                rand
                for x, rand in zip(asked, is_random, strict=False)
                if x not in self.opt.Xi and x not in asked_non_tried
            ]
            asked_non_tried += [
                x for x in asked if x not in self.opt.Xi and x not in asked_non_tried
            ]
            i += 1

        if asked_non_tried:
            return (
                asked_non_tried[: min(len(asked_non_tried), n_points)],
                is_random_non_tried[: min(len(asked_non_tried), n_points)],
            )
        else:
            return self.opt.ask(n_points=n_points), [False for _ in range(n_points)]

    def evaluate_result(self, val: dict[str, Any], current: int, is_random: bool):
        """
        Evaluate results returned from generate_optimizer
        """
        val["current_epoch"] = current
        val["is_initial_point"] = current <= INITIAL_POINTS

        logger.debug("Optimizer epoch evaluated: %s", val)

        is_best = HyperoptTools.is_best_loss(val, self.current_best_loss)
        # This value is assigned here and not in the optimization method
        # to keep proper order in the list of results. That's because
        # evaluations can take different time. Here they are aligned in the
        # order they will be shown to the user.
        val["is_best"] = is_best
        val["is_random"] = is_random
        self.print_results(val)

        if is_best:
            self.current_best_loss = val["loss"]
            self.current_best_epoch = val

        self._save_result(val)

    def start(self) -> None:
        self.random_state = self._set_random_state(self.config.get("hyperopt_random_state"))
        logger.info(f"Using optimizer random state: {self.random_state}")
        self.hyperopt_table_header = -1
        # Initialize spaces ...
        self.init_spaces()

        self.prepare_hyperopt_data()

        # We don't need exchange instance anymore while running hyperopt
        self.backtesting.exchange.close()
        self.backtesting.exchange._api = None
        self.backtesting.exchange._api_async = None
        self.backtesting.exchange.loop = None  # type: ignore
        self.backtesting.exchange._loop_lock = None  # type: ignore
        self.backtesting.exchange._cache_lock = None  # type: ignore
        # self.backtesting.exchange = None  # type: ignore
        self.backtesting.pairlists = None  # type: ignore

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get("hyperopt_jobs", -1)
        logger.info(f"Number of parallel jobs set as: {config_jobs}")

        self.opt = self.get_optimizer(self.dimensions, config_jobs)

        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(f"Effective number of parallel workers used: {jobs}")
                console = Console(
                    color_system="auto" if self.print_colorized else None,
                )

                # Define progressbar
                with get_progress_tracker(
                    console=console,
                    cust_callables=[self._hyper_out],
                ) as pbar:
                    task = pbar.add_task("Epochs", total=self.total_epochs)

                    start = 0

                    if self.analyze_per_epoch:
                        # First analysis not in parallel mode when using --analyze-per-epoch.
                        # This allows dataprovider to load it's informative cache.
                        asked, is_random = self.get_asked_points(n_points=1)
                        f_val0 = self.generate_optimizer(asked[0])
                        self.opt.tell(asked, [f_val0["loss"]])
                        self.evaluate_result(f_val0, 1, is_random[0])
                        pbar.update(task, advance=1)
                        start += 1

                    evals = ceil((self.total_epochs - start) / jobs)
                    for i in range(evals):
                        # Correct the number of epochs to be processed for the last
                        # iteration (should not exceed self.total_epochs in total)
                        n_rest = (i + 1) * jobs - (self.total_epochs - start)
                        current_jobs = jobs - n_rest if n_rest > 0 else jobs

                        asked, is_random = self.get_asked_points(n_points=current_jobs)
                        f_val = self.run_optimizer_parallel(parallel, asked)
                        self.opt.tell(asked, [v["loss"] for v in f_val])

                        for j, val in enumerate(f_val):
                            # Use human-friendly indexes here (starting from 1)
                            current = i * jobs + j + 1 + start

                            self.evaluate_result(val, current, is_random[j])
                            pbar.update(task, advance=1)

        except KeyboardInterrupt:
            print("User interrupted..")

        logger.info(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'."
        )

        if self.current_best_epoch:
            HyperoptTools.try_export_params(
                self.config, self.backtesting.strategy.get_strategy_name(), self.current_best_epoch
            )

            HyperoptTools.show_epoch_details(
                self.current_best_epoch, self.total_epochs, self.print_json
            )
        elif self.num_epochs_saved > 0:
            print(
                f"No good result found for given optimization function in {self.num_epochs_saved} "
                f"{plural(self.num_epochs_saved, 'epoch')}."
            )
        else:
            # This is printed when Ctrl+C is pressed quickly, before first epochs have
            # a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")
@@ -9,7 +9,7 @@ from pathlib import Path

from freqtrade.constants import HYPEROPT_LOSS_BUILTIN, USERPATH_HYPEROPTS, Config
from freqtrade.exceptions import OperationalException
from freqtrade.optimize.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.optimize.hyperopt_loss.hyperopt_loss_interface import IHyperOptLoss
from freqtrade.resolvers import IResolver
@@ -14,7 +14,7 @@ from freqtrade.data.history import load_data
from freqtrade.enums import ExitType, RunMode
from freqtrade.exceptions import OperationalException
from freqtrade.optimize.hyperopt import Hyperopt
from freqtrade.optimize.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt.hyperopt_auto import HyperOptAuto
from freqtrade.optimize.hyperopt_tools import HyperoptTools
from freqtrade.optimize.optimize_reports import generate_strategy_stats
from freqtrade.optimize.space import SKDecimal
@@ -222,7 +222,7 @@ def test_start_no_data(mocker, hyperopt_conf, tmp_path) -> None:
    patched_configuration_load_config_file(mocker, hyperopt_conf)
    mocker.patch("freqtrade.data.history.load_pair_history", MagicMock(return_value=pd.DataFrame))
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )

@@ -315,12 +315,17 @@ def test_roi_table_generation(hyperopt) -> None:
        "roi_p3": 3,
    }

    assert hyperopt.custom_hyperopt.generate_roi_table(params) == {0: 6, 15: 3, 25: 1, 30: 0}
    assert hyperopt.hyperopter.custom_hyperopt.generate_roi_table(params) == {
        0: 6,
        15: 3,
        25: 1,
        30: 0,
    }


def test_params_no_optimize_details(hyperopt) -> None:
    hyperopt.config["spaces"] = ["buy"]
    res = hyperopt._get_no_optimize_details()
    hyperopt.hyperopter.config["spaces"] = ["buy"]
    res = hyperopt.hyperopter._get_no_optimize_details()
    assert isinstance(res, dict)
    assert "trailing" in res
    assert res["trailing"]["trailing_stop"] is False
@@ -333,21 +338,23 @@ def test_params_no_optimize_details(hyperopt) -> None:


def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")

    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )
    # Dummy-reduce points to ensure scikit-learn is forced to generate new values
    mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2)
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.INITIAL_POINTS", 2)

    parallel = mocker.patch(
        "freqtrade.optimize.hyperopt.Hyperopt.run_optimizer_parallel",

@@ -367,8 +374,8 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
    del hyperopt_conf["timeframe"]

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()

@@ -379,10 +386,12 @@ def test_start_calls_optimizer(mocker, hyperopt_conf, capsys) -> None:
    # Should be called for historical candle data
    assert dumper.call_count == 1
    assert dumper2.call_count == 1
    assert hasattr(hyperopt.backtesting.strategy, "advise_exit")
    assert hasattr(hyperopt.backtesting.strategy, "advise_entry")
    assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
    assert hasattr(hyperopt.backtesting, "_position_stacking")
    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_exit")
    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_entry")
    assert (
        hyperopt.hyperopter.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
    )
    assert hasattr(hyperopt.hyperopter.backtesting, "_position_stacking")


def test_hyperopt_format_results(hyperopt):

@@ -461,7 +470,7 @@ def test_hyperopt_format_results(hyperopt):

def test_populate_indicators(hyperopt, testdatadir) -> None:
    data = load_data(testdatadir, "1m", ["UNITTEST/BTC"], fill_up_missing=True)
    dataframes = hyperopt.backtesting.strategy.advise_all_indicators(data)
    dataframes = hyperopt.hyperopter.backtesting.strategy.advise_all_indicators(data)
    dataframe = dataframes["UNITTEST/BTC"]

    # Check if some indicators are generated. We will not test all of them
@@ -521,15 +530,20 @@ def test_generate_optimizer(mocker, hyperopt_conf) -> None:
        "final_balance": 1000,
    }

    mocker.patch("freqtrade.optimize.hyperopt.Backtesting.backtest", return_value=backtest_result)
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.Backtesting.backtest",
        return_value=backtest_result,
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        return_value=(dt_utc(2017, 12, 10), dt_utc(2017, 12, 13)),
    )
    patch_exchange(mocker)
    mocker.patch.object(Path, "open")
    mocker.patch("freqtrade.configuration.config_validation.validate_config_schema")
    mocker.patch("freqtrade.optimize.hyperopt.load", return_value={"XRP/BTC": None})
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.load", return_value={"XRP/BTC": None}
    )

    optimizer_param = {
        "buy_plusdi": 0.02,

@@ -589,10 +603,12 @@ def test_generate_optimizer(mocker, hyperopt_conf) -> None:
    }

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.min_date = dt_utc(2017, 12, 10)
    hyperopt.max_date = dt_utc(2017, 12, 13)
    hyperopt.init_spaces()
    generate_optimizer_value = hyperopt.generate_optimizer(list(optimizer_param.values()))
    hyperopt.hyperopter.min_date = dt_utc(2017, 12, 10)
    hyperopt.hyperopter.max_date = dt_utc(2017, 12, 13)
    hyperopt.hyperopter.init_spaces()
    generate_optimizer_value = hyperopt.hyperopter.generate_optimizer(
        list(optimizer_param.values())
    )
    assert generate_optimizer_value == response_expected

@@ -603,8 +619,8 @@ def test_clean_hyperopt(mocker, hyperopt_conf, caplog):
        "freqtrade.strategy.hyper.HyperStrategyMixin.load_params_from_file",
        MagicMock(return_value={}),
    )
    mocker.patch("freqtrade.optimize.hyperopt.Path.is_file", MagicMock(return_value=True))
    unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.Path.unlink", MagicMock())
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.Path.is_file", MagicMock(return_value=True))
    unlinkmock = mocker.patch("freqtrade.optimize.hyperopt.hyperopt.Path.unlink", MagicMock())
    h = Hyperopt(hyperopt_conf)

    assert unlinkmock.call_count == 2

@@ -612,17 +628,19 @@ def test_clean_hyperopt(mocker, hyperopt_conf, caplog):


def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )

    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )
@@ -658,8 +676,8 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:
    )

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()

@@ -677,16 +695,18 @@ def test_print_json_spaces_all(mocker, hyperopt_conf, capsys) -> None:


def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )
    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )

@@ -714,8 +734,8 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:
    hyperopt_conf.update({"print_json": True})

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()

@@ -732,16 +752,18 @@ def test_print_json_spaces_default(mocker, hyperopt_conf, capsys) -> None:


def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )

@@ -770,8 +792,8 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
    )

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()

@@ -785,16 +807,18 @@ def test_print_json_spaces_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:


def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )
@@ -816,8 +840,8 @@ def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> Non
    hyperopt_conf.update({"spaces": "roi stoploss"})

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()

@@ -828,21 +852,23 @@ def test_simplified_interface_roi_stoploss(mocker, hyperopt_conf, capsys) -> Non
    assert dumper.call_count == 1
    assert dumper2.call_count == 1

    assert hasattr(hyperopt.backtesting.strategy, "advise_exit")
    assert hasattr(hyperopt.backtesting.strategy, "advise_entry")
    assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
    assert hasattr(hyperopt.backtesting, "_position_stacking")
    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_exit")
    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_entry")
    assert (
        hyperopt.hyperopter.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
    )
    assert hasattr(hyperopt.hyperopter.backtesting, "_position_stacking")


def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None:
    mocker.patch("freqtrade.optimize.hyperopt.dump", MagicMock())
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump", MagicMock())
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )

@@ -855,34 +881,37 @@ def test_simplified_interface_all_failed(mocker, hyperopt_conf, caplog) -> None:
    )

    mocker.patch(
        "freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space", return_value=[]
        "freqtrade.optimize.hyperopt.hyperopt_auto.HyperOptAuto._generate_indicator_space",
        return_value=[],
    )

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    with pytest.raises(OperationalException, match=r"The 'protection' space is included into *"):
        hyperopt.init_spaces()
        hyperopt.hyperopter.init_spaces()

    hyperopt.config["hyperopt_ignore_missing_space"] = True
    caplog.clear()
    hyperopt.init_spaces()
    hyperopt.hyperopter.init_spaces()
    assert log_has_re(r"The 'protection' space is included into *", caplog)
    assert hyperopt.protection_space == []
    assert hyperopt.hyperopter.protection_space == []


def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
    dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
    )
    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
    mocker.patch(
        "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
        MagicMock(return_value=(MagicMock(), None)),
    )
    mocker.patch(
        "freqtrade.optimize.hyperopt.get_timerange",
        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
        MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
    )

@@ -904,8 +933,8 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
    hyperopt_conf.update({"spaces": "buy"})

    hyperopt = Hyperopt(hyperopt_conf)
    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})

    hyperopt.start()
@@ -916,23 +945,27 @@ def test_simplified_interface_buy(mocker, hyperopt_conf, capsys) -> None:
     assert dumper.called
     assert dumper.call_count == 1
     assert dumper2.call_count == 1
-    assert hasattr(hyperopt.backtesting.strategy, "advise_exit")
-    assert hasattr(hyperopt.backtesting.strategy, "advise_entry")
-    assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
-    assert hasattr(hyperopt.backtesting, "_position_stacking")
+    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_exit")
+    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_entry")
+    assert (
+        hyperopt.hyperopter.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
+    )
+    assert hasattr(hyperopt.hyperopter.backtesting, "_position_stacking")
 
 
 def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
-    dumper = mocker.patch("freqtrade.optimize.hyperopt.dump")
+    dumper = mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump")
     dumper2 = mocker.patch("freqtrade.optimize.hyperopt.Hyperopt._save_result")
-    mocker.patch("freqtrade.optimize.hyperopt.calculate_market_change", return_value=1.5)
-    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
+    mocker.patch(
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.calculate_market_change", return_value=1.5
+    )
+    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
     mocker.patch(
         "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
         MagicMock(return_value=(MagicMock(), None)),
     )
     mocker.patch(
-        "freqtrade.optimize.hyperopt.get_timerange",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
         MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
     )
 
@@ -958,8 +991,8 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
     )
 
     hyperopt = Hyperopt(hyperopt_conf)
-    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
-    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
+    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
+    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
 
     hyperopt.start()
 
@@ -970,10 +1003,12 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
     assert dumper.called
     assert dumper.call_count == 1
     assert dumper2.call_count == 1
-    assert hasattr(hyperopt.backtesting.strategy, "advise_exit")
-    assert hasattr(hyperopt.backtesting.strategy, "advise_entry")
-    assert hyperopt.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
-    assert hasattr(hyperopt.backtesting, "_position_stacking")
+    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_exit")
+    assert hasattr(hyperopt.hyperopter.backtesting.strategy, "advise_entry")
+    assert (
+        hyperopt.hyperopter.backtesting.strategy.max_open_trades == hyperopt_conf["max_open_trades"]
+    )
+    assert hasattr(hyperopt.hyperopter.backtesting, "_position_stacking")
 
 
 @pytest.mark.parametrize(
@@ -985,18 +1020,19 @@ def test_simplified_interface_sell(mocker, hyperopt_conf, capsys) -> None:
     ],
 )
 def test_simplified_interface_failed(mocker, hyperopt_conf, space) -> None:
-    mocker.patch("freqtrade.optimize.hyperopt.dump", MagicMock())
-    mocker.patch("freqtrade.optimize.hyperopt.file_dump_json")
+    mocker.patch("freqtrade.optimize.hyperopt.hyperopt_optimizer.dump", MagicMock())
+    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.file_dump_json")
     mocker.patch(
         "freqtrade.optimize.backtesting.Backtesting.load_bt_data",
         MagicMock(return_value=(MagicMock(), None)),
     )
     mocker.patch(
-        "freqtrade.optimize.hyperopt.get_timerange",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.get_timerange",
         MagicMock(return_value=(datetime(2017, 12, 10), datetime(2017, 12, 13))),
     )
     mocker.patch(
-        "freqtrade.optimize.hyperopt_auto.HyperOptAuto._generate_indicator_space", return_value=[]
+        "freqtrade.optimize.hyperopt.hyperopt_auto.HyperOptAuto._generate_indicator_space",
+        return_value=[],
     )
 
     patch_exchange(mocker)
@@ -1004,8 +1040,8 @@ def test_simplified_interface_failed(mocker, hyperopt_conf, space) -> None:
     hyperopt_conf.update({"spaces": space})
 
     hyperopt = Hyperopt(hyperopt_conf)
-    hyperopt.backtesting.strategy.advise_all_indicators = MagicMock()
-    hyperopt.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
+    hyperopt.hyperopter.backtesting.strategy.advise_all_indicators = MagicMock()
+    hyperopt.hyperopter.custom_hyperopt.generate_roi_table = MagicMock(return_value={})
 
     with pytest.raises(OperationalException, match=f"The '{space}' space is included into *"):
         hyperopt.start()
@@ -1015,7 +1051,7 @@ def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmp_path, fee) -> None
     patch_exchange(mocker)
     mocker.patch(f"{EXMS}.get_fee", fee)
     # Dummy-reduce points to ensure scikit-learn is forced to generate new values
-    mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2)
+    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.INITIAL_POINTS", 2)
     (tmp_path / "hyperopt_results").mkdir(parents=True)
     # No hyperopt needed
     hyperopt_conf.update(
@@ -1027,32 +1063,33 @@ def test_in_strategy_auto_hyperopt(mocker, hyperopt_conf, tmp_path, fee) -> None
         }
     )
     hyperopt = Hyperopt(hyperopt_conf)
-    hyperopt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0)
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
-    assert isinstance(hyperopt.backtesting.strategy.buy_rsi, IntParameter)
-    assert hyperopt.backtesting.strategy.bot_started is True
-    assert hyperopt.backtesting.strategy.bot_loop_started is False
+    opt = hyperopt.hyperopter
+    opt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0)
+    assert isinstance(opt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(opt.backtesting.strategy.buy_rsi, IntParameter)
+    assert opt.backtesting.strategy.bot_started is True
+    assert opt.backtesting.strategy.bot_loop_started is False
 
-    assert hyperopt.backtesting.strategy.buy_rsi.in_space is True
-    assert hyperopt.backtesting.strategy.buy_rsi.value == 35
-    assert hyperopt.backtesting.strategy.sell_rsi.value == 74
-    assert hyperopt.backtesting.strategy.protection_cooldown_lookback.value == 30
-    assert hyperopt.backtesting.strategy.max_open_trades == 1
-    buy_rsi_range = hyperopt.backtesting.strategy.buy_rsi.range
+    assert opt.backtesting.strategy.buy_rsi.in_space is True
+    assert opt.backtesting.strategy.buy_rsi.value == 35
+    assert opt.backtesting.strategy.sell_rsi.value == 74
+    assert opt.backtesting.strategy.protection_cooldown_lookback.value == 30
+    assert opt.backtesting.strategy.max_open_trades == 1
+    buy_rsi_range = opt.backtesting.strategy.buy_rsi.range
     assert isinstance(buy_rsi_range, range)
     # Range from 0 - 50 (inclusive)
     assert len(list(buy_rsi_range)) == 51
 
     hyperopt.start()
     # All values should've changed.
-    assert hyperopt.backtesting.strategy.protection_cooldown_lookback.value != 30
-    assert hyperopt.backtesting.strategy.buy_rsi.value != 35
-    assert hyperopt.backtesting.strategy.sell_rsi.value != 74
-    assert hyperopt.backtesting.strategy.max_open_trades != 1
+    assert opt.backtesting.strategy.protection_cooldown_lookback.value != 30
+    assert opt.backtesting.strategy.buy_rsi.value != 35
+    assert opt.backtesting.strategy.sell_rsi.value != 74
+    assert opt.backtesting.strategy.max_open_trades != 1
 
-    hyperopt.custom_hyperopt.generate_estimator = lambda *args, **kwargs: "ET1"
+    opt.custom_hyperopt.generate_estimator = lambda *args, **kwargs: "ET1"
     with pytest.raises(OperationalException, match="Estimator ET1 not supported."):
-        hyperopt.get_optimizer([], 2)
+        opt.get_optimizer(2, 42, 2, 2)
 
 
 @pytest.mark.filterwarnings("ignore::DeprecationWarning")
@@ -1063,7 +1100,7 @@ def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmp_path
     mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=get_markets()))
     (tmp_path / "hyperopt_results").mkdir(parents=True)
     # Dummy-reduce points to ensure scikit-learn is forced to generate new values
-    mocker.patch("freqtrade.optimize.hyperopt.INITIAL_POINTS", 2)
+    mocker.patch("freqtrade.optimize.hyperopt.hyperopt.INITIAL_POINTS", 2)
     # No hyperopt needed
     hyperopt_conf.update(
         {
@@ -1078,21 +1115,22 @@ def test_in_strategy_auto_hyperopt_with_parallel(mocker, hyperopt_conf, tmp_path
         }
     )
     hyperopt = Hyperopt(hyperopt_conf)
-    hyperopt.backtesting.exchange.get_max_leverage = lambda *x, **xx: 1.0
-    hyperopt.backtesting.exchange.get_min_pair_stake_amount = lambda *x, **xx: 0.00001
-    hyperopt.backtesting.exchange.get_max_pair_stake_amount = lambda *x, **xx: 100.0
-    hyperopt.backtesting.exchange._markets = get_markets()
+    opt = hyperopt.hyperopter
+    opt.backtesting.exchange.get_max_leverage = lambda *x, **xx: 1.0
+    opt.backtesting.exchange.get_min_pair_stake_amount = lambda *x, **xx: 0.00001
+    opt.backtesting.exchange.get_max_pair_stake_amount = lambda *x, **xx: 100.0
+    opt.backtesting.exchange._markets = get_markets()
 
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
-    assert isinstance(hyperopt.backtesting.strategy.buy_rsi, IntParameter)
-    assert hyperopt.backtesting.strategy.bot_started is True
-    assert hyperopt.backtesting.strategy.bot_loop_started is False
+    assert isinstance(opt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(opt.backtesting.strategy.buy_rsi, IntParameter)
+    assert opt.backtesting.strategy.bot_started is True
+    assert opt.backtesting.strategy.bot_loop_started is False
 
-    assert hyperopt.backtesting.strategy.buy_rsi.in_space is True
-    assert hyperopt.backtesting.strategy.buy_rsi.value == 35
-    assert hyperopt.backtesting.strategy.sell_rsi.value == 74
-    assert hyperopt.backtesting.strategy.protection_cooldown_lookback.value == 30
-    buy_rsi_range = hyperopt.backtesting.strategy.buy_rsi.range
+    assert opt.backtesting.strategy.buy_rsi.in_space is True
+    assert opt.backtesting.strategy.buy_rsi.value == 35
+    assert opt.backtesting.strategy.sell_rsi.value == 74
+    assert opt.backtesting.strategy.protection_cooldown_lookback.value == 30
+    buy_rsi_range = opt.backtesting.strategy.buy_rsi.range
     assert isinstance(buy_rsi_range, range)
     # Range from 0 - 50 (inclusive)
     assert len(list(buy_rsi_range)) == 51
@@ -1116,7 +1154,7 @@ def test_in_strategy_auto_hyperopt_per_epoch(mocker, hyperopt_conf, tmp_path, fe
         }
     )
     go = mocker.patch(
-        "freqtrade.optimize.hyperopt.Hyperopt.generate_optimizer",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.HyperOptimizer.generate_optimizer",
         return_value={
             "loss": 0.05,
             "results_explanation": "foo result",
@@ -1125,17 +1163,18 @@ def test_in_strategy_auto_hyperopt_per_epoch(mocker, hyperopt_conf, tmp_path, fe
         },
     )
     hyperopt = Hyperopt(hyperopt_conf)
-    hyperopt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0)
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
-    assert isinstance(hyperopt.backtesting.strategy.buy_rsi, IntParameter)
-    assert hyperopt.backtesting.strategy.bot_loop_started is False
-    assert hyperopt.backtesting.strategy.bot_started is True
+    opt = hyperopt.hyperopter
+    opt.backtesting.exchange.get_max_leverage = MagicMock(return_value=1.0)
+    assert isinstance(opt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(opt.backtesting.strategy.buy_rsi, IntParameter)
+    assert opt.backtesting.strategy.bot_loop_started is False
+    assert opt.backtesting.strategy.bot_started is True
 
-    assert hyperopt.backtesting.strategy.buy_rsi.in_space is True
-    assert hyperopt.backtesting.strategy.buy_rsi.value == 35
-    assert hyperopt.backtesting.strategy.sell_rsi.value == 74
-    assert hyperopt.backtesting.strategy.protection_cooldown_lookback.value == 30
-    buy_rsi_range = hyperopt.backtesting.strategy.buy_rsi.range
+    assert opt.backtesting.strategy.buy_rsi.in_space is True
+    assert opt.backtesting.strategy.buy_rsi.value == 35
+    assert opt.backtesting.strategy.sell_rsi.value == 74
+    assert opt.backtesting.strategy.protection_cooldown_lookback.value == 30
+    buy_rsi_range = opt.backtesting.strategy.buy_rsi.range
     assert isinstance(buy_rsi_range, range)
     # Range from 0 - 50 (inclusive)
     assert len(list(buy_rsi_range)) == 51
@@ -1179,17 +1218,17 @@ def test_stake_amount_unlimited_max_open_trades(mocker, hyperopt_conf, tmp_path,
     )
     hyperopt = Hyperopt(hyperopt_conf)
     mocker.patch(
-        "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.HyperOptimizer._get_params_dict",
         return_value={"max_open_trades": -1},
     )
 
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(hyperopt.hyperopter.custom_hyperopt, HyperOptAuto)
 
-    assert hyperopt.backtesting.strategy.max_open_trades == 1
+    assert hyperopt.hyperopter.backtesting.strategy.max_open_trades == 1
 
     hyperopt.start()
 
-    assert hyperopt.backtesting.strategy.max_open_trades == 1
+    assert hyperopt.hyperopter.backtesting.strategy.max_open_trades == 1
 
 
 def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> None:
@@ -1208,11 +1247,11 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N
     )
     hyperopt = Hyperopt(hyperopt_conf)
     mocker.patch(
-        "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.HyperOptimizer._get_params_dict",
         return_value={"max_open_trades": -1},
    )
 
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(hyperopt.hyperopter.custom_hyperopt, HyperOptAuto)
 
     hyperopt.start()
 
@@ -1227,11 +1266,11 @@ def test_max_open_trades_dump(mocker, hyperopt_conf, tmp_path, fee, capsys) -> N
 
     hyperopt = Hyperopt(hyperopt_conf)
     mocker.patch(
-        "freqtrade.optimize.hyperopt.Hyperopt._get_params_dict",
+        "freqtrade.optimize.hyperopt.hyperopt_optimizer.HyperOptimizer._get_params_dict",
         return_value={"max_open_trades": -1},
     )
 
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(hyperopt.hyperopter.custom_hyperopt, HyperOptAuto)
 
     hyperopt.start()
 
@@ -1262,9 +1301,9 @@ def test_max_open_trades_consistency(mocker, hyperopt_conf, tmp_path, fee) -> No
     )
     hyperopt = Hyperopt(hyperopt_conf)
 
-    assert isinstance(hyperopt.custom_hyperopt, HyperOptAuto)
+    assert isinstance(hyperopt.hyperopter.custom_hyperopt, HyperOptAuto)
 
-    hyperopt.custom_hyperopt.max_open_trades_space = lambda: [
+    hyperopt.hyperopter.custom_hyperopt.max_open_trades_space = lambda: [
         Integer(1, 10, name="max_open_trades")
     ]
 
@@ -1282,11 +1321,13 @@ def test_max_open_trades_consistency(mocker, hyperopt_conf, tmp_path, fee) -> No
 
         return wrapper
 
-    hyperopt.backtesting.wallets._calculate_unlimited_stake_amount = stake_amount_interceptor(
-        hyperopt.backtesting.wallets._calculate_unlimited_stake_amount
+    hyperopt.hyperopter.backtesting.wallets._calculate_unlimited_stake_amount = (
+        stake_amount_interceptor(
+            hyperopt.hyperopter.backtesting.wallets._calculate_unlimited_stake_amount
+        )
     )
 
     hyperopt.start()
 
-    assert hyperopt.backtesting.strategy.max_open_trades == 8
+    assert hyperopt.hyperopter.backtesting.strategy.max_open_trades == 8
     assert hyperopt.config["max_open_trades"] == 8