import collections
import importlib
import logging
import re
import shutil
import threading
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Dict, Tuple, TypedDict

import numpy as np
import pandas as pd
import psutil
import rapidjson
from joblib import dump, load
from joblib.externals import cloudpickle
from numpy.typing import NDArray
from pandas import DataFrame

from freqtrade.configuration import TimeRange
from freqtrade.constants import Config
from freqtrade.data.history import load_pair_history
from freqtrade.enums import CandleType
from freqtrade.exceptions import OperationalException
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.strategy.interface import IStrategy


logger = logging.getLogger(__name__)

FEATURE_PIPELINE = "feature_pipeline"
LABEL_PIPELINE = "label_pipeline"
TRAINDF = "trained_df"
METADATA = "metadata"


class pair_info(TypedDict):
    model_filename: str
    trained_timestamp: int
    data_path: str
    extras: dict


class FreqaiDataDrawer:
    """
    Class aimed at holding all pair models/info in memory for better
    inferencing/retraining/saving/loading to/from disk.
    This object remains persistent throughout live/dry runs.

    Record of contribution:
    FreqAI was developed by a group of individuals who all contributed specific skillsets to the
    project.

    Conception and software development:
    Robert Caulk @robcaulk

    Theoretical brainstorming:
    Elin Törnquist @th0rntwig

    Code review, software architecture brainstorming:
    @xmatthias

    Beta testing and bug reporting:
    @bloodhunter4rc, Salah Lamkadem @ikonx, @ken11o2, @longyu, @paranoidandy, @smidelis, @smarm,
    Juha Nykänen @suikula, Wagner Costa @wagnercosta, Johan Vlugt @Jooopieeert
    """

    def __init__(self, full_path: Path, config: Config):
        self.config = config
        self.freqai_info = config.get("freqai", {})
        # dictionary holding all pair metadata necessary to load in from disk
        self.pair_dict: Dict[str, pair_info] = {}
        # dictionary holding all actively inferenced models in memory given a model filename
        self.model_dictionary: Dict[str, Any] = {}
        # all additional metadata that we want to keep in ram
        self.meta_data_dictionary: Dict[str, Dict[str, Any]] = {}
        self.model_return_values: Dict[str, DataFrame] = {}
        self.historic_data: Dict[str, Dict[str, DataFrame]] = {}
        self.historic_predictions: Dict[str, DataFrame] = {}
        self.full_path = full_path
        self.historic_predictions_path = Path(self.full_path / "historic_predictions.pkl")
        self.historic_predictions_bkp_path = Path(
            self.full_path / "historic_predictions.backup.pkl")
        self.pair_dictionary_path = Path(self.full_path / "pair_dictionary.json")
        self.global_metadata_path = Path(self.full_path / "global_metadata.json")
        self.metric_tracker_path = Path(self.full_path / "metric_tracker.json")
        self.load_drawer_from_disk()
        self.load_historic_predictions_from_disk()
        self.metric_tracker: Dict[str, Dict[str, Dict[str, list]]] = {}
        self.load_metric_tracker_from_disk()
        self.training_queue: Dict[str, int] = {}
        self.history_lock = threading.Lock()
        self.save_lock = threading.Lock()
        self.pair_dict_lock = threading.Lock()
        self.metric_tracker_lock = threading.Lock()
        self.old_DBSCAN_eps: Dict[str, float] = {}
        self.empty_pair_dict: pair_info = {
            "model_filename": "", "trained_timestamp": 0,
            "data_path": "", "extras": {}}
        self.model_type = self.freqai_info.get('model_save_type', 'joblib')
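
    # Illustrative construction sketch (hypothetical, not part of this module):
    # the drawer is normally built once at startup by the FreqAI model wrapper,
    # roughly as `FreqaiDataDrawer(full_path, config)`, where `full_path` points
    # at the identifier's model folder and `config` is the loaded bot config.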

    def update_metric_tracker(self, metric: str, value: float, pair: str) -> None:
        """
        General utility for adding and updating custom metrics. Typically used
        for adding training performance, train timings, inference timings, cpu loads etc.
        """
        with self.metric_tracker_lock:
            if pair not in self.metric_tracker:
                self.metric_tracker[pair] = {}
            if metric not in self.metric_tracker[pair]:
                self.metric_tracker[pair][metric] = {'timestamp': [], 'value': []}

            timestamp = int(datetime.now(timezone.utc).timestamp())
            self.metric_tracker[pair][metric]['value'].append(value)
            self.metric_tracker[pair][metric]['timestamp'].append(timestamp)
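
    # The resulting in-memory shape, with illustrative values:
    #   self.metric_tracker = {
    #       "BTC/USDT": {"train_time": {"timestamp": [1665340962], "value": [12.3]}}
    #   }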

    def collect_metrics(self, time_spent: float, pair: str):
        """
        Add metrics to the metric tracker dictionary
        """
        load1, load5, load15 = psutil.getloadavg()
        cpus = psutil.cpu_count()
        self.update_metric_tracker('train_time', time_spent, pair)
        self.update_metric_tracker('cpu_load1min', load1 / cpus, pair)
        self.update_metric_tracker('cpu_load5min', load5 / cpus, pair)
        self.update_metric_tracker('cpu_load15min', load15 / cpus, pair)
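
    # Note: the load averages are divided by cpu_count(), so the tracked values
    # are normalized system loads where 1.0 roughly means all cores busy.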

    def load_global_metadata_from_disk(self):
        """
        Locate and load previously saved global metadata from the present model folder.
        """
        exists = self.global_metadata_path.is_file()
        if exists:
            with self.global_metadata_path.open("r") as fp:
                metadata_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
                return metadata_dict
        return {}
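
    # The returned dict mirrors whatever was saved via save_global_metadata_to_disk;
    # e.g. get_timerange_from_live_historic_predictions() expects it to contain a
    # "start_dry_live_date" timestamp.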

    def load_drawer_from_disk(self):
        """
        Locate and load a previously saved data drawer, full of all pair model metadata,
        from the present model folder.
        """
        exists = self.pair_dictionary_path.is_file()
        if exists:
            with self.pair_dictionary_path.open("r") as fp:
                self.pair_dict = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
        else:
            logger.info("Could not find existing datadrawer, starting from scratch")

    def load_metric_tracker_from_disk(self):
        """
        Tries to load an existing metrics dictionary if the user
        wants to collect metrics.
        """
        if self.freqai_info.get('write_metrics_to_disk', False):
            exists = self.metric_tracker_path.is_file()
            if exists:
                with self.metric_tracker_path.open("r") as fp:
                    self.metric_tracker = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
                logger.info("Loading existing metric tracker from disk.")
            else:
                logger.info("Could not find existing metric tracker, starting from scratch")

    def load_historic_predictions_from_disk(self):
        """
        Locate and load previously saved historic predictions.
        :return: bool - whether or not the drawer was located
        """
        exists = self.historic_predictions_path.is_file()
        if exists:
            try:
                with self.historic_predictions_path.open("rb") as fp:
                    self.historic_predictions = cloudpickle.load(fp)
                logger.info(
                    f"Found existing historic predictions at {self.full_path}, but beware "
                    "that statistics may be inaccurate if the bot has been offline for "
                    "an extended period of time."
                )
            except EOFError:
                logger.warning(
                    'Historical prediction file was corrupted. Trying to load backup file.')
                with self.historic_predictions_bkp_path.open("rb") as fp:
                    self.historic_predictions = cloudpickle.load(fp)
                logger.warning(
                    'FreqAI successfully loaded the backup historical predictions file.')
        else:
            logger.info("Could not find existing historic_predictions, starting from scratch")

        return exists

    def save_historic_predictions_to_disk(self):
        """
        Save historic predictions pickle to disk
        """
        with self.historic_predictions_path.open("wb") as fp:
            cloudpickle.dump(self.historic_predictions, fp, protocol=cloudpickle.DEFAULT_PROTOCOL)

        # create a backup
        shutil.copy(self.historic_predictions_path, self.historic_predictions_bkp_path)
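
    # The .backup.pkl copy written here is the file that
    # load_historic_predictions_from_disk() falls back to when the primary
    # pickle raises EOFError (e.g. after a crash mid-write).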

    def save_metric_tracker_to_disk(self):
        """
        Save metric tracker of all pair metrics collected.
        """
        with self.save_lock:
            with self.metric_tracker_path.open('w') as fp:
                rapidjson.dump(self.metric_tracker, fp, default=self.np_encoder,
                               number_mode=rapidjson.NM_NATIVE)

    def save_drawer_to_disk(self) -> None:
        """
        Save data drawer full of all pair model metadata in present model folder.
        """
        with self.save_lock:
            with self.pair_dictionary_path.open('w') as fp:
                rapidjson.dump(self.pair_dict, fp, default=self.np_encoder,
                               number_mode=rapidjson.NM_NATIVE)

    def save_global_metadata_to_disk(self, metadata: Dict[str, Any]):
        """
        Save global metadata json to disk
        """
        with self.save_lock:
            with self.global_metadata_path.open('w') as fp:
                rapidjson.dump(metadata, fp, default=self.np_encoder,
                               number_mode=rapidjson.NM_NATIVE)

    def np_encoder(self, obj):
        # convert numpy scalar types to native Python types for JSON serialization
        if isinstance(obj, np.generic):
            return obj.item()
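
    # np_encoder is passed as rapidjson's `default` hook so numpy scalars
    # (e.g. np.float32, np.int64) are converted to native Python types before
    # serialization, as in:
    #   rapidjson.dump(data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)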

    def get_pair_dict_info(self, pair: str) -> Tuple[str, int]:
        """
        Locate and load existing model metadata from persistent storage. If not located,
        create a new record, append the current pair to it, and prepare it for its first
        training.
        :param pair: str: pair to lookup
        :return:
            model_filename: str = unique filename used for loading persistent objects from disk
            trained_timestamp: int = the last time the coin was trained
        """
        pair_dict = self.pair_dict.get(pair)

        if pair_dict:
            model_filename = pair_dict["model_filename"]
            trained_timestamp = pair_dict["trained_timestamp"]
        else:
            self.pair_dict[pair] = self.empty_pair_dict.copy()
            model_filename = ""
            trained_timestamp = 0

        return model_filename, trained_timestamp
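
    # Usage sketch (hypothetical): an untrained pair returns sentinel values,
    # which callers use to trigger an initial training:
    #   model_filename, trained_timestamp = drawer.get_pair_dict_info("BTC/USDT")
    #   # -> ("", 0) until the pair has been trained once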

    def set_pair_dict_info(self, metadata: dict) -> None:
        pair_in_dict = self.pair_dict.get(metadata["pair"])
        if pair_in_dict:
            return
        else:
            self.pair_dict[metadata["pair"]] = self.empty_pair_dict.copy()
            return

    def set_initial_return_values(self, pair: str,
                                  pred_df: DataFrame,
                                  dataframe: DataFrame
                                  ) -> None:
        """
        Set the initial return values to the historical predictions dataframe. This avoids
        needing to repredict on historical candles, and also stores historical predictions
        despite retrainings (so stored predictions are true predictions, not just inferencing
        on trained data).

        We also aim to keep the dates from the historical predictions so that FreqUI displays
        zeros during any downtime (between FreqAI reloads).
        """

        new_pred = pred_df.copy()
        # set new_pred values to nans (we want to signal to user that there was nothing
        # historically made during downtime. The newest pred will get appended later in
        # append_model_predictions)
        new_pred.iloc[:, :] = np.nan
        new_pred["date"] = dataframe["date"]

        hist_preds = self.historic_predictions[pair].copy()
        # rename date_pred column to date so that we can merge on date
        hist_preds = hist_preds.rename(columns={"date_pred": "date"})

        # find the closest common date between new_pred and historic predictions
        # and cut off the new_pred dataframe at that date
        common_dates = pd.merge(new_pred, hist_preds, on="date", how="inner")
        if len(common_dates.index) > 0:
            new_pred = new_pred.iloc[len(common_dates):]
        else:
            logger.error("No common dates found between new predictions and historic "
                         "predictions. You likely left your FreqAI instance offline for "
                         f"more than {len(dataframe.index)} candles.")

        df_concat = pd.concat([hist_preds, new_pred], ignore_index=True, keys=hist_preds.keys())

        # remove last row because we will append that later in append_model_predictions()
        df_concat = df_concat.iloc[:-1]
        # any missing values will get zeroed out so users can see the exact
        # downtime in FreqUI
        df_concat = df_concat.fillna(0)

        # rename date column back to date_pred
        df_concat = df_concat.rename(columns={"date": "date_pred"})

        self.historic_predictions[pair] = df_concat

        self.model_return_values[pair] = df_concat.tail(
            len(dataframe.index)).reset_index(drop=True)
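
    # Worked illustration (hypothetical candle indices): if historic predictions
    # end at candle t100 and pred_df covers t90..t110, the inner merge finds 11
    # common dates (t90..t100), so only the NaN rows for t101..t110 are appended;
    # fillna(0) then renders that downtime as zeros in FreqUI.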

    def append_model_predictions(self, pair: str, predictions: DataFrame,
                                 do_preds: NDArray[np.int_],
                                 dk: FreqaiDataKitchen, strat_df: DataFrame) -> None:
        """
        Append model predictions to the historic predictions dataframe, then set the
        strategy return dataframe to the tail of the historic predictions. The length of
        the tail is equivalent to the length of the dataframe that entered FreqAI from
        the strategy originally. Doing this allows FreqUI to always display the correct
        historic predictions.
        """

        len_df = len(strat_df)
        index = self.historic_predictions[pair].index[-1:]
        columns = self.historic_predictions[pair].columns

        nan_df = pd.DataFrame(np.nan, index=index, columns=columns)
        self.historic_predictions[pair] = pd.concat(
            [self.historic_predictions[pair], nan_df], ignore_index=True, axis=0)
        df = self.historic_predictions[pair]

        # model outputs and associated statistics
        for label in predictions.columns:
            df[label].iloc[-1] = predictions[label].iloc[-1]
            if df[label].dtype == object:
                continue
            df[f"{label}_mean"].iloc[-1] = dk.data["labels_mean"][label]
            df[f"{label}_std"].iloc[-1] = dk.data["labels_std"][label]

        # outlier indicators
        df["do_predict"].iloc[-1] = do_preds[-1]
        if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
            df["DI_values"].iloc[-1] = dk.DI_values[-1]

        # extra values the user added within custom prediction model
        if dk.data['extra_returns_per_train']:
            rets = dk.data['extra_returns_per_train']
            for return_str in rets:
                df[return_str].iloc[-1] = rets[return_str]

        # this logic carries users between versions without needing to
        # change their identifier
        if 'close_price' not in df.columns:
            df['close_price'] = np.nan
            df['date_pred'] = np.nan

        df['close_price'].iloc[-1] = strat_df['close'].iloc[-1]
        df['date_pred'].iloc[-1] = strat_df['date'].iloc[-1]

        self.model_return_values[pair] = df.tail(len_df).reset_index(drop=True)

    def attach_return_values_to_return_dataframe(
            self, pair: str, dataframe: DataFrame) -> DataFrame:
        """
        Attach the return values to the strategy dataframe
        :param dataframe: DataFrame = strategy dataframe
        :return: DataFrame = strategy dataframe with return values attached
        """
        df = self.model_return_values[pair]
        to_keep = [col for col in dataframe.columns if not col.startswith("&")]
        dataframe = pd.concat([dataframe[to_keep], df], axis=1)
        return dataframe

    def return_null_values_to_strategy(self, dataframe: DataFrame, dk: FreqaiDataKitchen) -> None:
        """
        Build a zero-filled dataframe to return to the strategy
        """

        dk.find_features(dataframe)
        dk.find_labels(dataframe)

        full_labels = dk.label_list + dk.unique_class_list

        for label in full_labels:
            dataframe[label] = 0
            dataframe[f"{label}_mean"] = 0
            dataframe[f"{label}_std"] = 0

        dataframe["do_predict"] = 0

        if self.freqai_info["feature_parameters"].get("DI_threshold", 0) > 0:
            dataframe["DI_values"] = 0

        if dk.data['extra_returns_per_train']:
            rets = dk.data['extra_returns_per_train']
            for return_str in rets:
                dataframe[return_str] = 0

        dk.return_dataframe = dataframe

    def purge_old_models(self) -> None:

        num_keep = self.freqai_info["purge_old_models"]
        if not num_keep:
            return
        elif isinstance(num_keep, bool):
            num_keep = 2

        model_folders = [x for x in self.full_path.iterdir() if x.is_dir()]

        pattern = re.compile(r"sub-train-(\w+)_(\d{10})")

        delete_dict: Dict[str, Any] = {}

        for directory in model_folders:
            result = pattern.match(str(directory.name))
            if result is None:
                continue
            coin = result.group(1)
            timestamp = result.group(2)

            if coin not in delete_dict:
                delete_dict[coin] = {}
                delete_dict[coin]["num_folders"] = 1
                delete_dict[coin]["timestamps"] = {int(timestamp): directory}
            else:
                delete_dict[coin]["num_folders"] += 1
                delete_dict[coin]["timestamps"][int(timestamp)] = directory

        for coin in delete_dict:
            if delete_dict[coin]["num_folders"] > num_keep:
                sorted_dict = collections.OrderedDict(
                    sorted(delete_dict[coin]["timestamps"].items())
                )
                num_delete = len(sorted_dict) - num_keep
                deleted = 0
                for _, v in sorted_dict.items():
                    if deleted >= num_delete:
                        break
                    logger.info(f"Freqai purging old model file {v}")
                    shutil.rmtree(v)
                    deleted += 1
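
    # Config sketch: `"purge_old_models": true` in the freqai config keeps the 2
    # newest sub-train folders per coin (the boolean is coerced to 2 above), while
    # an integer such as 4 keeps that many instead.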

    def save_metadata(self, dk: FreqaiDataKitchen) -> None:
        """
        Saves only metadata for backtesting studies if user prefers
        not to save model data. This saves tremendous amounts of space
        for users generating huge studies.
        This is only active when `save_backtest_models: false` (not default)
        """
        if not dk.data_path.is_dir():
            dk.data_path.mkdir(parents=True, exist_ok=True)

        save_path = Path(dk.data_path)

        dk.data["data_path"] = str(dk.data_path)
        dk.data["model_filename"] = str(dk.model_filename)
        dk.data["training_features_list"] = list(dk.data_dictionary["train_features"].columns)
        dk.data["label_list"] = dk.label_list

        with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
            rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)

        return

    def save_data(self, model: Any, coin: str, dk: FreqaiDataKitchen) -> None:
        """
        Saves all data associated with a model for a single sub-train time range
        :param model: User trained model which can be reused for inferencing to generate
            predictions
        """

        if not dk.data_path.is_dir():
            dk.data_path.mkdir(parents=True, exist_ok=True)

        save_path = Path(dk.data_path)

        # Save the trained model
        if self.model_type == 'joblib':
            dump(model, save_path / f"{dk.model_filename}_model.joblib")
        elif self.model_type == 'keras':
            model.save(save_path / f"{dk.model_filename}_model.h5")
        elif self.model_type in ["stable_baselines3", "sb3_contrib", "pytorch"]:
            model.save(save_path / f"{dk.model_filename}_model.zip")

        dk.data["data_path"] = str(dk.data_path)
        dk.data["model_filename"] = str(dk.model_filename)
        dk.data["training_features_list"] = dk.training_features_list
        dk.data["label_list"] = dk.label_list
        # store the metadata
        with (save_path / f"{dk.model_filename}_{METADATA}.json").open("w") as fp:
            rapidjson.dump(dk.data, fp, default=self.np_encoder, number_mode=rapidjson.NM_NATIVE)

        # save the pipelines to pickle files
        with (save_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("wb") as fp:
            cloudpickle.dump(dk.feature_pipeline, fp)

        with (save_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("wb") as fp:
            cloudpickle.dump(dk.label_pipeline, fp)

        # save the train data to file for post processing if desired
        dk.data_dictionary["train_features"].to_pickle(
            save_path / f"{dk.model_filename}_{TRAINDF}.pkl"
        )

        dk.data_dictionary["train_dates"].to_pickle(
            save_path / f"{dk.model_filename}_trained_dates_df.pkl"
        )

        self.model_dictionary[coin] = model
        self.pair_dict[coin]["model_filename"] = dk.model_filename
        self.pair_dict[coin]["data_path"] = str(dk.data_path)

        if coin not in self.meta_data_dictionary:
            self.meta_data_dictionary[coin] = {}
        self.meta_data_dictionary[coin][METADATA] = dk.data
        self.meta_data_dictionary[coin][FEATURE_PIPELINE] = dk.feature_pipeline
        self.meta_data_dictionary[coin][LABEL_PIPELINE] = dk.label_pipeline
        self.save_drawer_to_disk()

        return
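
    # Files written per sub-train by save_data (names derived from the constants above):
    #   <model_filename>_model.{joblib|h5|zip}    trained model
    #   <model_filename>_metadata.json            dk.data metadata
    #   <model_filename>_feature_pipeline.pkl     feature pipeline
    #   <model_filename>_label_pipeline.pkl       label pipeline
    #   <model_filename>_trained_df.pkl           training features
    #   <model_filename>_trained_dates_df.pkl     training dates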

    def load_metadata(self, dk: FreqaiDataKitchen) -> None:
        """
        Load only metadata into datakitchen to increase performance during
        presaved backtesting (prediction file loading).
        """
        with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
            dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)
            dk.training_features_list = dk.data["training_features_list"]
            dk.label_list = dk.data["label_list"]

    def load_data(self, coin: str, dk: FreqaiDataKitchen) -> Any:  # noqa: C901
        """
        Loads all data required to make a prediction on a sub-train time range.
        :returns:
            :model: User trained model which can be inferenced for new predictions
        """

        if not self.pair_dict[coin]["model_filename"]:
            return None

        if dk.live:
            dk.model_filename = self.pair_dict[coin]["model_filename"]
            dk.data_path = Path(self.pair_dict[coin]["data_path"])

        if coin in self.meta_data_dictionary:
            dk.data = self.meta_data_dictionary[coin][METADATA]
            dk.feature_pipeline = self.meta_data_dictionary[coin][FEATURE_PIPELINE]
            dk.label_pipeline = self.meta_data_dictionary[coin][LABEL_PIPELINE]
        else:
            with (dk.data_path / f"{dk.model_filename}_{METADATA}.json").open("r") as fp:
                dk.data = rapidjson.load(fp, number_mode=rapidjson.NM_NATIVE)

            with (dk.data_path / f"{dk.model_filename}_{FEATURE_PIPELINE}.pkl").open("rb") as fp:
                dk.feature_pipeline = cloudpickle.load(fp)
            with (dk.data_path / f"{dk.model_filename}_{LABEL_PIPELINE}.pkl").open("rb") as fp:
                dk.label_pipeline = cloudpickle.load(fp)

        dk.training_features_list = dk.data["training_features_list"]
        dk.label_list = dk.data["label_list"]

        # initialize so the sanity check below cannot hit an unbound name
        model: Any = None
        # try to access model in memory instead of loading object from disk to save time
        if dk.live and coin in self.model_dictionary:
            model = self.model_dictionary[coin]
        elif self.model_type == 'joblib':
            model = load(dk.data_path / f"{dk.model_filename}_model.joblib")
        elif 'stable_baselines' in self.model_type or 'sb3_contrib' == self.model_type:
            mod = importlib.import_module(
                self.model_type, self.freqai_info['rl_config']['model_type'])
            MODELCLASS = getattr(mod, self.freqai_info['rl_config']['model_type'])
            model = MODELCLASS.load(dk.data_path / f"{dk.model_filename}_model")
        elif self.model_type == 'pytorch':
            import torch
            checkpoint = torch.load(dk.data_path / f"{dk.model_filename}_model.zip")
            model = checkpoint["pytrainer"]
            model = model.load_from_checkpoint(checkpoint)

        if not model:
            raise OperationalException(
                f"Unable to load model, ensure model exists at {dk.data_path}"
            )

        # load it into ram if it was loaded from disk
        if coin not in self.model_dictionary:
            self.model_dictionary[coin] = model

        return model

    def update_historic_data(self, strategy: IStrategy, dk: FreqaiDataKitchen) -> None:
        """
        Append new candles to our stored historic data (in memory) so that
        we do not need to load candle history from disk and we do not need to
        ping the exchange multiple times for the same candle.
        :param dataframe: DataFrame = strategy provided dataframe
        """
        feat_params = self.freqai_info["feature_parameters"]
        with self.history_lock:
            history_data = self.historic_data

            for pair in dk.all_pairs:
                for tf in feat_params.get("include_timeframes"):
                    hist_df = history_data[pair][tf]
                    # check if newest candle is already appended
                    df_dp = strategy.dp.get_pair_dataframe(pair, tf)
                    if len(df_dp.index) == 0:
                        continue
                    if str(hist_df.iloc[-1]["date"]) == str(
                            df_dp.iloc[-1:]["date"].iloc[-1]
                    ):
                        continue

                    try:
                        index = (
                            df_dp.loc[
                                df_dp["date"] == hist_df.iloc[-1]["date"]
                            ].index[0]
                            + 1
                        )
                    except IndexError:
                        if hist_df.iloc[-1]['date'] < df_dp['date'].iloc[0]:
                            raise OperationalException("In memory historical data is older than "
                                                       f"oldest DataProvider candle for {pair} on "
                                                       f"timeframe {tf}")
                        else:
                            index = -1
                            logger.warning(
                                f"No common dates in historical data and dataprovider for {pair}. "
                                "Appending latest dataprovider candle to historical data "
                                "but please be aware that there is likely a gap in the historical "
                                "data. \n"
                                f"Historical data ends at {hist_df.iloc[-1]['date']} "
                                f"while dataprovider starts at {df_dp['date'].iloc[0]} and "
                                f"ends at {df_dp['date'].iloc[-1]}."
                            )

                    history_data[pair][tf] = pd.concat(
                        [
                            hist_df,
                            df_dp.iloc[index:],
                        ],
                        ignore_index=True,
                        axis=0,
                    )

        self.current_candle = history_data[dk.pair][self.config['timeframe']].iloc[-1]['date']

    def load_all_pair_histories(self, timerange: TimeRange, dk: FreqaiDataKitchen) -> None:
        """
        Load pair histories for all whitelist and corr_pairlist pairs.
        Only called once upon startup of bot.
        :param timerange: TimeRange = full timerange required to populate all indicators
            for training according to user defined train_period_days
        """
        history_data = self.historic_data

        for pair in dk.all_pairs:
            if pair not in history_data:
                history_data[pair] = {}
            for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
                history_data[pair][tf] = load_pair_history(
                    datadir=self.config["datadir"],
                    timeframe=tf,
                    pair=pair,
                    timerange=timerange,
                    data_format=self.config.get("dataformat_ohlcv", "feather"),
                    candle_type=self.config.get("candle_type_def", CandleType.SPOT),
                )

    def get_base_and_corr_dataframes(
        self, timerange: TimeRange, pair: str, dk: FreqaiDataKitchen
    ) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
        """
        Searches through our historic_data in memory and returns the dataframes relevant
        to the present pair.
        :param timerange: TimeRange = full timerange required to populate all indicators
            for training according to user defined train_period_days
        :param pair: str = strategy furnished pair
        """
        with self.history_lock:
            corr_dataframes: Dict[Any, Any] = {}
            base_dataframes: Dict[Any, Any] = {}
            historic_data = self.historic_data
            pairs = self.freqai_info["feature_parameters"].get(
                "include_corr_pairlist", []
            )

            for tf in self.freqai_info["feature_parameters"].get("include_timeframes"):
                base_dataframes[tf] = dk.slice_dataframe(
                    timerange, historic_data[pair][tf]).reset_index(drop=True)
                if pairs:
                    for p in pairs:
                        if pair in p:
                            continue  # dont repeat anything from whitelist
                        if p not in corr_dataframes:
                            corr_dataframes[p] = {}
                        corr_dataframes[p][tf] = dk.slice_dataframe(
                            timerange, historic_data[p][tf]
                        ).reset_index(drop=True)

        return corr_dataframes, base_dataframes

    def get_timerange_from_live_historic_predictions(self) -> TimeRange:
        """
        Returns timerange information based on historic predictions file
        :return: timerange calculated from saved live data
        """
        if not self.historic_predictions_path.is_file():
            raise OperationalException(
                'Historic predictions not found. Historic predictions data is required '
                'to run backtest with the freqai-backtest-live-models option '
            )

        self.load_historic_predictions_from_disk()

        all_pairs_end_dates = []
        for pair in self.historic_predictions:
            pair_historic_data = self.historic_predictions[pair]
            all_pairs_end_dates.append(pair_historic_data.date_pred.max())

        global_metadata = self.load_global_metadata_from_disk()
        start_date = datetime.fromtimestamp(int(global_metadata["start_dry_live_date"]))
        end_date = max(all_pairs_end_dates)
        # add 1 day to string timerange to ensure BT module will load all dataframe data
        end_date = end_date + timedelta(days=1)
        backtesting_timerange = TimeRange(
            'date', 'date', int(start_date.timestamp()), int(end_date.timestamp())
        )
        return backtesting_timerange
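
    # Usage sketch (hypothetical values): with "start_dry_live_date" = 1664000000
    # stored in global_metadata.json and the newest date_pred on 2022-10-20, the
    # returned TimeRange spans 1664000000 .. 2022-10-21 (one extra day of headroom
    # so the backtesting module loads all required dataframe data).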