2022-05-19 19:15:58 +00:00
|
|
|
# import contextlib
|
2022-05-04 15:42:34 +00:00
|
|
|
import gc
|
2022-05-04 15:53:40 +00:00
|
|
|
import logging
|
2022-05-19 19:15:58 +00:00
|
|
|
# import sys
|
|
|
|
import threading
|
2022-05-04 15:53:40 +00:00
|
|
|
from abc import ABC, abstractmethod
|
2022-05-04 15:42:34 +00:00
|
|
|
from pathlib import Path
|
|
|
|
from typing import Any, Dict, Optional, Tuple
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-06 14:20:52 +00:00
|
|
|
import numpy.typing as npt
|
2022-05-03 08:14:17 +00:00
|
|
|
import pandas as pd
|
|
|
|
from pandas import DataFrame
|
2022-05-04 15:42:34 +00:00
|
|
|
|
2022-05-22 22:06:26 +00:00
|
|
|
from freqtrade.configuration import TimeRange
|
2022-05-06 14:20:52 +00:00
|
|
|
from freqtrade.enums import RunMode
|
2022-05-23 19:05:05 +00:00
|
|
|
from freqtrade.freqai.data_drawer import FreqaiDataDrawer
|
2022-05-06 10:54:49 +00:00
|
|
|
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
|
2022-05-09 13:25:00 +00:00
|
|
|
from freqtrade.strategy.interface import IStrategy
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-04 15:42:34 +00:00
|
|
|
|
2022-05-03 08:14:17 +00:00
|
|
|
# Silence pandas' SettingWithCopy warning: FreqAI slices and reassigns
# dataframes heavily and the warning is noisy.
# NOTE(review): this disables the warning process-wide, not only for this
# module — confirm that is intended.
pd.options.mode.chained_assignment = None

# Module-level logger; handlers/levels are configured by freqtrade's
# logging setup.
logger = logging.getLogger(__name__)
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-23 10:07:09 +00:00
|
|
|
# FIXME: suppress stdout for background training?
|
2022-05-19 19:15:58 +00:00
|
|
|
# class DummyFile(object):
|
|
|
|
# def write(self, x): pass
|
|
|
|
|
|
|
|
|
|
|
|
# @contextlib.contextmanager
|
|
|
|
# def nostdout():
|
|
|
|
# save_stdout = sys.stdout
|
|
|
|
# sys.stdout = DummyFile()
|
|
|
|
# yield
|
|
|
|
# sys.stdout = save_stdout
|
|
|
|
|
|
|
|
|
|
|
|
def threaded(fn):
    """
    Decorator running ``fn`` on a background thread.

    The wrapper starts a new ``threading.Thread`` targeting ``fn`` and returns
    the started thread so callers *may* ``join()`` it. This is backward
    compatible: existing fire-and-forget callers simply ignore the return
    value (previously ``None``).

    :param fn: callable to execute in the background
    :return: wrapper returning the started ``threading.Thread``
    """
    # Local import keeps the module's import block untouched.
    from functools import wraps

    @wraps(fn)  # preserve fn's name/docstring for logging & debugging
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread

    return wrapper
|
|
|
|
|
2022-05-04 15:42:34 +00:00
|
|
|
|
2022-05-03 08:14:17 +00:00
|
|
|
class IFreqaiModel(ABC):
    """
    Class containing all tools for training and prediction in the strategy.

    User models should inherit from this class as shown in
    templates/ExamplePredictionModel.py, where the user overrides
    train(), predict(), fit(), and make_labels().

    Author: Robert Caulk, rob.caulk@gmail.com
    """
|
|
|
|
|
|
|
|
def __init__(self, config: Dict[str, Any]) -> None:
    """
    Store the user configuration, validate the required FreqAI sections and
    build the FreqaiDataDrawer shared across all pairs.

    :param config: full freqtrade configuration dict; must contain a
        'freqai' section (enforced by assert_config below).
    """
    self.config = config
    # Fail fast if the freqai config sections are missing.
    self.assert_config(self.config)
    self.freqai_info = config["freqai"]
    self.data_split_parameters = config["freqai"]["data_split_parameters"]
    self.model_training_parameters = config["freqai"]["model_training_parameters"]
    self.feature_parameters = config["freqai"]["feature_parameters"]
    # self.backtest_timerange = config["timerange"]

    # Runtime state, populated later by start() / start_live().
    self.time_last_trained = None
    self.current_time = None
    self.model = None
    self.predictions = None
    # Acts as a lock: True while a background training thread is running.
    self.training_on_separate_thread = False
    self.retrain = False
    self.first = True
    # if self.freqai_info.get('live_trained_timerange'):
    #     self.new_trained_timerange = TimeRange.parse_timerange(
    #         self.freqai_info['live_trained_timerange'])
    # else:
    #     self.new_trained_timerange = TimeRange()

    # full_path must be set before the drawer can persist per-pair metadata.
    self.set_full_path()
    self.data_drawer = FreqaiDataDrawer(Path(self.full_path))
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-23 10:07:09 +00:00
|
|
|
def assert_config(self, config: Dict[str, Any]) -> None:
    """
    Ensure the configuration contains the FreqAI sections required by
    __init__. Raises AssertionError (as before) on a missing section.

    Note: callers rely on AssertionError being raised, so the assert-based
    validation is kept for interface compatibility.

    :param config: full freqtrade configuration dict
    """
    assert config.get('freqai'), "No Freqai parameters found in config file."
    # The original messages used adjacent string literals without separating
    # spaces, producing garbled text such as
    # "No Freqaidata_split_parametersin config file." — fixed here.
    assert config.get('freqai', {}).get('data_split_parameters'), (
        "No Freqai data_split_parameters found in config file.")
    assert config.get('freqai', {}).get('model_training_parameters'), (
        "No Freqai model_training_parameters found in config file.")
    assert config.get('freqai', {}).get('feature_parameters'), (
        "No Freqai feature_parameters found in config file.")
|
|
|
|
|
2022-05-09 13:25:00 +00:00
|
|
|
def start(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> DataFrame:
    """
    Entry point to the FreqaiModel from a specific pair; trains a new model
    if necessary before making the prediction.

    The backtesting and training paradigm is a sliding training window with a
    following backtest window. Both windows slide according to the length of
    the backtest window. This function is not intended to be overridden by
    children of IFreqaiModel, but technically it can be, if the user wishes
    to make deeper changes to the sliding window logic.

    :param dataframe: Full dataframe coming from strategy - it contains the
        entire backtesting timerange + additional historical data necessary
        to train the model.
    :param metadata: pair metadata coming from the strategy.
    :param strategy: strategy instance (used for runmode and, in live mode,
        to repopulate indicators for retraining).
    :return: tuple of (full_predictions, full_do_predict, full_target_mean,
        full_target_std) covering the whole input dataframe.
    """
    self.live = strategy.dp.runmode in (RunMode.DRY_RUN, RunMode.LIVE)

    # FreqaiDataKitchen is reinstantiated for each coin
    self.dh = FreqaiDataKitchen(self.config, self.data_drawer, self.live, metadata["pair"])

    if self.live:
        # logger.info('testing live')
        self.start_live(dataframe, metadata, strategy)

        return (self.dh.full_predictions, self.dh.full_do_predict,
                self.dh.full_target_mean, self.dh.full_target_std)

    logger.info(f'Training {len(self.dh.training_timeranges)} timeranges')

    # Loop enforcing the sliding window training/backtesting paradigm
    # tr_train is the training time range e.g. 1 historical month
    # tr_backtest is the backtesting time range e.g. the week directly
    # following tr_train. Both of these windows slide through the
    # entire backtest
    for tr_train, tr_backtest in zip(
        self.dh.training_timeranges, self.dh.backtesting_timeranges
    ):
        gc.collect()
        # self.config['timerange'] = tr_train
        self.dh.data = {}  # clean the pair specific data between models
        self.training_timerange = tr_train
        dataframe_train = self.dh.slice_dataframe(tr_train, dataframe)
        dataframe_backtest = self.dh.slice_dataframe(tr_backtest, dataframe)
        logger.info("training %s for %s", metadata["pair"], tr_train)
        trained_timestamp = TimeRange.parse_timerange(tr_train)
        self.dh.data_path = Path(self.dh.full_path /
                                 str("sub-train" + "-" + metadata['pair'].split("/")[0] +
                                     str(int(trained_timestamp.stopts))))
        # Reuse a previously trained model for this window when available.
        if not self.model_exists(metadata["pair"], trained_timestamp=trained_timestamp.stopts):
            self.model = self.train(dataframe_train, metadata)
            self.dh.save_data(self.model)
        else:
            self.model = self.dh.load_data()

        # strategy_provided_features = self.dh.find_features(dataframe_train)
        # # TOFIX doesnt work with PCA
        # if strategy_provided_features != self.dh.training_features_list:
        #     logger.info("User changed input features, retraining model.")
        #     self.model = self.train(dataframe_train, metadata)
        #     self.dh.save_data(self.model)

        preds, do_preds = self.predict(dataframe_backtest, metadata)

        self.dh.append_predictions(preds, do_preds, len(dataframe_backtest))
        # FIX: was a bare print(); use the module logger for consistency with
        # the rest of the file and so output respects log configuration.
        logger.info('predictions %s do_predict %s',
                    len(self.dh.full_predictions), len(self.dh.full_do_predict))

    self.dh.fill_predictions(len(dataframe))

    return (self.dh.full_predictions, self.dh.full_do_predict,
            self.dh.full_target_mean, self.dh.full_target_std)
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-09 13:25:00 +00:00
|
|
|
def start_live(self, dataframe: DataFrame, metadata: dict, strategy: IStrategy) -> None:
    """
    The main broad execution for dry/live. This function will check if a
    retraining should be performed, and if so, retrain and reset the model.

    :param dataframe: full strategy dataframe for the current pair
    :param metadata: pair metadata coming from the strategy
    :param strategy: strategy instance used to repopulate indicators
        when retraining
    """
    (model_filename,
     trained_timestamp,
     coin_first) = self.data_drawer.get_pair_dict_info(metadata)

    # FIX: default to False so the retrain check below cannot hit an
    # UnboundLocalError for a pair that was never trained
    # (trained_timestamp == 0 skips the verification branch).
    file_exists = False
    # FIX: default to None so the retrain/feature-change paths are safely
    # skipped while a background training thread holds the "lock"
    # (previously a NameError was possible on that branch).
    new_trained_timerange = None

    if trained_timestamp != 0:
        self.dh.set_paths(trained_timestamp)
        # data_drawer thinks the file exists, verify here
        file_exists = self.model_exists(metadata['pair'],
                                        trained_timestamp=trained_timestamp,
                                        model_filename=model_filename)

    if not self.training_on_separate_thread:
        # this will also prevent other pairs from trying to train simultaneously.
        (self.retrain,
         new_trained_timerange) = self.dh.check_if_new_training_required(
            trained_timestamp)
        self.dh.set_paths(new_trained_timerange.stopts)
    else:
        logger.info("FreqAI training a new model on background thread.")
        self.retrain = False

    if (self.retrain or not file_exists) and new_trained_timerange is not None:
        if coin_first:
            # First model for this coin: train synchronously so the
            # prediction below has a model to load.
            self.train_model_in_series(new_trained_timerange, metadata, strategy)
        else:
            self.training_on_separate_thread = True  # acts like a lock
            self.retrain_model_on_separate_thread(new_trained_timerange,
                                                  metadata, strategy)

    self.model = self.dh.load_data(coin=metadata['pair'])

    # Retrain immediately if the strategy changed its feature set since the
    # model was last trained.
    strategy_provided_features = self.dh.find_features(dataframe)
    if (strategy_provided_features != self.dh.training_features_list
            and new_trained_timerange is not None):
        self.train_model_in_series(new_trained_timerange, metadata, strategy)

    preds, do_preds = self.predict(dataframe, metadata)
    self.dh.append_predictions(preds, do_preds, len(dataframe))

    return
|
|
|
|
|
2022-05-03 08:14:17 +00:00
|
|
|
def make_labels(self, dataframe: DataFrame) -> DataFrame:
    """
    User defines the labels (target values) here.

    Base implementation is a no-op returning None; subclasses override it
    and return the labelled dataframe.

    :param dataframe: the full dataframe for the present training period
    """
    return None
|
2022-05-03 08:14:17 +00:00
|
|
|
|
2022-05-22 15:51:49 +00:00
|
|
|
def data_cleaning_train(self) -> None:
    """
    Base data cleaning method for train.

    Each step here may drop training points from the filtered_dataframe
    based on user-configured logic. See
    FreqaiDataKitchen::remove_outliers() for an example of how outlier
    data points are dropped from the dataframe used for training.
    """
    feature_cfg = self.freqai_info.get('feature_parameters', {})

    if feature_cfg.get('principal_component_analysis'):
        self.dh.principal_component_analysis()

    # if self.feature_parameters["determine_statistical_distributions"]:
    #     self.dh.determine_statistical_distributions()
    # if self.feature_parameters["remove_outliers"]:
    #     self.dh.remove_outliers(predict=False)

    if feature_cfg.get('use_SVM_to_remove_outliers'):
        self.dh.use_SVM_to_remove_outliers(predict=False)

    if feature_cfg.get('DI_threshold'):
        self.dh.data["avg_mean_dist"] = self.dh.compute_distances()
|
|
|
|
|
|
|
|
def data_cleaning_predict(self, filtered_dataframe: DataFrame) -> None:
    """
    Base data cleaning method for predict.

    Each step here modifies self.dh.do_predict, a vector with the same
    length as the candles coming from (and returning to) the strategy.
    Inside do_predict, 1 allows prediction and < 0 signals to the strategy
    that the model is not confident in the prediction.

    See FreqaiDataKitchen::remove_outliers() for an example of how the
    do_predict vector is modified; do_predict is ultimately passed back to
    the strategy for buy signals.
    """
    feature_cfg = self.freqai_info.get('feature_parameters', {})

    if feature_cfg.get('principal_component_analysis'):
        self.dh.pca_transform()

    # if self.feature_parameters["determine_statistical_distributions"]:
    #     self.dh.determine_statistical_distributions()
    # if self.feature_parameters["remove_outliers"]:
    #     self.dh.remove_outliers(predict=True)  # creates dropped index

    if feature_cfg.get('use_SVM_to_remove_outliers'):
        self.dh.use_SVM_to_remove_outliers(predict=True)

    if feature_cfg.get('DI_threshold'):
        self.dh.check_if_pred_in_training_spaces()  # sets do_predict
|
2022-05-22 15:51:49 +00:00
|
|
|
|
2022-05-23 19:05:05 +00:00
|
|
|
def model_exists(self, pair: str, trained_timestamp: Optional[int] = None,
                 model_filename: str = '') -> bool:
    """
    Given a pair and path, check if a model already exists.

    Also sets self.dh.model_filename as a side effect (used by later
    load/save calls).

    :param pair: pair e.g. BTC/USD
    :param trained_timestamp: end timestamp of the training range embedded
        in the model filename; ignored in live mode when a filename is
        supplied.  (FIX: annotation was the implicit-Optional `int = None`,
        which PEP 484 disallows.)
    :param model_filename: explicit model filename to check (live mode)
    :return: True if "<model_filename>_model.joblib" exists under
        self.dh.data_path
    """
    coin, _ = pair.split("/")

    if self.live and trained_timestamp is None:
        self.dh.model_filename = model_filename
    else:
        self.dh.model_filename = "cb_" + coin.lower() + "_" + str(trained_timestamp)

    path_to_modelfile = Path(self.dh.data_path / str(self.dh.model_filename + "_model.joblib"))
    file_exists = path_to_modelfile.is_file()
    if file_exists:
        logger.info("Found model at %s", self.dh.data_path / self.dh.model_filename)
    else:
        logger.info("Could not find model at %s", self.dh.data_path / self.dh.model_filename)
    return file_exists
|
2022-05-19 19:15:58 +00:00
|
|
|
|
2022-05-23 19:05:05 +00:00
|
|
|
def set_full_path(self) -> None:
    """
    Derive and store self.full_path, the base models directory for this
    FreqAI run: <user_data_dir>/models/<live_full_backtestrange><identifier>.
    """
    run_folder = str(self.freqai_info.get('live_full_backtestrange') +
                     self.freqai_info.get('identifier'))
    self.full_path = Path(self.config['user_data_dir'] / "models" / run_folder)
|
|
|
|
|
2022-05-19 19:15:58 +00:00
|
|
|
@threaded
def retrain_model_on_separate_thread(self, new_trained_timerange: TimeRange, metadata: dict,
                                     strategy: IStrategy):
    """
    Download fresh data, rebuild indicators and retrain the model for one
    pair.  Runs on a background thread via the @threaded decorator; clears
    the ``training_on_separate_thread`` "lock" and the retrain flag when
    finished.

    NOTE(review): assumes only one background training runs at a time (the
    caller sets the lock before invoking this) — confirm against start_live.

    :param new_trained_timerange: timerange to download data for and train on
    :param metadata: pair metadata from the strategy
    :param strategy: strategy instance used to populate indicators
    """
    # with nostdout():
    self.dh.download_new_data_for_retraining(new_trained_timerange, metadata)
    corr_dataframes, base_dataframes = self.dh.load_pairs_histories(new_trained_timerange,
                                                                    metadata)

    unfiltered_dataframe = self.dh.use_strategy_to_populate_indicators(strategy,
                                                                       corr_dataframes,
                                                                       base_dataframes,
                                                                       metadata)

    self.model = self.train(unfiltered_dataframe, metadata)

    # Record the freshest training end-time for this pair in the shared drawer.
    self.data_drawer.pair_dict[metadata['pair']][
        'trained_timestamp'] = new_trained_timerange.stopts

    self.dh.set_new_model_names(metadata, new_trained_timerange)

    self.dh.save_data(self.model, coin=metadata['pair'])

    # Release the "lock" so the next candle / other pairs may trigger training.
    self.training_on_separate_thread = False
    self.retrain = False
|
|
|
|
|
2022-05-22 22:06:26 +00:00
|
|
|
def train_model_in_series(self, new_trained_timerange: TimeRange, metadata: dict,
                          strategy: IStrategy):
    """
    Download fresh data, rebuild indicators and retrain the model for one
    pair synchronously (blocking the calling thread).  Used for a coin's
    first training and when the strategy's feature set changed.

    :param new_trained_timerange: timerange to download data for and train on
    :param metadata: pair metadata from the strategy
    :param strategy: strategy instance used to populate indicators
    """
    self.dh.download_new_data_for_retraining(new_trained_timerange, metadata)
    corr_dataframes, base_dataframes = self.dh.load_pairs_histories(new_trained_timerange,
                                                                    metadata)

    unfiltered_dataframe = self.dh.use_strategy_to_populate_indicators(strategy,
                                                                       corr_dataframes,
                                                                       base_dataframes,
                                                                       metadata)

    self.model = self.train(unfiltered_dataframe, metadata)

    # Record the freshest training end-time for this pair in the shared drawer.
    self.data_drawer.pair_dict[metadata['pair']][
        'trained_timestamp'] = new_trained_timerange.stopts

    self.dh.set_new_model_names(metadata, new_trained_timerange)

    # The pair now has at least one trained model.
    self.data_drawer.pair_dict[metadata['pair']]['first'] = False
    self.dh.save_data(self.model, coin=metadata['pair'])
    self.retrain = False
|
2022-05-23 10:07:09 +00:00
|
|
|
|
|
|
|
# Methods which are overridden by user made prediction models.
|
|
|
|
# See freqai/prediction_models/CatboostPredictionModlel.py for an example.
|
|
|
|
|
|
|
|
@abstractmethod
def train(self, unfiltered_dataframe: DataFrame, metadata: dict) -> Any:
    """
    Filter the training data and train a model to it. Train makes heavy use
    of the data kitchen for storing, saving, loading, and analyzing the data.

    :param unfiltered_dataframe: Full dataframe for the current training period
    :param metadata: pair metadata from strategy.
    :return: Trained model which can be used for inference (self.predict)
    """
|
|
|
|
|
|
|
|
@abstractmethod
def fit(self) -> Any:
    """
    Fit the regressor to the prepared data.

    Most regressors use the same function names and arguments, e.g. the user
    can drop in LGBMRegressor in place of CatBoostRegressor and all data
    management will be properly handled by Freqai.

    Uses the data dictionary constructed by the data kitchen, which holds
    all the training and test data/labels.
    """

    return
|
|
|
|
|
|
|
|
@abstractmethod
def predict(self, dataframe: DataFrame, metadata: dict) -> Tuple[npt.ArrayLike, npt.ArrayLike]:
    """
    Filter the prediction features data and predict with it.

    :param dataframe: Full dataframe for the current backtest period.
    :param metadata: pair metadata from strategy.
    :return: tuple of
        predictions: np.array of predictions
        do_predict: np.array of 1s and 0s indicating places where freqai
            needed to remove data (NaNs) or felt uncertain about the data
            (PCA and DI index)
    """
|