import collections
import copy
import json
import logging
import re
import shutil
from pathlib import Path
from typing import Any, Dict, Tuple

# import pickle as pk
import numpy as np
from pandas import DataFrame

# from freqtrade.freqai.data_kitchen import FreqaiDataKitchen

logger = logging.getLogger(__name__)


class FreqaiDataDrawer:
    """
    Class aimed at holding all pair models/info in memory for better inferencing,
    retraining, and saving/loading to/from disk.
    This object remains persistent throughout live/dry, unlike FreqaiDataKitchen, which is
    reinstantiated for each coin.
    """

    def __init__(self, full_path: Path, pair_whitelist, follow_mode: bool = False):
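        """
        :param full_path: base folder where FreqAI artifacts (pair_dictionary.json and
            per-pair model folders) are stored
        :param pair_whitelist: whitelisted pairs (currently only used by the commented-out
            training queue below)
        :param follow_mode: if True, act as a follower: missing pairs are not added to the
            pair dictionary and null values are sent back to the strategy instead
        """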
        # dictionary holding all pair metadata necessary to load in from disk
        self.pair_dict: Dict[str, Any] = {}
        # dictionary holding all actively inferenced models in memory given a model filename
        self.model_dictionary: Dict[str, Any] = {}
        self.model_return_values: Dict[str, Any] = {}
        self.pair_data_dict: Dict[str, Any] = {}
        self.full_path = full_path
        self.follow_mode = follow_mode
        self.load_drawer_from_disk()
        self.training_queue: Dict[str, int] = {}
        # self.create_training_queue(pair_whitelist)

    def load_drawer_from_disk(self):
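        """
        Locate and load a previously saved pair_dictionary.json so pair metadata
        (model filenames, trained timestamps, priorities) persists across runs.
        :returns: bool - whether the dictionary was found on disk
        """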
        exists = Path(self.full_path / str('pair_dictionary.json')).resolve().exists()
        if exists:
            with open(self.full_path / str('pair_dictionary.json'), "r") as fp:
                self.pair_dict = json.load(fp)
        elif not self.follow_mode:
            logger.info("Could not find existing datadrawer, starting from scratch")
        else:
            logger.warning(f'Follower could not find pair_dictionary at {self.full_path}, '
                           'sending null values back to strategy.')

        return exists

    def save_drawer_to_disk(self):
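        """
        Save the current pair_dict to pair_dictionary.json in full_path, using np_encoder
        to convert any numpy scalars into JSON-serializable Python types.
        """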
        with open(self.full_path / str('pair_dictionary.json'), "w") as fp:
            json.dump(self.pair_dict, fp, default=self.np_encoder)

    def np_encoder(self, object):
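        """
        `default` hook for json.dump: convert numpy scalar types to their native Python
        equivalents so they can be serialized.
        """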
        if isinstance(object, np.generic):
            return object.item()

    def get_pair_dict_info(self, metadata: dict) -> Tuple[str, int, bool, bool]:
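        """
        Look up the current pair in pair_dict and return its model filename, trained
        timestamp and whether it is training for the first time. In follow mode, missing
        pairs are not created; return_null_array is flagged instead so the caller can send
        null values back to the strategy.
        :returns: (model_filename, trained_timestamp, coin_first, return_null_array)
        """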
        pair_in_dict = self.pair_dict.get(metadata['pair'])
        data_path_set = self.pair_dict.get(metadata['pair'], {}).get('data_path', None)
        return_null_array = False
        # defaults avoid an UnboundLocalError when a follower asks about a pair that has
        # not been trained yet (neither branch below assigns in that case)
        model_filename = ''
        trained_timestamp = 0
        coin_first = False

        if pair_in_dict:
            model_filename = self.pair_dict[metadata['pair']]['model_filename']
            trained_timestamp = self.pair_dict[metadata['pair']]['trained_timestamp']
            coin_first = self.pair_dict[metadata['pair']]['first']
        elif not self.follow_mode:
            self.pair_dict[metadata['pair']] = {}
            model_filename = self.pair_dict[metadata['pair']]['model_filename'] = ''
            coin_first = self.pair_dict[metadata['pair']]['first'] = True
            trained_timestamp = self.pair_dict[metadata['pair']]['trained_timestamp'] = 0

        if not data_path_set and self.follow_mode:
            logger.warning(f'Follower could not find current pair {metadata["pair"]} in '
                           f'pair_dictionary at path {self.full_path}, sending null values '
                           'back to strategy.')
            return_null_array = True

        return model_filename, trained_timestamp, coin_first, return_null_array

    def set_pair_dict_info(self, metadata: dict) -> None:
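        """
        Ensure the current pair has an entry in pair_dict, creating a default one
        (empty model filename, first=True, trained_timestamp=0, priority at the back
        of the queue) if it does not exist yet.
        """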
        pair_in_dict = self.pair_dict.get(metadata['pair'])
        if pair_in_dict:
            return
        else:
            self.pair_dict[metadata['pair']] = {}
            self.pair_dict[metadata['pair']]['model_filename'] = ''
            self.pair_dict[metadata['pair']]['first'] = True
            self.pair_dict[metadata['pair']]['trained_timestamp'] = 0
            self.pair_dict[metadata['pair']]['priority'] = len(self.pair_dict)
        return

    # def create_training_queue(self, pairs: list) -> None:
    #     for i, pair in enumerate(pairs):
    #         self.training_queue[pair] = i + 1

    def pair_to_end_of_training_queue(self, pair: str) -> None:
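        """
        Send the given pair to the back of the training queue: every pair's priority is
        decremented (moving it up the queue) and this pair gets the lowest priority.
        """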
        # march all pairs up in the queue
        for p in self.pair_dict:
            self.pair_dict[p]['priority'] -= 1
        # send pair to end of queue
        self.pair_dict[pair]['priority'] = len(self.pair_dict)

    def set_initial_return_values(self, pair: str, dh):
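        """
        Seed the cached return values for a pair from a freshly populated data kitchen (dh)
        so that later candles only need to be appended.
        """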
        self.model_return_values[pair] = {}
        self.model_return_values[pair]['predictions'] = dh.full_predictions
        self.model_return_values[pair]['do_preds'] = dh.full_do_predict
        self.model_return_values[pair]['target_mean'] = dh.full_target_mean
        self.model_return_values[pair]['target_std'] = dh.full_target_std

        # if not self.follow_mode:
        #     self.save_model_return_values_to_disk()

    def append_model_predictions(self, pair: str, predictions, do_preds,
                                 target_mean, target_std, dh, len_df) -> None:
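        """
        Append the newest prediction, do_predict flag and target mean/std to the cached
        return values for this pair, keep the arrays the same length as the dataframe the
        strategy passed in, then copy the arrays back onto dh.
        """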
        # The strategy seems to feed us variable sized dataframes - and since we are trying
        # to build our own return array in the same shape, we need to figure out how the
        # size has changed and adapt our stored/returned info accordingly.
        length_difference = len(self.model_return_values[pair]['predictions']) - len_df
        i = 0

        if length_difference == 0:
            i = 1
        elif length_difference > 0:
            i = length_difference + 1

        self.model_return_values[pair]['predictions'] = np.append(
            self.model_return_values[pair]['predictions'][i:], predictions[-1])
        self.model_return_values[pair]['do_preds'] = np.append(
            self.model_return_values[pair]['do_preds'][i:], do_preds[-1])
        self.model_return_values[pair]['target_mean'] = np.append(
            self.model_return_values[pair]['target_mean'][i:], target_mean)
        self.model_return_values[pair]['target_std'] = np.append(
            self.model_return_values[pair]['target_std'][i:], target_std)

        if length_difference < 0:
            prepend = np.zeros(abs(length_difference) - 1)
            self.model_return_values[pair]['predictions'] = np.insert(
                self.model_return_values[pair]['predictions'], 0, prepend)
            self.model_return_values[pair]['do_preds'] = np.insert(
                self.model_return_values[pair]['do_preds'], 0, prepend)
            self.model_return_values[pair]['target_mean'] = np.insert(
                self.model_return_values[pair]['target_mean'], 0, prepend)
            self.model_return_values[pair]['target_std'] = np.insert(
                self.model_return_values[pair]['target_std'], 0, prepend)

        dh.full_predictions = copy.deepcopy(self.model_return_values[pair]['predictions'])
        dh.full_do_predict = copy.deepcopy(self.model_return_values[pair]['do_preds'])
        dh.full_target_mean = copy.deepcopy(self.model_return_values[pair]['target_mean'])
        dh.full_target_std = copy.deepcopy(self.model_return_values[pair]['target_std'])

        # if not self.follow_mode:
        #     self.save_model_return_values_to_disk()

    def return_null_values_to_strategy(self, dataframe: DataFrame, dh) -> None:
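        """
        Fill dh with zero-valued arrays matching the length of the strategy dataframe so
        the strategy still receives correctly shaped (but null) values.
        """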
        len_df = len(dataframe)
        dh.full_predictions = np.zeros(len_df)
        dh.full_do_predict = np.zeros(len_df)
        dh.full_target_mean = np.zeros(len_df)
        dh.full_target_std = np.zeros(len_df)

    def purge_old_models(self) -> None:
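        """
        Scan the sub-train-* model folders under full_path and delete all but the two most
        recent folders per coin to keep disk usage bounded.
        """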
        model_folders = [x for x in self.full_path.iterdir() if x.is_dir()]

        pattern = re.compile(r"sub-train-(\w+)(\d{10})")

        delete_dict: Dict[str, Any] = {}

        for dir in model_folders:
            result = pattern.match(str(dir.name))
            if result is None:
                # skip folders that are not sub-train folders instead of aborting the scan
                continue
            coin = result.group(1)
            timestamp = result.group(2)

            if coin not in delete_dict:
                delete_dict[coin] = {}
                delete_dict[coin]['num_folders'] = 1
                delete_dict[coin]['timestamps'] = {int(timestamp): dir}
            else:
                delete_dict[coin]['num_folders'] += 1
                delete_dict[coin]['timestamps'][int(timestamp)] = dir

        for coin in delete_dict:
            if delete_dict[coin]['num_folders'] > 2:
                sorted_dict = collections.OrderedDict(
                    sorted(delete_dict[coin]['timestamps'].items()))
                num_delete = len(sorted_dict) - 2
                deleted = 0
                for k, v in sorted_dict.items():
                    if deleted >= num_delete:
                        break
                    logger.info(f'Freqai purging old model file {v}')
                    shutil.rmtree(v)
                    deleted += 1

    # to be used if we want to send predictions directly to the follower instead of forcing
    # follower to load models and inference
    # def save_model_return_values_to_disk(self) -> None:
    #     with open(self.full_path / str('model_return_values.json'), "w") as fp:
    #         json.dump(self.model_return_values, fp, default=self.np_encoder)

    # def load_model_return_values_from_disk(self, dh: FreqaiDataKitchen) -> FreqaiDataKitchen:
    #     exists = Path(self.full_path / str('model_return_values.json')).resolve().exists()
    #     if exists:
    #         with open(self.full_path / str('model_return_values.json'), "r") as fp:
    #             self.model_return_values = json.load(fp)
    #     elif not self.follow_mode:
    #         logger.info("Could not find existing datadrawer, starting from scratch")
    #     else:
    #         logger.warning(f'Follower could not find pair_dictionary at {self.full_path} '
    #                        'sending null values back to strategy')

    #     return exists, dh