From 075c8c23c8bf50294e4a49b60466291dd63c2522 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 21:16:04 +1100 Subject: [PATCH 1/7] add state/action info to callbacks --- .../prediction_models/ReinforcementLearner.py | 44 +++++++++++++++++-- 1 file changed, 41 insertions(+), 3 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index 61b01e21b..ff39a66e0 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -71,7 +71,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): model.learn( total_timesteps=int(total_timesteps), - callback=self.eval_callback + callback=[self.eval_callback, self.tensorboard_callback] ) if Path(dk.data_path / "best_model.zip").is_file(): @@ -88,6 +88,33 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. """ + def reset(self): + + # Reset custom info + self.custom_info = {} + self.custom_info["Invalid"] = 0 + self.custom_info["Hold"] = 0 + self.custom_info["Unknown"] = 0 + self.custom_info["pnl_factor"] = 0 + self.custom_info["duration_factor"] = 0 + self.custom_info["reward_exit"] = 0 + self.custom_info["reward_hold"] = 0 + for action in Actions: + self.custom_info[f"{action.name}"] = 0 + return super().reset() + + def step(self, action: int): + observation, step_reward, done, info = super().step(action) + info = dict( + tick=self._current_tick, + action=action, + total_reward=self.total_reward, + total_profit=self._total_profit, + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() + ) + return observation, step_reward, done, info def calculate_reward(self, action: int) -> float: """ @@ -100,17 +127,24 @@ class ReinforcementLearner(BaseReinforcementLearningModel): """ # first, penalize if the action is not valid if not self._is_valid(action): + self.custom_info["Invalid"] += 1 return -2 pnl = self.get_unrealized_profit() factor = 100. 
# reward agent for entering trades - if (action in (Actions.Long_enter.value, Actions.Short_enter.value) + if (action ==Actions.Long_enter.value and self._position == Positions.Neutral): + self.custom_info[f"{Actions.Long_enter.name}"] += 1 + return 25 + if (action == Actions.Short_enter.value + and self._position == Positions.Neutral): + self.custom_info[f"{Actions.Short_enter.name}"] += 1 return 25 # discourage agent from not entering trades if action == Actions.Neutral.value and self._position == Positions.Neutral: + self.custom_info[f"{Actions.Neutral.name}"] += 1 return -1 max_trade_duration = self.rl_config.get('max_trade_duration_candles', 300) @@ -124,18 +158,22 @@ class ReinforcementLearner(BaseReinforcementLearningModel): # discourage sitting in position if (self._position in (Positions.Short, Positions.Long) and action == Actions.Neutral.value): + self.custom_info["Hold"] += 1 return -1 * trade_duration / max_trade_duration # close long if action == Actions.Long_exit.value and self._position == Positions.Long: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + self.custom_info[f"{Actions.Long_exit.name}"] += 1 return float(pnl * factor) # close short if action == Actions.Short_exit.value and self._position == Positions.Short: if pnl > self.profit_aim * self.rr: factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) + self.custom_info[f"{Actions.Short_exit.name}"] += 1 return float(pnl * factor) - + + self.custom_info["Unknown"] += 1 return 0. From 469aa0d43fcc7e2176690ab834a3f2be98709e32 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 21:16:46 +1100 Subject: [PATCH 2/7] add state/action info to callbacks --- .../RL/BaseReinforcementLearningModel.py | 49 +++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 81f8edfc4..15acde6fb 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -13,9 +13,11 @@ import torch as th import torch.multiprocessing from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback +from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv +from stable_baselines3.common.logger import HParam from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -155,6 +157,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + self.tensorboard_callback = TensorboardCallback() @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): @@ -398,3 +402,48 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, return env set_random_seed(seed) return _init + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard. 
+ """ + def __init__(self, verbose=1): + super(TensorboardCallback, self).__init__(verbose) + + def _on_training_start(self) -> None: + hparam_dict = { + "algorithm": self.model.__class__.__name__, + "learning_rate": self.model.learning_rate, + "gamma": self.model.gamma, + "gae_lambda": self.model.gae_lambda, + "batch_size": self.model.batch_size, + "n_steps": self.model.n_steps, + } + metric_dict = { + "eval/mean_reward": 0, + "rollout/ep_rew_mean": 0, + "rollout/ep_len_mean":0 , + "train/value_loss": 0, + "train/explained_variance": 0, + } + self.logger.record( + "hparams", + HParam(hparam_dict, metric_dict), + exclude=("stdout", "log", "json", "csv"), + ) + + def _on_step(self) -> bool: + custom_info = self.training_env.get_attr("custom_info")[0] + self.logger.record(f"_state/position", self.locals["infos"][0]["position"]) + self.logger.record(f"_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record(f"_state/current_profit_pct", self.locals["infos"][0]["current_profit_pct"]) + self.logger.record(f"_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record(f"_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean(f"_reward/mean_trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record(f"_actions/action", self.locals["infos"][0]["action"]) + self.logger.record(f"_actions/_Invalid", custom_info["Invalid"]) + self.logger.record(f"_actions/_Unknown", custom_info["Unknown"]) + self.logger.record(f"_actions/Hold", custom_info["Hold"]) + for action in Actions: + self.logger.record(f"_actions/{action.name}", custom_info[action.name]) + return True \ No newline at end of file From d6f45a12ae0778c6de86bd8020a69299ee474d31 Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:30:04 +1100 Subject: [PATCH 3/7] add multiproc fix flake8 --- freqtrade/freqai/prediction_models/ReinforcementLearner.py | 6 +++--- .../prediction_models/ReinforcementLearner_multiproc.py | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index ff39a66e0..fa1087497 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -102,7 +102,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): for action in Actions: self.custom_info[f"{action.name}"] = 0 return super().reset() - + def step(self, action: int): observation, step_reward, done, info = super().step(action) info = dict( @@ -134,7 +134,7 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor = 100. # reward agent for entering trades - if (action ==Actions.Long_enter.value + if (action == Actions.Long_enter.value and self._position == Positions.Neutral): self.custom_info[f"{Actions.Long_enter.name}"] += 1 return 25 @@ -174,6 +174,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): factor *= self.rl_config['model_reward_parameters'].get('win_reward_factor', 2) self.custom_info[f"{Actions.Short_exit.name}"] += 1 return float(pnl * factor) - + self.custom_info["Unknown"] += 1 return 0. 
diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index 56636c1f6..dd5430aa7 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -8,7 +8,7 @@ from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner -from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env +from freqtrade.freqai.RL.BaseReinforcementLearningModel import TensorboardCallback, make_env logger = logging.getLogger(__name__) @@ -49,3 +49,5 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) + + self.tensorboard_callback = TensorboardCallback() From b2edc58089a98994861409a106a8804b9f92270c Mon Sep 17 00:00:00 2001 From: smarmau <42020297+smarmau@users.noreply.github.com> Date: Sat, 3 Dec 2022 22:31:02 +1100 Subject: [PATCH 4/7] fix flake8 --- .../RL/BaseReinforcementLearningModel.py | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index 15acde6fb..b9b6cdd96 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -12,12 +12,11 @@ import pandas as pd import torch as th import torch.multiprocessing from pandas import DataFrame -from stable_baselines3.common.callbacks import EvalCallback -from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.callbacks import BaseCallback, EvalCallback +from stable_baselines3.common.logger import HParam from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv -from stable_baselines3.common.logger import HParam from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen @@ -157,7 +156,7 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.eval_callback = EvalCallback(self.eval_env, deterministic=True, render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - + self.tensorboard_callback = TensorboardCallback() @abstractmethod @@ -403,6 +402,7 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, set_random_seed(seed) return _init + class TensorboardCallback(BaseCallback): """ Custom callback for plotting additional values in tensorboard. 
@@ -422,7 +422,7 @@ class TensorboardCallback(BaseCallback): metric_dict = { "eval/mean_reward": 0, "rollout/ep_rew_mean": 0, - "rollout/ep_len_mean":0 , + "rollout/ep_len_mean": 0, "train/value_loss": 0, "train/explained_variance": 0, } @@ -431,19 +431,21 @@ class TensorboardCallback(BaseCallback): HParam(hparam_dict, metric_dict), exclude=("stdout", "log", "json", "csv"), ) - + def _on_step(self) -> bool: custom_info = self.training_env.get_attr("custom_info")[0] - self.logger.record(f"_state/position", self.locals["infos"][0]["position"]) - self.logger.record(f"_state/trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record(f"_state/current_profit_pct", self.locals["infos"][0]["current_profit_pct"]) - self.logger.record(f"_reward/total_profit", self.locals["infos"][0]["total_profit"]) - self.logger.record(f"_reward/total_reward", self.locals["infos"][0]["total_reward"]) - self.logger.record_mean(f"_reward/mean_trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record(f"_actions/action", self.locals["infos"][0]["action"]) - self.logger.record(f"_actions/_Invalid", custom_info["Invalid"]) - self.logger.record(f"_actions/_Unknown", custom_info["Unknown"]) - self.logger.record(f"_actions/Hold", custom_info["Hold"]) + self.logger.record("_state/position", self.locals["infos"][0]["position"]) + self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record("_state/current_profit_pct", self.locals["infos"] + [0]["current_profit_pct"]) + self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] + [0]["trade_duration"]) + self.logger.record("_actions/action", self.locals["infos"][0]["action"]) + self.logger.record("_actions/_Invalid", custom_info["Invalid"]) + self.logger.record("_actions/_Unknown", custom_info["Unknown"]) + self.logger.record("_actions/Hold", custom_info["Hold"]) for action in Actions: self.logger.record(f"_actions/{action.name}", custom_info[action.name]) - return True \ No newline at end of file + return True From 24766928baddfed919be1138a64d51cdbb0d3764 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 13:54:30 +0100 Subject: [PATCH 5/7] reorganize/generalize tensorboard callback --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 8 ++- freqtrade/freqai/RL/Base5ActionRLEnv.py | 8 ++- freqtrade/freqai/RL/BaseEnvironment.py | 37 ++++++++++- .../RL/BaseReinforcementLearningModel.py | 63 +++---------------- freqtrade/freqai/RL/TensorboardCallback.py | 61 ++++++++++++++++++ .../prediction_models/ReinforcementLearner.py | 27 -------- .../ReinforcementLearner_multiproc.py | 9 +-- 7 files changed, 125 insertions(+), 88 deletions(-) create mode 100644 freqtrade/freqai/RL/TensorboardCallback.py diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index df4e79bea..7818ac51e 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -20,6 +20,9 @@ class Base4ActionRLEnv(BaseEnvironment): """ Base class for a 4 action environment """ + def __init__(self, *args): + super().__init__(*args) + self.actions = Actions def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) @@ -92,9 +95,12 @@ class Base4ActionRLEnv(BaseEnvironment): info = dict( tick=self._current_tick, + action=action, 
total_reward=self.total_reward, total_profit=self._total_profit, - position=self._position.value + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() ) observation = self._get_observation() diff --git a/freqtrade/freqai/RL/Base5ActionRLEnv.py b/freqtrade/freqai/RL/Base5ActionRLEnv.py index 68b2e011b..1c09f9386 100644 --- a/freqtrade/freqai/RL/Base5ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base5ActionRLEnv.py @@ -21,6 +21,9 @@ class Base5ActionRLEnv(BaseEnvironment): """ Base class for a 5 action environment """ + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.actions = Actions def set_action_space(self): self.action_space = spaces.Discrete(len(Actions)) @@ -98,9 +101,12 @@ class Base5ActionRLEnv(BaseEnvironment): info = dict( tick=self._current_tick, + action=action, total_reward=self.total_reward, total_profit=self._total_profit, - position=self._position.value + position=self._position.value, + trade_duration=self.get_trade_duration(), + current_profit_pct=self.get_unrealized_profit() ) observation = self._get_observation() diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index e7bd26a92..3fca6a25d 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -2,7 +2,7 @@ import logging import random from abc import abstractmethod from enum import Enum -from typing import Optional +from typing import Optional, Type import gym import numpy as np @@ -17,6 +17,17 @@ from freqtrade.data.dataprovider import DataProvider logger = logging.getLogger(__name__) +class BaseActions(Enum): + """ + Default action space, mostly used for type handling. + """ + Neutral = 0 + Long_enter = 1 + Long_exit = 2 + Short_enter = 3 + Short_exit = 4 + + class Positions(Enum): Short = 0 Long = 1 @@ -64,6 +75,9 @@ class BaseEnvironment(gym.Env): else: self.fee = 0.0015 + # set here to default 5Ac, but all children envs can overwrite this + self.actions: Type[Enum] = BaseActions + def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): """ @@ -106,6 +120,7 @@ class BaseEnvironment(gym.Env): self._total_unrealized_profit: float = 1 self.history: dict = {} self.trade_history: list = [] + self.custom_info: dict = {} @abstractmethod def set_action_space(self): @@ -118,6 +133,19 @@ class BaseEnvironment(gym.Env): return [seed] def reset(self): + """ + Reset is called at the beginning of every episode + """ + # custom_info is used for episodic reports and tensorboard logging + self.custom_info["Invalid"] = 0 + self.custom_info["Hold"] = 0 + self.custom_info["Unknown"] = 0 + self.custom_info["pnl_factor"] = 0 + self.custom_info["duration_factor"] = 0 + self.custom_info["reward_exit"] = 0 + self.custom_info["reward_hold"] = 0 + for action in self.actions: + self.custom_info[f"{action.name}"] = 0 self._done = False @@ -271,6 +299,13 @@ class BaseEnvironment(gym.Env): def current_price(self) -> float: return self.prices.iloc[self._current_tick].open + def get_actions(self) -> Type[Enum]: + """ + Used by SubprocVecEnv to get actions from + initialized env for tensorboard callback + """ + return self.actions + # Keeping around incase we want to start building more complex environment # templates in the future. 
# def most_recent_return(self): diff --git a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py index b9b6cdd96..5e9b81108 100644 --- a/freqtrade/freqai/RL/BaseReinforcementLearningModel.py +++ b/freqtrade/freqai/RL/BaseReinforcementLearningModel.py @@ -12,8 +12,7 @@ import pandas as pd import torch as th import torch.multiprocessing from pandas import DataFrame -from stable_baselines3.common.callbacks import BaseCallback, EvalCallback -from stable_baselines3.common.logger import HParam +from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.monitor import Monitor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.vec_env import SubprocVecEnv @@ -22,7 +21,8 @@ from freqtrade.exceptions import OperationalException from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.freqai_interface import IFreqaiModel from freqtrade.freqai.RL.Base5ActionRLEnv import Actions, Base5ActionRLEnv -from freqtrade.freqai.RL.BaseEnvironment import Positions +from freqtrade.freqai.RL.BaseEnvironment import BaseActions, Positions +from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback from freqtrade.persistence import Trade @@ -45,8 +45,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): 'cpu_count', 1), max(int(self.max_system_threads / 2), 1)) th.set_num_threads(self.max_threads) self.reward_params = self.freqai_info['rl_config']['model_reward_parameters'] - self.train_env: Union[SubprocVecEnv, gym.Env] = None - self.eval_env: Union[SubprocVecEnv, gym.Env] = None + self.train_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env() + self.eval_env: Union[SubprocVecEnv, Type[gym.Env]] = gym.Env() self.eval_callback: Optional[EvalCallback] = None self.model_type = self.freqai_info['rl_config']['model_type'] self.rl_config = self.freqai_info['rl_config'] @@ -66,6 +66,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): self.unset_outlier_removal() self.net_arch = self.rl_config.get('net_arch', [128, 128]) self.dd.model_type = import_str + self.tensorboard_callback: TensorboardCallback = \ + TensorboardCallback(verbose=1, actions=BaseActions) def unset_outlier_removal(self): """ @@ -157,7 +159,8 @@ class BaseReinforcementLearningModel(IFreqaiModel): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - self.tensorboard_callback = TensorboardCallback() + actions = self.train_env.get_actions() + self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) @abstractmethod def fit(self, data_dictionary: Dict[str, Any], dk: FreqaiDataKitchen, **kwargs): @@ -401,51 +404,3 @@ def make_env(MyRLEnv: Type[gym.Env], env_id: str, rank: int, return env set_random_seed(seed) return _init - - -class TensorboardCallback(BaseCallback): - """ - Custom callback for plotting additional values in tensorboard. 
- """ - def __init__(self, verbose=1): - super(TensorboardCallback, self).__init__(verbose) - - def _on_training_start(self) -> None: - hparam_dict = { - "algorithm": self.model.__class__.__name__, - "learning_rate": self.model.learning_rate, - "gamma": self.model.gamma, - "gae_lambda": self.model.gae_lambda, - "batch_size": self.model.batch_size, - "n_steps": self.model.n_steps, - } - metric_dict = { - "eval/mean_reward": 0, - "rollout/ep_rew_mean": 0, - "rollout/ep_len_mean": 0, - "train/value_loss": 0, - "train/explained_variance": 0, - } - self.logger.record( - "hparams", - HParam(hparam_dict, metric_dict), - exclude=("stdout", "log", "json", "csv"), - ) - - def _on_step(self) -> bool: - custom_info = self.training_env.get_attr("custom_info")[0] - self.logger.record("_state/position", self.locals["infos"][0]["position"]) - self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) - self.logger.record("_state/current_profit_pct", self.locals["infos"] - [0]["current_profit_pct"]) - self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) - self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) - self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] - [0]["trade_duration"]) - self.logger.record("_actions/action", self.locals["infos"][0]["action"]) - self.logger.record("_actions/_Invalid", custom_info["Invalid"]) - self.logger.record("_actions/_Unknown", custom_info["Unknown"]) - self.logger.record("_actions/Hold", custom_info["Hold"]) - for action in Actions: - self.logger.record(f"_actions/{action.name}", custom_info[action.name]) - return True diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py new file mode 100644 index 000000000..4aea9bdf5 --- /dev/null +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -0,0 +1,61 @@ +from enum import Enum +from typing import Any, Dict, Type, Union + +from stable_baselines3.common.callbacks import BaseCallback +from stable_baselines3.common.logger import HParam + +from freqtrade.freqai.RL.BaseEnvironment import BaseActions + + +class TensorboardCallback(BaseCallback): + """ + Custom callback for plotting additional values in tensorboard and + episodic summary reports. 
+ """ + def __init__(self, verbose=1, actions: Type[Enum] = BaseActions): + super(TensorboardCallback, self).__init__(verbose) + self.model: Any = None + # An alias for self.model.get_env(), the environment used for training + self.logger = None # type: Any + # self.training_env = None # type: Union[gym.Env, VecEnv] + self.actions: Type[Enum] = actions + + def _on_training_start(self) -> None: + hparam_dict = { + "algorithm": self.model.__class__.__name__, + "learning_rate": self.model.learning_rate, + # "gamma": self.model.gamma, + # "gae_lambda": self.model.gae_lambda, + # "batch_size": self.model.batch_size, + # "n_steps": self.model.n_steps, + } + metric_dict: Dict[str, Union[float, int]] = { + "eval/mean_reward": 0, + "rollout/ep_rew_mean": 0, + "rollout/ep_len_mean": 0, + "train/value_loss": 0, + "train/explained_variance": 0, + } + self.logger.record( + "hparams", + HParam(hparam_dict, metric_dict), + exclude=("stdout", "log", "json", "csv"), + ) + + def _on_step(self) -> bool: + custom_info = self.training_env.get_attr("custom_info")[0] # type: ignore + self.logger.record("_state/position", self.locals["infos"][0]["position"]) + self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) + self.logger.record("_state/current_profit_pct", self.locals["infos"] + [0]["current_profit_pct"]) + self.logger.record("_reward/total_profit", self.locals["infos"][0]["total_profit"]) + self.logger.record("_reward/total_reward", self.locals["infos"][0]["total_reward"]) + self.logger.record_mean("_reward/mean_trade_duration", self.locals["infos"] + [0]["trade_duration"]) + self.logger.record("_actions/action", self.locals["infos"][0]["action"]) + self.logger.record("_actions/_Invalid", custom_info["Invalid"]) + self.logger.record("_actions/_Unknown", custom_info["Unknown"]) + self.logger.record("_actions/Hold", custom_info["Hold"]) + for action in self.actions: + self.logger.record(f"_actions/{action.name}", custom_info[action.name]) + return True diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner.py b/freqtrade/freqai/prediction_models/ReinforcementLearner.py index fa1087497..47dbaf99e 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner.py @@ -88,33 +88,6 @@ class ReinforcementLearner(BaseReinforcementLearningModel): User can override any function in BaseRLEnv and gym.Env. Here the user sets a custom reward based on profit and trade duration. 
""" - def reset(self): - - # Reset custom info - self.custom_info = {} - self.custom_info["Invalid"] = 0 - self.custom_info["Hold"] = 0 - self.custom_info["Unknown"] = 0 - self.custom_info["pnl_factor"] = 0 - self.custom_info["duration_factor"] = 0 - self.custom_info["reward_exit"] = 0 - self.custom_info["reward_hold"] = 0 - for action in Actions: - self.custom_info[f"{action.name}"] = 0 - return super().reset() - - def step(self, action: int): - observation, step_reward, done, info = super().step(action) - info = dict( - tick=self._current_tick, - action=action, - total_reward=self.total_reward, - total_profit=self._total_profit, - position=self._position.value, - trade_duration=self.get_trade_duration(), - current_profit_pct=self.get_unrealized_profit() - ) - return observation, step_reward, done, info def calculate_reward(self, action: int) -> float: """ diff --git a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py index dd5430aa7..32a2a2076 100644 --- a/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py +++ b/freqtrade/freqai/prediction_models/ReinforcementLearner_multiproc.py @@ -1,14 +1,14 @@ import logging -from typing import Any, Dict # , Tuple +from typing import Any, Dict -# import numpy.typing as npt from pandas import DataFrame from stable_baselines3.common.callbacks import EvalCallback from stable_baselines3.common.vec_env import SubprocVecEnv from freqtrade.freqai.data_kitchen import FreqaiDataKitchen from freqtrade.freqai.prediction_models.ReinforcementLearner import ReinforcementLearner -from freqtrade.freqai.RL.BaseReinforcementLearningModel import TensorboardCallback, make_env +from freqtrade.freqai.RL.BaseReinforcementLearningModel import make_env +from freqtrade.freqai.RL.TensorboardCallback import TensorboardCallback logger = logging.getLogger(__name__) @@ -50,4 +50,5 @@ class ReinforcementLearner_multiproc(ReinforcementLearner): render=False, eval_freq=len(train_df), best_model_save_path=str(dk.data_path)) - self.tensorboard_callback = TensorboardCallback() + actions = self.train_env.env_method("get_actions")[0] + self.tensorboard_callback = TensorboardCallback(verbose=1, actions=actions) From d8565261e1880f0458356fa2dc477ea487a56c0e Mon Sep 17 00:00:00 2001 From: robcaulk Date: Sun, 4 Dec 2022 14:10:33 +0100 Subject: [PATCH 6/7] ignore initializer type --- freqtrade/freqai/RL/BaseEnvironment.py | 1 + freqtrade/freqai/RL/TensorboardCallback.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index 3fca6a25d..e43951142 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -77,6 +77,7 @@ class BaseEnvironment(gym.Env): # set here to default 5Ac, but all children envs can overwrite this self.actions: Type[Enum] = BaseActions + self.custom_info: dict = {} def reset_env(self, df: DataFrame, prices: DataFrame, window_size: int, reward_kwargs: dict, starting_point=True): diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index 4aea9bdf5..b5b8ba23d 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Type, Union from stable_baselines3.common.callbacks import BaseCallback from stable_baselines3.common.logger import HParam -from freqtrade.freqai.RL.BaseEnvironment import 
BaseActions +from freqtrade.freqai.RL.BaseEnvironment import BaseActions, BaseEnvironment class TensorboardCallback(BaseCallback): @@ -15,9 +15,8 @@ class TensorboardCallback(BaseCallback): def __init__(self, verbose=1, actions: Type[Enum] = BaseActions): super(TensorboardCallback, self).__init__(verbose) self.model: Any = None - # An alias for self.model.get_env(), the environment used for training self.logger = None # type: Any - # self.training_env = None # type: Union[gym.Env, VecEnv] + self.training_env: BaseEnvironment = None # type: ignore self.actions: Type[Enum] = actions def _on_training_start(self) -> None: @@ -43,7 +42,7 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: - custom_info = self.training_env.get_attr("custom_info")[0] # type: ignore + custom_info = self.training_env.custom_info self.logger.record("_state/position", self.locals["infos"][0]["position"]) self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) self.logger.record("_state/current_profit_pct", self.locals["infos"] From 62c69bf2b5285196ce80760160712c04b339bad1 Mon Sep 17 00:00:00 2001 From: robcaulk Date: Mon, 5 Dec 2022 20:22:54 +0100 Subject: [PATCH 7/7] fix custom_info --- freqtrade/freqai/RL/Base4ActionRLEnv.py | 4 ++-- freqtrade/freqai/RL/BaseEnvironment.py | 3 +-- freqtrade/freqai/RL/TensorboardCallback.py | 2 +- tests/freqai/test_freqai_interface.py | 1 - 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/freqtrade/freqai/RL/Base4ActionRLEnv.py b/freqtrade/freqai/RL/Base4ActionRLEnv.py index 7818ac51e..79616d778 100644 --- a/freqtrade/freqai/RL/Base4ActionRLEnv.py +++ b/freqtrade/freqai/RL/Base4ActionRLEnv.py @@ -20,8 +20,8 @@ class Base4ActionRLEnv(BaseEnvironment): """ Base class for a 4 action environment """ - def __init__(self, *args): - super().__init__(*args) + def __init__(self, **kwargs): + super().__init__(**kwargs) self.actions = Actions def set_action_space(self): diff --git a/freqtrade/freqai/RL/BaseEnvironment.py b/freqtrade/freqai/RL/BaseEnvironment.py index e43951142..a31ded0c6 100644 --- a/freqtrade/freqai/RL/BaseEnvironment.py +++ b/freqtrade/freqai/RL/BaseEnvironment.py @@ -75,7 +75,7 @@ class BaseEnvironment(gym.Env): else: self.fee = 0.0015 - # set here to default 5Ac, but all children envs can overwrite this + # set here to default 5Ac, but all children envs can override this self.actions: Type[Enum] = BaseActions self.custom_info: dict = {} @@ -121,7 +121,6 @@ class BaseEnvironment(gym.Env): self._total_unrealized_profit: float = 1 self.history: dict = {} self.trade_history: list = [] - self.custom_info: dict = {} @abstractmethod def set_action_space(self): diff --git a/freqtrade/freqai/RL/TensorboardCallback.py b/freqtrade/freqai/RL/TensorboardCallback.py index b5b8ba23d..f590bdf84 100644 --- a/freqtrade/freqai/RL/TensorboardCallback.py +++ b/freqtrade/freqai/RL/TensorboardCallback.py @@ -42,7 +42,7 @@ class TensorboardCallback(BaseCallback): ) def _on_step(self) -> bool: - custom_info = self.training_env.custom_info + custom_info = self.training_env.get_attr("custom_info")[0] self.logger.record("_state/position", self.locals["infos"][0]["position"]) self.logger.record("_state/trade_duration", self.locals["infos"][0]["trade_duration"]) self.logger.record("_state/current_profit_pct", self.locals["infos"] diff --git a/tests/freqai/test_freqai_interface.py b/tests/freqai/test_freqai_interface.py index c53137093..f19acb018 100644 --- a/tests/freqai/test_freqai_interface.py +++ b/tests/freqai/test_freqai_interface.py 
@@ -237,7 +237,6 @@ def test_start_backtesting(mocker, freqai_conf, model, num_files, strat, caplog) df = freqai.cache_corr_pairlist_dfs(df, freqai.dk) for i in range(5): df[f'%-constant_{i}'] = i - # df.loc[:, f'%-constant_{i}'] = i metadata = {"pair": "LTC/BTC"} freqai.start_backtesting(df, metadata, freqai.dk)
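
Not part of the patch series above: a minimal, standalone sketch of the pattern these patches introduce — an environment that keeps a per-episode "custom_info" counter dict, plus a stable-baselines3 BaseCallback that reads it back through the vectorized env with get_attr() and writes it to TensorBoard via self.logger.record(). Everything freqtrade-specific is replaced by illustrative placeholders (the CartPole env, the wrapper name, the ./tb_logs directory), and it assumes the gym / stable-baselines3 versions freqtrade pinned around the time of this PR; only the BaseCallback, logger, and VecEnv.get_attr APIs mirror the code in the patches.

# Standalone illustration of the callback pattern added by these patches.
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback


class ActionCountWrapper(gym.Wrapper):
    """Expose a `custom_info` dict on the env, as BaseEnvironment does."""

    def __init__(self, env):
        super().__init__(env)
        self.custom_info: dict = {}

    def reset(self, **kwargs):
        # Re-zero the per-episode counters, mirroring BaseEnvironment.reset()
        self.custom_info = {a: 0 for a in range(self.action_space.n)}
        return self.env.reset(**kwargs)

    def step(self, action):
        # Count how often each action was taken this episode
        self.custom_info[int(action)] += 1
        return self.env.step(action)


class DemoTensorboardCallback(BaseCallback):
    """Read env-side counters via get_attr and write them to the SB3 logger."""

    def _on_step(self) -> bool:
        # self.training_env is always a VecEnv, so get_attr() works both for a
        # single Monitor-wrapped env and for a SubprocVecEnv (index 0 = first env).
        custom_info = self.training_env.get_attr("custom_info")[0]
        for action, count in custom_info.items():
            self.logger.record(f"_actions/{action}", count)
        # The patches above additionally log per-step state taken from
        # self.locals["infos"][0] (position, trade_duration, current_profit_pct, ...).
        return True


if __name__ == "__main__":
    env = ActionCountWrapper(gym.make("CartPole-v1"))
    model = PPO("MlpPolicy", env, tensorboard_log="./tb_logs", verbose=0)
    model.learn(total_timesteps=2048, callback=DemoTensorboardCallback())

Design note on the pattern: keeping the counters on the environment and fetching them with training_env.get_attr("custom_info")[0] (rather than holding state inside the callback, or touching training_env attributes directly as PATCH 6/7 briefly did before PATCH 7/7 reverted it) is what lets the same TensorboardCallback serve both the single-process learner and ReinforcementLearner_multiproc: with SubprocVecEnv the attribute is fetched from the first worker process, just as the action enum is fetched via env_method("get_actions")[0] in PATCH 5/7.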