use data loader, add evaluation on epoch

Yinon Polak 2023-03-06 16:16:45 +02:00
parent 751b205618
commit b1ac2bf515
5 changed files with 167 additions and 91 deletions

View File: freqtrade/freqai/base_models/BasePyTorchModel.py

@@ -1,6 +1,6 @@
import logging
from time import time
from typing import Any, Dict
from typing import Any
import torch
from pandas import DataFrame
@@ -11,7 +11,7 @@ from freqtrade.freqai.freqai_interface import IFreqaiModel
logger = logging.getLogger(__name__)
class BasePytorchModel(IFreqaiModel):
class BasePyTorchModel(IFreqaiModel):
"""
Base class for PyTorch type models.
User *must* inherit from this class and set fit() and predict().
@@ -29,7 +29,6 @@ class BasePytorchModel(IFreqaiModel):
Filter the training data and train a model to it. Train makes heavy use of the datakitchen
for storing, saving, loading, and analyzing the data.
:param unfiltered_df: Full dataframe for the current training period
:param metadata: pair metadata from strategy.
:return:
:model: Trained model which can be used for inference (self.predict)
"""

View File: freqtrade/freqai/base_models/PyTorchModelTrainer.py

@@ -0,0 +1,136 @@
import logging
from pathlib import Path
from typing import Dict
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import pandas as pd
logger = logging.getLogger(__name__)
class PyTorchModelTrainer:
def __init__(
self,
model: nn.Module,
optimizer: torch.optim.Optimizer,
criterion: nn.Module,
device: str,
batch_size: int,
max_iters: int,
eval_iters: int,
init_model: Dict
):
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.device = device
self.max_iters = max_iters
self.batch_size = batch_size
self.eval_iters = eval_iters
if init_model:
self.load_from_checkpoint(init_model)
def fit(self, data_dictionary: Dict[str, pd.DataFrame]):
data_loaders_dictionary = self.create_data_loaders_dictionary(data_dictionary)
epochs = self.calc_n_epochs(
n_obs=len(data_dictionary['train_features']),
batch_size=self.batch_size,
n_iters=self.max_iters
)
for epoch in range(epochs):
# evaluation
losses = self.estimate_loss(data_loaders_dictionary)
logger.info(
f"epoch ({epoch}/{epochs}):"
f" train loss {losses['train']:.4f} ; test loss {losses['test']:.4f}"
)
# training
for batch_data in data_loaders_dictionary['train']:
xb, yb = batch_data
xb = xb.to(self.device) # type: ignore
yb = yb.to(self.device)
yb_pred = self.model(xb)
loss = self.criterion(yb_pred, yb)
self.optimizer.zero_grad(set_to_none=True)
loss.backward()
self.optimizer.step()
@torch.no_grad()
def estimate_loss(
self,
data_loader_dictionary: Dict[str, DataLoader]
) -> Dict[str, float]:
self.model.eval()
loss_dictionary = {}
for split in ['train', 'test']:
# evaluate on at most eval_iters batches per split; sizing the buffer
# via calc_n_epochs could under-allocate and raise an IndexError
n_batches = min(self.eval_iters, len(data_loader_dictionary[split]))
losses = torch.zeros(n_batches)
for i, batch in enumerate(data_loader_dictionary[split]):
if i == n_batches:
break
xb, yb = batch
xb = xb.to(self.device)
yb = yb.to(self.device)
yb_pred = self.model(xb)
loss = self.criterion(yb_pred, yb)
losses[i] = loss.item()
loss_dictionary[split] = losses.mean().item()
self.model.train()
return loss_dictionary
def create_data_loaders_dictionary(
self,
data_dictionary: Dict[str, pd.DataFrame]
) -> Dict[str, DataLoader]:
data_loader_dictionary = {}
for split in ['train', 'test']:
labels_shape = data_dictionary[f'{split}_labels'].shape
# flatten single-column labels to (n,), since CrossEntropyLoss expects
# class-index targets of shape (batch,); keep multi-column shapes as-is
labels_view = labels_shape[0] if labels_shape[1] == 1 else labels_shape
dataset = TensorDataset(
torch.from_numpy(data_dictionary[f'{split}_features'].values).float(),
torch.from_numpy(data_dictionary[f'{split}_labels'].astype(float).values)
.long()
.view(labels_view)
)
data_loader = DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=True,
drop_last=True,
num_workers=0,
)
data_loader_dictionary[split] = data_loader
return data_loader_dictionary
@staticmethod
def calc_n_epochs(n_obs: int, batch_size: int, n_iters: int) -> int:
# translate a target number of optimizer iterations into whole epochs,
# e.g. 640 rows / batch 64 -> 10 batches, so 20 iters -> 2 epochs
n_batches = max(n_obs // batch_size, 1)
epochs = max(n_iters // n_batches, 1)  # always train for at least one epoch
return epochs
def save(self, path: Path):
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, path)
def load_from_file(self, path: Path):
checkpoint = torch.load(path)
return self.load_from_checkpoint(checkpoint)
def load_from_checkpoint(self, checkpoint: Dict):
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
return self
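A minimal smoke test of the trainer on synthetic data, to show how the pieces above fit together. The DataFrames, shapes, and hyperparameters are illustrative stand-ins for FreqAI's data_dictionary, not part of the commit, and it assumes freqtrade is importable so PyTorchModelTrainer can be loaded from its new path:

    import numpy as np
    import pandas as pd
    import torch
    import torch.nn as nn

    from freqtrade.freqai.base_models.PyTorchModelTrainer import PyTorchModelTrainer

    rng = np.random.default_rng(42)

    def make_split(n_rows: int):
        # 8 float feature columns plus one (n_rows, 1) label column of classes 0..2,
        # shaped like the train/test entries FreqAI puts in data_dictionary
        features = pd.DataFrame(rng.normal(size=(n_rows, 8)).astype(np.float32))
        labels = pd.DataFrame(rng.integers(0, 3, size=(n_rows, 1)).astype(float))
        return features, labels

    train_x, train_y = make_split(640)
    test_x, test_y = make_split(128)
    data_dictionary = {
        'train_features': train_x, 'train_labels': train_y,
        'test_features': test_x, 'test_labels': test_y,
    }

    model = nn.Linear(8, 3)  # stand-in for PyTorchMLPModel
    trainer = PyTorchModelTrainer(
        model=model,
        optimizer=torch.optim.AdamW(model.parameters(), lr=3e-4),
        criterion=nn.CrossEntropyLoss(),
        device='cpu',
        batch_size=64,   # 640 rows // 64 -> 10 batches per epoch
        max_iters=20,    # 20 iters // 10 batches -> 2 epochs via calc_n_epochs
        eval_iters=10,
        init_model=None,
    )
    trainer.fit(data_dictionary)

The labels arrive as a single (n, 1) float column, which is why create_data_loaders_dictionary flattens them with labels_view before handing them to CrossEntropyLoss.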

View File: freqtrade/freqai/base_models/PytorchModelTrainer.py

@ -1,51 +0,0 @@
import logging
from pathlib import Path
from typing import Dict
import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
class PytorchModelTrainer:
def __init__(self, model: nn.Module, optimizer, init_model: Dict):
self.model = model
self.optimizer = optimizer
if init_model:
self.load_from_checkpoint(init_model)
def fit(self, tensor_dictionary, max_iters, batch_size):
for iter in range(max_iters):
# todo add validation evaluation here
xb, yb = self.get_batch(tensor_dictionary, 'train', batch_size)
logits, loss = self.model(xb, yb)
self.optimizer.zero_grad(set_to_none=True)
loss.backward()
self.optimizer.step()
def save(self, path):
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, path)
def load_from_file(self, path: Path):
checkpoint = torch.load(path)
return self.load_from_checkpoint(checkpoint)
def load_from_checkpoint(self, checkpoint: Dict):
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
return self
@staticmethod
def get_batch(tensor_dictionary: Dict, split: str, batch_size: int):
ix = torch.randint(len(tensor_dictionary[f'{split}_labels']), (batch_size,))
x = tensor_dictionary[f'{split}_features'][ix]
y = tensor_dictionary[f'{split}_labels'][ix]
return x, y
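This deleted trainer drew each batch by sampling indices at random, so some examples repeat across iterations while others are never seen; the new PyTorchModelTrainer iterates a shuffled DataLoader instead, visiting every example once per epoch. A minimal sketch of the difference, with hypothetical tensors:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    features = torch.randn(256, 8)
    labels = torch.randint(0, 3, (256,))

    # old style: one random batch per iteration, sampled with replacement across iters
    ix = torch.randint(len(labels), (64,))
    xb, yb = features[ix], labels[ix]

    # new style: a shuffled pass over the whole dataset per epoch, no duplicates within it
    loader = DataLoader(TensorDataset(features, labels), batch_size=64,
                        shuffle=True, drop_last=True)
    for xb, yb in loader:
        pass  # training step goes here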

View File: freqtrade/freqai/prediction_models/PyTorchClassifierMultiTarget.py

@@ -1,6 +1,5 @@
import logging
from typing import Dict
from typing import Any, Dict, Tuple
import numpy.typing as npt
@@ -8,28 +7,29 @@ import numpy as np
import pandas as pd
import torch
from pandas import DataFrame
from torch.nn import functional as F
from freqtrade.freqai.base_models.BasePytorchModel import BasePytorchModel
from freqtrade.freqai.base_models.PytorchModelTrainer import PytorchModelTrainer
from freqtrade.freqai.data_kitchen import FreqaiDataKitchen
from freqtrade.freqai.prediction_models.PytorchMLPModel import MLP
from freqtrade.freqai.base_models.BasePyTorchModel import BasePyTorchModel
from freqtrade.freqai.base_models.PyTorchModelTrainer import PyTorchModelTrainer
from freqtrade.freqai.prediction_models.PyTorchMLPModel import PyTorchMLPModel
logger = logging.getLogger(__name__)
class PytorchClassifierMultiTarget(BasePytorchModel):
class PyTorchClassifierMultiTarget(BasePyTorchModel):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# todo move to config
self.n_hidden = 1024
self.labels = ['0.0', '1.0', '2.0']
self.n_hidden = 1024
self.max_iters = 100
self.batch_size = 64
self.learning_rate = 3e-4
self.eval_iters = 10
def fit(self, data_dictionary: Dict, dk: FreqaiDataKitchen, **kwargs) -> Any:
"""
@@ -38,17 +38,27 @@ class PytorchClassifierMultiTarget(BasePytorchModel):
all the training and test data/labels.
"""
n_features = data_dictionary['train_features'].shape[-1]
tensor_dictionary = self.convert_data_to_tensors(data_dictionary)
model = MLP(
model = PyTorchMLPModel(
input_dim=n_features,
hidden_dim=self.n_hidden,
output_dim=len(self.labels)
)
model.to(self.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=self.learning_rate)
criterion = torch.nn.CrossEntropyLoss()
init_model = self.get_init_model(dk.pair)
trainer = PytorchModelTrainer(model, optimizer, init_model=init_model)
trainer.fit(tensor_dictionary, self.max_iters, self.batch_size)
trainer = PyTorchModelTrainer(
model=model,
optimizer=optimizer,
criterion=criterion,
device=self.device,
batch_size=self.batch_size,
max_iters=self.max_iters,
eval_iters=self.eval_iters,
init_model=init_model
)
trainer.fit(data_dictionary)
return trainer
def predict(
@@ -73,9 +83,9 @@ class PytorchClassifierMultiTarget(BasePytorchModel):
self.data_cleaning_predict(dk)
dk.data_dictionary["prediction_features"] = torch.tensor(
dk.data_dictionary["prediction_features"].values
).to(self.device)
).float().to(self.device)
logits, _ = self.model.model(dk.data_dictionary["prediction_features"])
logits = self.model.model(dk.data_dictionary["prediction_features"])
probs = F.softmax(logits, dim=-1)
label_ints = torch.argmax(probs, dim=-1)
@@ -83,15 +93,3 @@ class PytorchClassifierMultiTarget(BasePytorchModel):
pred_df = DataFrame(label_ints, columns=dk.label_list).astype(float).astype(str)
pred_df = pd.concat([pred_df, pred_df_prob], axis=1)
return (pred_df, dk.do_predict)
def convert_data_to_tensors(self, data_dictionary: Dict) -> Dict:
tensor_dictionary = {}
for split in ['train', 'test']:
tensor_dictionary[f'{split}_features'] = torch.tensor(
data_dictionary[f'{split}_features'].values
).to(self.device)
tensor_dictionary[f'{split}_labels'] = torch.tensor(
data_dictionary[f'{split}_labels'].astype(float).values
).long().to(self.device)
return tensor_dictionary
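A small aside on the pred_df construction above: the argmax output is cast float-then-str so predicted classes come back as the same strings listed in self.labels. The column name below is a hypothetical FreqAI label, not from the commit:

    import pandas as pd

    label_ints = [0, 2, 1]  # argmax class indices for three candles
    pred_df = pd.DataFrame(label_ints, columns=['&s-class']).astype(float).astype(str)
    print(pred_df['&s-class'].tolist())  # ['0.0', '2.0', '1.0'], matching self.labels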

View File: freqtrade/freqai/prediction_models/PyTorchMLPModel.py

@@ -3,29 +3,23 @@ import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super(MLP, self).__init__()
class PyTorchMLPModel(nn.Module):
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int):
super().__init__()
self.input_layer = nn.Linear(input_dim, hidden_dim)
self.hidden_layer = nn.Linear(hidden_dim, hidden_dim)
self.output_layer = nn.Linear(hidden_dim, output_dim)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.2)
def forward(self, x, targets=None):
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.relu(self.input_layer(x))
x = self.dropout(x)
x = self.relu(self.hidden_layer(x))
x = self.dropout(x)
logits = self.output_layer(x)
if targets is None:
return logits, None
loss = F.cross_entropy(logits, targets.squeeze())
return logits, loss
return logits
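A quick shape check for PyTorchMLPModel, mirroring how predict() uses it; the dimensions are illustrative and it assumes freqtrade is importable:

    import torch
    from torch.nn import functional as F

    from freqtrade.freqai.prediction_models.PyTorchMLPModel import PyTorchMLPModel

    mlp = PyTorchMLPModel(input_dim=8, hidden_dim=1024, output_dim=3)
    mlp.eval()  # disable dropout for a deterministic forward pass
    with torch.no_grad():
        logits = mlp(torch.randn(5, 8))    # -> (5, 3)
        probs = F.softmax(logits, dim=-1)  # each row sums to 1
    assert probs.shape == (5, 3)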