freqtrade_origin/freqtrade/data/converter/trade_converter_kraken.py


import logging
from pathlib import Path

import pandas as pd

from freqtrade.constants import DATETIME_PRINT_FORMAT, DEFAULT_TRADES_COLUMNS, Config
from freqtrade.data.converter.trade_converter import (
    trades_convert_types,
    trades_df_remove_duplicates,
)
from freqtrade.data.history import get_datahandler
from freqtrade.enums import TradingMode
from freqtrade.exceptions import OperationalException
from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist
from freqtrade.resolvers import ExchangeResolver


logger = logging.getLogger(__name__)

KRAKEN_CSV_TRADE_COLUMNS = ["timestamp", "price", "amount"]


def import_kraken_trades_from_csv(config: Config, convert_to: str):
"""
Import kraken trades from csv
"""
if config["exchange"]["name"] != "kraken":
raise OperationalException("This function is only for the kraken exchange.")
    datadir: Path = config["datadir"]
    data_handler = get_datahandler(datadir, data_format=convert_to)

    tradesdir: Path = config["datadir"] / "trades_csv"
    exchange = ExchangeResolver.load_exchange(config, validate=False)
    # Collect symbol names from all csv files below trades_csv (searched recursively)
    data_symbols = {p.stem for p in tradesdir.rglob("*.csv")}

    # create pair/filename mapping
    markets = {
        (m["symbol"], m["altname"])
        for m in exchange.markets.values()
        if m.get("altname") in data_symbols
    }
logger.info(f"Found csv files for {', '.join(data_symbols)}.")
    if pairs_raw := config.get("pairs"):
        pairs = expand_pairlist(pairs_raw, [m[0] for m in markets])
        markets = {m for m in markets if m[0] in pairs}
        if not markets:
logger.info(f"No data found for pairs {', '.join(pairs_raw)}.")
return
logger.info(f"Converting pairs: {', '.join(m[0] for m in markets)}.")
for pair, name in markets:
logger.debug(f"Converting pair {pair}, files */{name}.csv")
dfs = []
# Load and combine all csv files for this pair
for f in tradesdir.rglob(f"{name}.csv"):
df = pd.read_csv(f, names=KRAKEN_CSV_TRADE_COLUMNS)
if not df.empty:
dfs.append(df)
        # Skip the pair if no csv files yielded any data
        if not dfs:
            # Edge case - can only happen if the file was deleted between the glob above and here
            logger.info(f"No data found for pair {pair}")
            continue

        trades = pd.concat(dfs, ignore_index=True)
        del dfs

trades.loc[:, "timestamp"] = trades["timestamp"] * 1e3
trades.loc[:, "cost"] = trades["price"] * trades["amount"]
for col in DEFAULT_TRADES_COLUMNS:
if col not in trades.columns:
2024-05-12 15:41:55 +00:00
trades.loc[:, col] = ""
trades = trades[DEFAULT_TRADES_COLUMNS]
trades = trades_convert_types(trades)
trades_df = trades_df_remove_duplicates(trades)
del trades
        logger.info(
            f"{pair}: {len(trades_df)} trades, from "
            f"{trades_df['date'].min():{DATETIME_PRINT_FORMAT}} to "
            f"{trades_df['date'].max():{DATETIME_PRINT_FORMAT}}"
        )

        data_handler.trades_store(pair, trades_df, TradingMode.SPOT)
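

# ------------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module). A call might look
# like the commented example below, assuming `config` is a fully populated
# freqtrade Config (this function also passes it to ExchangeResolver.load_exchange,
# so a bare dict with only the keys shown would not be sufficient). The datadir,
# pair list and "feather" target format are placeholder assumptions.
#
#     # config["datadir"]           -> Path("user_data/data/kraken")
#     # config["exchange"]["name"]  -> "kraken"
#     # config["pairs"]             -> ["BTC/USDT", "ETH/USDT"]   (optional filter)
#     # Raw Kraken csv dumps placed under <datadir>/trades_csv/<ALTNAME>.csv
#     import_kraken_trades_from_csv(config, convert_to="feather")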