freqtrade_origin/freqtrade/data/datahandlers/jsondatahandler.py


import re
from pathlib import Path
from typing import Dict, List, Optional

import numpy as np
from pandas import DataFrame, read_json, to_datetime

from freqtrade import misc
from freqtrade.configuration import TimeRange
from freqtrade.data.converter import clean_ohlcv_dataframe, trim_dataframe

from .idatahandler import IDataHandler


class JsonDataHandler(IDataHandler):

    _use_zip = False
    _columns = ['date', 'open', 'high', 'low', 'close', 'volume']

    @classmethod
    def ohlcv_get_pairs(cls, datadir: Path, timeframe: str) -> List[str]:
        """
        Returns a list of all pairs with ohlcv data available in this datadir
        for the specified timeframe
        :param datadir: Directory to search for ohlcv files
        :param timeframe: Timeframe to search pairs for
        :return: List of Pairs
        """
        _tmp = [re.search(r'^(\S+)(?=\-' + timeframe + '.json)', p.name)
                for p in datadir.glob(f"*{timeframe}.{cls._get_file_extension()}")]
        # Check if regex found something and only return these results
        return [match[0].replace('_', '/') for match in _tmp if match]
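
    # Illustrative example (editorial addition): for a datadir containing a file
    # named "ETH_BTC-5m.json", the regex above captures "ETH_BTC", which is then
    # mapped back to the exchange-style pair name:
    #
    #   JsonDataHandler.ohlcv_get_pairs(Path("user_data/data/binance"), "5m")
    #   # -> ['ETH/BTC']
    #
    # The directory path is hypothetical and used only for this sketch.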

    def ohlcv_store(self, pair: str, timeframe: str, data: DataFrame) -> None:
        """
        Store data in json format "values".
            format looks as follows:
            [[<date>,<open>,<high>,<low>,<close>,<volume>]]
        :param pair: Pair - used to generate filename
        :param timeframe: Timeframe - used to generate filename
        :param data: Dataframe containing OHLCV data
        :return: None
        """
        filename = self._pair_data_filename(self._datadir, pair, timeframe)
        _data = data.copy()
        # Convert date to int
        _data['date'] = _data['date'].astype(np.int64) // 1000 // 1000
        # Reset index, select only appropriate columns and save as json
        _data.reset_index(drop=True).loc[:, self._columns].to_json(
            filename, orient="values",
            compression='gzip' if self._use_zip else None)
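
    # Illustrative note (editorial addition): the datetime64[ns] 'date' column is
    # converted to millisecond integer timestamps before storing, so one row of
    # the resulting json "values" file looks roughly like
    #
    #   [1576800000000, 0.0253, 0.0258, 0.0251, 0.0256, 1432.5]
    #
    # The numbers are made up and only show the [date, open, high, low, close,
    # volume] ordering defined by cls._columns.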

    def _ohlcv_load(self, pair: str, timeframe: str,
                    timerange: Optional[TimeRange] = None,
                    fill_missing: bool = True,
                    drop_incomplete: bool = True,
                    ) -> DataFrame:
        """
        Internal method used to load data for one pair from disk.
        Implements the loading and conversion to a Pandas dataframe.
        :param pair: Pair to load data for
        :param timeframe: Ticker timeframe (e.g. "5m")
        :param timerange: Limit data to be loaded to this timerange
        :param fill_missing: Fill missing values with "No action"-candles
        :param drop_incomplete: Drop last candle assuming it may be incomplete.
        :return: DataFrame with ohlcv data, or empty DataFrame
        """
        filename = self._pair_data_filename(self._datadir, pair, timeframe)
        if not filename.is_file():
            return DataFrame(columns=self._columns)
        pairdata = read_json(filename, orient='values')
        pairdata.columns = self._columns
        pairdata['date'] = to_datetime(pairdata['date'],
                                       unit='ms',
                                       utc=True,
                                       infer_datetime_format=True)
        enddate = pairdata.iloc[-1]['date']
        if timerange:
            self._validate_pairdata(pair, pairdata, timerange)
            pairdata = trim_dataframe(pairdata, timerange)
        # Incomplete candles should only be dropped if we didn't trim the end beforehand.
        return clean_ohlcv_dataframe(pairdata, timeframe,
                                     pair=pair,
                                     fill_missing=fill_missing,
                                     drop_incomplete=(drop_incomplete and
                                                      enddate == pairdata.iloc[-1]['date']))
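
    # Illustrative note (editorial addition): `enddate` is captured before
    # trimming, so when the timerange cuts candles off the end of the dataframe,
    # `enddate == pairdata.iloc[-1]['date']` evaluates to False and the (now
    # historical, hence complete) last candle is kept even when
    # drop_incomplete=True was requested.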

    def ohlcv_append(self, pair: str, timeframe: str, data: DataFrame) -> None:
        """
        Append data to existing data structures
        :param pair: Pair
        :param timeframe: Timeframe this ohlcv data is for
        :param data: Data to append.
        """
        raise NotImplementedError()

    @classmethod
    def trades_get_pairs(cls, datadir: Path) -> List[str]:
        """
        Returns a list of all pairs for which trade data is available in this datadir
        :param datadir: Directory to search for trades files
        :return: List of Pairs
        """
        _tmp = [re.search(r'^(\S+)(?=\-trades.json)', p.name)
                for p in datadir.glob(f"*trades.{cls._get_file_extension()}")]
        # Check if regex found something and only return these results to avoid exceptions.
        return [match[0].replace('_', '/') for match in _tmp if match]

    def trades_store(self, pair: str, data: List[Dict]) -> None:
        """
        Store trades data (list of Dicts) to file
        :param pair: Pair - used for filename
        :param data: List of Dicts containing trade data
        """
        filename = self._pair_trades_filename(self._datadir, pair)
        misc.file_dump_json(filename, data, is_zip=self._use_zip)
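
    # Illustrative note (editorial addition): `data` is expected to be a list of
    # ccxt-style trade dicts; a single (made-up) entry could look like
    #
    #   {'timestamp': 1576800000000, 'price': 0.0256, 'amount': 1.5, 'side': 'buy', ...}
    #
    # The exact set of keys depends on the exchange/downloader and is not
    # defined by this handler.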

    def trades_append(self, pair: str, data: List[Dict]) -> None:
        """
        Append data to existing files
        :param pair: Pair - used for filename
        :param data: List of Dicts containing trade data
        """
        raise NotImplementedError()

    def trades_load(self, pair: str, timerange: Optional[TimeRange] = None) -> List[Dict]:
        """
        Load a pair from file, either .json.gz or .json
        # TODO: respect timerange ...
        :param pair: Load trades for this pair
        :param timerange: Timerange to load trades for - currently not implemented
        :return: List of trades
        """
        filename = self._pair_trades_filename(self._datadir, pair)
        tradesdata = misc.file_load_json(filename)
        if not tradesdata:
            return []
        return tradesdata

    def trades_purge(self, pair: str) -> bool:
        """
        Remove data for this pair
        :param pair: Delete data for this pair.
        :return: True when deleted, false if file did not exist.
        """
        filename = self._pair_trades_filename(self._datadir, pair)
        if filename.is_file():
            filename.unlink()
            return True
        return False

    @classmethod
    def _pair_data_filename(cls, datadir: Path, pair: str, timeframe: str) -> Path:
        pair_s = pair.replace("/", "_")
        filename = datadir.joinpath(f'{pair_s}-{timeframe}.{cls._get_file_extension()}')
        return filename

    @classmethod
    def _get_file_extension(cls):
        return "json.gz" if cls._use_zip else "json"

    @classmethod
    def _pair_trades_filename(cls, datadir: Path, pair: str) -> Path:
        pair_s = pair.replace("/", "_")
        filename = datadir.joinpath(f'{pair_s}-trades.{cls._get_file_extension()}')
        return filename
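
    # Illustrative example (editorial addition): for pair "ETH/BTC" and timeframe
    # "5m", _pair_data_filename() yields <datadir>/ETH_BTC-5m.json for
    # JsonDataHandler and <datadir>/ETH_BTC-5m.json.gz for JsonGzDataHandler;
    # _pair_trades_filename() yields <datadir>/ETH_BTC-trades.json(.gz) accordingly.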


class JsonGzDataHandler(JsonDataHandler):

    _use_zip = True
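

# Minimal usage sketch (editorial addition, not part of the original module).
# Only classmethods are exercised so no handler instance is needed; the
# "user_data/data/binance" path and the pair/timeframe values are purely
# illustrative.
if __name__ == "__main__":
    datadir = Path("user_data/data/binance")

    # List pairs for which 5m OHLCV data files exist in the directory.
    print(JsonDataHandler.ohlcv_get_pairs(datadir, "5m"))

    # Show how a pair/timeframe maps to a filename for both handler variants.
    print(JsonDataHandler._pair_data_filename(datadir, "ETH/BTC", "5m"))
    print(JsonGzDataHandler._pair_data_filename(datadir, "ETH/BTC", "5m"))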