"""
Various tool functions for Freqtrade and scripts
"""
import gzip
import logging
from io import StringIO
from pathlib import Path
from typing import Any, Dict, Iterator, List, Mapping, Optional, TextIO, Union
from urllib.parse import urlparse

import pandas as pd
import rapidjson

from freqtrade.enums import SignalTagType, SignalType


logger = logging.getLogger(__name__)


def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = True) -> None:
    """
    Dump JSON data into a file
    :param filename: file to create
    :param data: JSON Data to save
    :param is_zip: if the file should be gzip-compressed
    :param log: if log output should be shown
    :return: None
    """
    if is_zip:
        if filename.suffix != ".gz":
            filename = filename.with_suffix(".gz")
        if log:
            logger.info(f'dumping json to "{filename}"')

        with gzip.open(filename, "wt", encoding="utf-8") as fpz:
            rapidjson.dump(data, fpz, default=str, number_mode=rapidjson.NM_NATIVE)
    else:
        if log:
            logger.info(f'dumping json to "{filename}"')
        with filename.open("w") as fp:
            rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)

    logger.debug(f'done json to "{filename}"')
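# Usage sketch (illustrative only; "user_data/meta.json" is a made-up path):
#   file_dump_json(Path("user_data/meta.json"), {"pairs": ["BTC/USDT"]})
# With is_zip=True, the ".json" suffix is swapped for ".gz" automatically.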


def file_dump_joblib(filename: Path, data: Any, log: bool = True) -> None:
    """
    Dump object data into a file using joblib
    :param filename: file to create
    :param data: Object data to save
    :param log: if log output should be shown
    :return: None
    """
    import joblib

    if log:
        logger.info(f'dumping joblib to "{filename}"')
    with filename.open("wb") as fp:
        joblib.dump(data, fp)
    logger.debug(f'done joblib dump to "{filename}"')
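# Usage sketch (illustrative; `fitted_model` is a hypothetical picklable object):
#   file_dump_joblib(Path("user_data/model.joblib"), fitted_model)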


def json_load(datafile: TextIO) -> Any:
    """
    Load data with rapidjson.
    Use this to have a consistent experience:
    number_mode is set to "NM_NATIVE" for greatest speed.
    """
    return rapidjson.load(datafile, number_mode=rapidjson.NM_NATIVE)
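# Behavior sketch: accepts any readable text file object, e.g.
#   >>> json_load(StringIO('{"a": 1}'))
#   {'a': 1}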


def file_load_json(file: Path):
    """
    Load a JSON file, preferring a gzip-compressed variant if one exists.
    :param file: path to the json file (a ".gz" sibling is tried first)
    :return: the parsed data, or None if neither file exists
    """
    if file.suffix != ".gz":
        gzipfile = file.with_suffix(file.suffix + ".gz")
    else:
        gzipfile = file
    # Try gzip file first, otherwise regular json file.
    if gzipfile.is_file():
        logger.debug(f"Loading historical data from file {gzipfile}")
        with gzip.open(gzipfile, "rt", encoding="utf-8") as datafile:
            pairdata = json_load(datafile)
    elif file.is_file():
        logger.debug(f"Loading historical data from file {file}")
        with file.open() as datafile:
            pairdata = json_load(datafile)
    else:
        return None
    return pairdata
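# Lookup-order sketch ("BTC_USDT-5m.json" is a hypothetical data file):
# for Path("BTC_USDT-5m.json"), "BTC_USDT-5m.json.gz" is tried first,
# then the plain ".json" file; None is returned if neither exists.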


def is_file_in_dir(file: Path, directory: Path) -> bool:
    """
    Helper function to check if file is in directory.
    """
    return file.is_file() and file.parent.samefile(directory)


def pair_to_filename(pair: str) -> str:
    # Replace characters that are problematic in filenames with underscores
    for ch in ["/", " ", ".", "@", "$", "+", ":"]:
        pair = pair.replace(ch, "_")
    return pair
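# Examples (outputs follow from the replacements above):
#   >>> pair_to_filename("ETH/BTC")
#   'ETH_BTC'
#   >>> pair_to_filename("BTC/USDT:USDT")
#   'BTC_USDT_USDT'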


def deep_merge_dicts(source, destination, allow_null_overrides: bool = True):
    """
    Values from source override destination; destination is returned (and modified!!)
    Sample:
    >>> a = {'first': {'rows': {'pass': 'dog', 'number': '1'}}}
    >>> b = {'first': {'rows': {'fail': 'cat', 'number': '5'}}}
    >>> deep_merge_dicts(b, a) == {'first': {'rows': {'pass': 'dog', 'fail': 'cat', 'number': '5'}}}
    True
    """
    for key, value in source.items():
        if isinstance(value, dict):
            # get node or create one
            node = destination.setdefault(key, {})
            deep_merge_dicts(value, node, allow_null_overrides)
        elif value is not None or allow_null_overrides:
            destination[key] = value

    return destination


def round_dict(d, n):
    """
    Rounds float values in the dict to n digits after the decimal point.
    """
    return {k: (round(v, n) if isinstance(v, float) else v) for k, v in d.items()}
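# Example:
#   >>> round_dict({"profit": 0.123456, "trades": 5}, 3)
#   {'profit': 0.123, 'trades': 5}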


DictMap = Union[Dict[str, Any], Mapping[str, Any]]


def safe_value_fallback(obj: DictMap, key1: str, key2: Optional[str] = None, default_value=None):
    """
    Search a value in obj, return this if it's not None.
    Then search key2 in obj - return that if it's not None.
    Else fall back to default_value (which itself defaults to None).
    """
    if key1 in obj and obj[key1] is not None:
        return obj[key1]
    else:
        if key2 and key2 in obj and obj[key2] is not None:
            return obj[key2]
    return default_value
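# Example ("exit_reason" and "sell_reason" are hypothetical keys):
#   >>> safe_value_fallback({"exit_reason": None, "sell_reason": "roi"}, "exit_reason", "sell_reason")
#   'roi'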


def safe_value_fallback2(dict1: DictMap, dict2: DictMap, key1: str, key2: str, default_value=None):
    """
    Search a value in dict1, return this if it's not None.
    Fall back to dict2 - return key2 from dict2 if it's not None.
    Else fall back to default_value (which itself defaults to None).
    """
    if key1 in dict1 and dict1[key1] is not None:
        return dict1[key1]
    else:
        if key2 in dict2 and dict2[key2] is not None:
            return dict2[key2]
    return default_value
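# Example (an order dict falling back to a hypothetical ticker dict):
#   >>> safe_value_fallback2({"average": None}, {"last": 100.0}, "average", "last")
#   100.0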


def plural(num: float, singular: str, plural: Optional[str] = None) -> str:
    return singular if (num == 1 or num == -1) else plural or singular + "s"
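# Examples:
#   >>> plural(1, "day")
#   'day'
#   >>> plural(5, "day")
#   'days'
#   >>> plural(2, "candle", "candles")
#   'candles'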


def chunks(lst: List[Any], n: int) -> Iterator[List[Any]]:
    """
    Split lst into chunks of the size n.
    :param lst: list to split into chunks
    :param n: number of max elements per chunk
    :return: generator yielding list chunks (the last chunk may be shorter than n)
    """
    for chunk in range(0, len(lst), n):
        yield (lst[chunk : chunk + n])
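# Example:
#   >>> list(chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]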


def parse_db_uri_for_logging(uri: str):
    """
    Helper method to parse the DB URI and return the same DB URI with the password censored
    if it contains one. Otherwise, return the DB URI unchanged.
    :param uri: DB URI to parse for logging
    """
    parsed_db_uri = urlparse(uri)
    if not parsed_db_uri.netloc:  # No need for censoring as no password was provided
        return uri
    if ":" not in parsed_db_uri.netloc or "@" not in parsed_db_uri.netloc:
        # netloc contains no "user:password@" part - nothing to censor
        return uri
    pwd = parsed_db_uri.netloc.split(":")[1].split("@")[0]
    return parsed_db_uri.geturl().replace(f":{pwd}@", ":*****@")
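# Examples (hypothetical URIs):
#   >>> parse_db_uri_for_logging("postgresql://user:secret@host:5432/db")
#   'postgresql://user:*****@host:5432/db'
#   >>> parse_db_uri_for_logging("sqlite:///tradesv3.sqlite")  # no netloc - unchanged
#   'sqlite:///tradesv3.sqlite'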


def dataframe_to_json(dataframe: pd.DataFrame) -> str:
    """
    Serialize a DataFrame for transmission over the wire using JSON
    :param dataframe: A pandas DataFrame
    :returns: A JSON string of the pandas DataFrame
    """
    return dataframe.to_json(orient="split")


def json_to_dataframe(data: str) -> pd.DataFrame:
    """
    Deserialize JSON into a DataFrame
    :param data: A JSON string
    :returns: A pandas DataFrame from the JSON string
    """
    dataframe = pd.read_json(StringIO(data), orient="split")
    if "date" in dataframe.columns:
        # Restore the "date" column as timezone-aware UTC datetimes
        dataframe["date"] = pd.to_datetime(dataframe["date"], unit="ms", utc=True)

    return dataframe
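# Round-trip sketch (`candles` is a hypothetical OHLCV frame with a "date" column):
#   restored = json_to_dataframe(dataframe_to_json(candles))
# The "date" column comes back as timezone-aware UTC datetimes.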


def remove_entry_exit_signals(dataframe: pd.DataFrame):
    """
    Remove Entry and Exit signals from a DataFrame
    :param dataframe: The DataFrame to remove signals from
    """
    dataframe[SignalType.ENTER_LONG.value] = 0
    dataframe[SignalType.EXIT_LONG.value] = 0
    dataframe[SignalType.ENTER_SHORT.value] = 0
    dataframe[SignalType.EXIT_SHORT.value] = 0
    dataframe[SignalTagType.ENTER_TAG.value] = None
    dataframe[SignalTagType.EXIT_TAG.value] = None

    return dataframe


def append_candles_to_dataframe(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
    """
    Append the `right` dataframe to the `left` dataframe
    :param left: The full dataframe you want appended to
    :param right: The new dataframe containing the data you want appended
    :returns: The combined dataframe with the `right` data appended
    """
    # Skip the append if the newest candle is already present
    if left.iloc[-1]["date"] != right.iloc[-1]["date"]:
        left = pd.concat([left, right])

    # Only keep the last 1500 candles in memory
    left = left[-1500:] if len(left) > 1500 else left
    left.reset_index(drop=True, inplace=True)

    return left
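# Usage sketch (`cached` and `new_candles` are hypothetical OHLCV frames):
#   cached = append_candles_to_dataframe(cached, new_candles)
# If both frames already end on the same candle date, nothing is appended.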