ruff format: tests/data

Matthias 2024-05-12 16:00:45 +02:00
parent d8a8b5c125
commit ffd49e0e59
7 changed files with 1335 additions and 1157 deletions
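
The diff below is a pure formatting pass: string literals move from single to double quotes and long calls get re-wrapped, with no behavioral changes. A pass like this can be reproduced with ruff's formatter (a sketch; assumes ruff is installed and the repo's pyproject.toml carries the style settings):

import subprocess

# Format the tests/data package in place. "ruff format" only rewrites
# quoting and line wrapping; it does not change runtime behavior.
subprocess.run(["ruff", "format", "tests/data"], check=True)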

View File

@@ -41,18 +41,17 @@ from tests.conftest_trades import MOCK_TRADE_COUNT
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
get_latest_backtest_filename(testdatadir / "does_not_exist")
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
with pytest.raises(ValueError, match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
assert res == "backtest-result.json"
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
assert res == "backtest-result.json"
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
@@ -61,8 +60,8 @@ def test_get_latest_backtest_filename(testdatadir, mocker):
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir / "does_not_exist", "testfile.pickle")
assert res == testdatadir / "does_not_exist/testfile.pickle"
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
@@ -73,33 +72,35 @@ def test_get_latest_hyperopt_file(testdatadir):
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
match="--hyperopt-filename expects only the filename, not an absolute path.",
):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
res = load_backtest_metadata(testdatadir / "nonexistant.file.json")
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
mocker.patch("freqtrade.data.btanalysis.get_backtest_metadata_filename")
mocker.patch("freqtrade.data.btanalysis.json_load", side_effect=Exception())
with pytest.raises(
OperationalException, match=r"Unexpected error.*loading backtest metadata\."
):
load_backtest_metadata(testdatadir / "nonexistant.file.json")
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
mocker.patch("freqtrade.data.btanalysis.load_backtest_stats", return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
with pytest.raises(
OperationalException,
match=r"Backtest-results with only trades data are no longer supported.",
):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
@@ -122,13 +123,11 @@ def test_load_backtest_data_new_format(testdatadir):
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
for strategy in ("StrategyTestV2", "TestStrategy"):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
@@ -136,21 +135,20 @@ def test_load_backtest_data_multi(testdatadir):
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
load_backtest_data(filename, strategy="XYZ")
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
@pytest.mark.parametrize("is_short", [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
# remove init so it does not init again
init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock())
init_mock = mocker.patch("freqtrade.data.btanalysis.init_db", MagicMock())
trades = load_trades_from_db(db_url=default_conf['db_url'])
trades = load_trades_from_db(db_url=default_conf["db_url"])
assert init_mock.call_count == 1
assert len(trades) == MOCK_TRADE_COUNT
assert isinstance(trades, DataFrame)
@@ -159,38 +157,46 @@ def test_load_trades_from_db(default_conf, fee, is_short, mocker):
assert "profit_ratio" in trades.columns
for col in BT_DATA_COLUMNS:
if col not in ['index', 'open_at_end']:
if col not in ["index", "open_at_end"]:
assert col in trades.columns
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY)
trades = load_trades_from_db(db_url=default_conf["db_url"], strategy=CURRENT_TEST_STRATEGY)
assert len(trades) == 4
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy')
trades = load_trades_from_db(db_url=default_conf["db_url"], strategy="NoneStrategy")
assert len(trades) == 0
def test_extract_trades_of_period(testdatadir):
pair = "UNITTEST/BTC"
# 2017-11-14 06:07:00
timerange = TimeRange('date', None, 1510639620, 0)
timerange = TimeRange("date", None, 1510639620, 0)
data = load_pair_history(pair=pair, timeframe='1m',
datadir=testdatadir, timerange=timerange)
data = load_pair_history(pair=pair, timeframe="1m", datadir=testdatadir, timerange=timerange)
trades = DataFrame(
{'pair': [pair, pair, pair, pair],
'profit_ratio': [0.0, 0.1, -0.2, -0.5],
'profit_abs': [0.0, 1, -2, -5],
'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
{
"pair": [pair, pair, pair, pair],
"profit_ratio": [0.0, 0.1, -0.2, -0.5],
"profit_abs": [0.0, 1, -2, -5],
"open_date": to_datetime(
[
datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc),
], utc=True
],
utc=True,
),
'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
"close_date": to_datetime(
[
datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc),
], utc=True)
})
],
utc=True,
),
}
)
trades1 = extract_trades_of_period(data, trades)
# First and last trade are dropped as they are out of range
assert len(trades1) == 2
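
The dropped first and last trades pin down what extract_trades_of_period does: only trades whose lifetime falls inside the candle data's date range survive. A minimal sketch of that filter (assumed shape; the real implementation may differ in index handling):

import pandas as pd

def extract_trades_of_period_sketch(data: pd.DataFrame, trades: pd.DataFrame) -> pd.DataFrame:
    # Keep trades that opened and closed within the candle range.
    start, end = data["date"].min(), data["date"].max()
    mask = (trades["open_date"] >= start) & (trades["close_date"] <= end)
    return trades.loc[mask].reset_index(drop=True)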
@@ -206,18 +212,19 @@ def test_analyze_trade_parallelism(testdatadir):
res = analyze_trade_parallelism(bt_data, "5m")
assert isinstance(res, DataFrame)
assert 'open_trades' in res.columns
assert res['open_trades'].max() == 3
assert res['open_trades'].min() == 0
assert "open_trades" in res.columns
assert res["open_trades"].max() == 3
assert res["open_trades"].min() == 0
def test_load_trades(default_conf, mocker):
db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock())
bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock())
load_trades("DB",
db_url=default_conf.get('db_url'),
exportfilename=default_conf.get('exportfilename'),
load_trades(
"DB",
db_url=default_conf.get("db_url"),
exportfilename=default_conf.get("exportfilename"),
no_trades=False,
strategy=CURRENT_TEST_STRATEGY,
)
@@ -227,10 +234,11 @@ def test_load_trades(default_conf, mocker):
db_mock.reset_mock()
bt_mock.reset_mock()
default_conf['exportfilename'] = Path("testfile.json")
load_trades("file",
db_url=default_conf.get('db_url'),
exportfilename=default_conf.get('exportfilename'),
default_conf["exportfilename"] = Path("testfile.json")
load_trades(
"file",
db_url=default_conf.get("db_url"),
exportfilename=default_conf.get("exportfilename"),
)
assert db_mock.call_count == 0
@@ -238,11 +246,12 @@ def test_load_trades(default_conf, mocker):
db_mock.reset_mock()
bt_mock.reset_mock()
default_conf['exportfilename'] = "testfile.json"
load_trades("file",
db_url=default_conf.get('db_url'),
exportfilename=default_conf.get('exportfilename'),
no_trades=True
default_conf["exportfilename"] = "testfile.json"
load_trades(
"file",
db_url=default_conf.get("db_url"),
exportfilename=default_conf.get("exportfilename"),
no_trades=True,
)
assert db_mock.call_count == 0
@@ -251,7 +260,7 @@ def test_load_trades(default_conf, mocker):
def test_calculate_market_change(testdatadir):
pairs = ["ETH/BTC", "ADA/BTC"]
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m')
data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m")
result = calculate_market_change(data)
assert isinstance(result, float)
assert pytest.approx(result) == 0.01100002
@@ -259,7 +268,7 @@ def test_calculate_market_change(testdatadir):
def test_combine_dataframes_with_mean(testdatadir):
pairs = ["ETH/BTC", "ADA/BTC"]
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m')
data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m")
df = combine_dataframes_with_mean(data)
assert isinstance(df, DataFrame)
assert "ETH/BTC" in df.columns
@@ -269,11 +278,9 @@ def test_combine_dataframes_with_mean(testdatadir):
def test_combined_dataframes_with_rel_mean(testdatadir):
pairs = ["ETH/BTC", "ADA/BTC"]
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='5m')
data = load_data(datadir=testdatadir, pairs=pairs, timeframe="5m")
df = combined_dataframes_with_rel_mean(
data,
datetime(2018, 1, 12, tzinfo=timezone.utc),
datetime(2018, 1, 28, tzinfo=timezone.utc)
data, datetime(2018, 1, 12, tzinfo=timezone.utc), datetime(2018, 1, 28, tzinfo=timezone.utc)
)
assert isinstance(df, DataFrame)
assert "ETH/BTC" not in df.columns
@@ -281,14 +288,14 @@ def test_combined_dataframes_with_rel_mean(testdatadir):
assert "mean" in df.columns
assert "rel_mean" in df.columns
assert "count" in df.columns
assert df.iloc[0]['count'] == 2
assert df.iloc[-1]['count'] == 2
assert len(df) < len(data['ETH/BTC'])
assert df.iloc[0]["count"] == 2
assert df.iloc[-1]["count"] == 2
assert len(df) < len(data["ETH/BTC"])
def test_combine_dataframes_with_mean_no_data(testdatadir):
pairs = ["ETH/BTC", "ADA/BTC"]
data = load_data(datadir=testdatadir, pairs=pairs, timeframe='6m')
data = load_data(datadir=testdatadir, pairs=pairs, timeframe="6m")
with pytest.raises(ValueError, match=r"No data provided\."):
combine_dataframes_with_mean(data)
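
Taken together, the tests above fix the contract of combine_dataframes_with_mean: one close-price column per pair aligned on date, an appended "mean" column, and a ValueError on empty input. A sketch to that contract (assumed shape; the real function may differ, e.g. in how the price column is selected):

import pandas as pd

def combine_with_mean_sketch(data: dict) -> pd.DataFrame:
    if not data:
        raise ValueError("No data provided.")
    # One column of close prices per pair, aligned on the shared date index.
    df = pd.DataFrame({pair: d.set_index("date")["close"] for pair, d in data.items()})
    df["mean"] = df.mean(axis=1)
    return df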
@@ -298,60 +305,63 @@ def test_create_cum_profit(testdatadir):
bt_data = load_backtest_data(filename)
timerange = TimeRange.parse_timerange("20180110-20180112")
df = load_pair_history(pair="TRX/BTC", timeframe='5m',
datadir=testdatadir, timerange=timerange)
df = load_pair_history(pair="TRX/BTC", timeframe="5m", datadir=testdatadir, timerange=timerange)
cum_profits = create_cum_profit(df.set_index('date'),
bt_data[bt_data["pair"] == 'TRX/BTC'],
"cum_profits", timeframe="5m")
cum_profits = create_cum_profit(
df.set_index("date"), bt_data[bt_data["pair"] == "TRX/BTC"], "cum_profits", timeframe="5m"
)
assert "cum_profits" in cum_profits.columns
assert cum_profits.iloc[0]['cum_profits'] == 0
assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05
assert cum_profits.iloc[0]["cum_profits"] == 0
assert pytest.approx(cum_profits.iloc[-1]["cum_profits"]) == 9.0225563e-05
def test_create_cum_profit1(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
# Move close-time to "off" the candle, to make sure the logic still works
bt_data['close_date'] = bt_data.loc[:, 'close_date'] + DateOffset(seconds=20)
bt_data["close_date"] = bt_data.loc[:, "close_date"] + DateOffset(seconds=20)
timerange = TimeRange.parse_timerange("20180110-20180112")
df = load_pair_history(pair="TRX/BTC", timeframe='5m',
datadir=testdatadir, timerange=timerange)
df = load_pair_history(pair="TRX/BTC", timeframe="5m", datadir=testdatadir, timerange=timerange)
cum_profits = create_cum_profit(df.set_index('date'),
bt_data[bt_data["pair"] == 'TRX/BTC'],
"cum_profits", timeframe="5m")
cum_profits = create_cum_profit(
df.set_index("date"), bt_data[bt_data["pair"] == "TRX/BTC"], "cum_profits", timeframe="5m"
)
assert "cum_profits" in cum_profits.columns
assert cum_profits.iloc[0]['cum_profits'] == 0
assert pytest.approx(cum_profits.iloc[-1]['cum_profits']) == 9.0225563e-05
assert cum_profits.iloc[0]["cum_profits"] == 0
assert pytest.approx(cum_profits.iloc[-1]["cum_profits"]) == 9.0225563e-05
with pytest.raises(ValueError, match='Trade dataframe empty.'):
create_cum_profit(df.set_index('date'), bt_data[bt_data["pair"] == 'NOTAPAIR'],
"cum_profits", timeframe="5m")
with pytest.raises(ValueError, match="Trade dataframe empty."):
create_cum_profit(
df.set_index("date"),
bt_data[bt_data["pair"] == "NOTAPAIR"],
"cum_profits",
timeframe="5m",
)
def test_calculate_max_drawdown(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
_, hdate, lowdate, hval, lval, drawdown = calculate_max_drawdown(
bt_data, value_col="profit_abs")
bt_data, value_col="profit_abs"
)
assert isinstance(drawdown, float)
assert pytest.approx(drawdown) == 0.29753914
assert isinstance(hdate, Timestamp)
assert isinstance(lowdate, Timestamp)
assert isinstance(hval, float)
assert isinstance(lval, float)
assert hdate == Timestamp('2018-01-16 19:30:00', tz='UTC')
assert lowdate == Timestamp('2018-01-16 22:25:00', tz='UTC')
assert hdate == Timestamp("2018-01-16 19:30:00", tz="UTC")
assert lowdate == Timestamp("2018-01-16 22:25:00", tz="UTC")
underwater = calculate_underwater(bt_data)
assert isinstance(underwater, DataFrame)
with pytest.raises(ValueError, match='Trade dataframe empty.'):
with pytest.raises(ValueError, match="Trade dataframe empty."):
calculate_max_drawdown(DataFrame())
with pytest.raises(ValueError, match='Trade dataframe empty.'):
with pytest.raises(ValueError, match="Trade dataframe empty."):
calculate_underwater(DataFrame())
@@ -370,7 +380,7 @@ def test_calculate_csum(testdatadir):
assert csum_min1 == csum_min + 5
assert csum_max1 == csum_max + 5
with pytest.raises(ValueError, match='Trade dataframe empty.'):
with pytest.raises(ValueError, match="Trade dataframe empty."):
csum_min, csum_max = calculate_csum(DataFrame())
@@ -388,9 +398,7 @@ def test_calculate_expectancy(testdatadir):
assert pytest.approx(expectancy) == 5.820687070932315e-06
assert pytest.approx(expectancy_ratio) == 0.07151374226574791
data = {
'profit_abs': [100, 200, 50, -150, 300, -100, 80, -30]
}
data = {"profit_abs": [100, 200, 50, -150, 300, -100, 80, -30]}
df = DataFrame(data)
expectancy, expectancy_ratio = calculate_expectancy(df)
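
For the hand-rolled profit list above, the textbook expectancy (win rate times average win, minus loss rate times average loss) collapses to the mean profit per trade. A worked check under that common definition (the implementation's ratio variant scales differently):

import math

profits = [100, 200, 50, -150, 300, -100, 80, -30]
wins = [p for p in profits if p > 0]       # avg win: 146.0
losses = [-p for p in profits if p <= 0]   # avg loss: 93.33
win_rate = len(wins) / len(profits)        # 0.625
expectancy = win_rate * (sum(wins) / len(wins)) - (1 - win_rate) * (sum(losses) / len(losses))
assert math.isclose(expectancy, sum(profits) / len(profits))  # 56.25 per trade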
@@ -407,8 +415,8 @@ def test_calculate_sortino(testdatadir):
sortino = calculate_sortino(
bt_data,
bt_data['open_date'].min(),
bt_data['close_date'].max(),
bt_data["open_date"].min(),
bt_data["close_date"].max(),
0.01,
)
assert isinstance(sortino, float)
@@ -424,8 +432,8 @@ def test_calculate_sharpe(testdatadir):
sharpe = calculate_sharpe(
bt_data,
bt_data['open_date'].min(),
bt_data['close_date'].max(),
bt_data["open_date"].min(),
bt_data["close_date"].max(),
0.01,
)
assert isinstance(sharpe, float)
@@ -441,40 +449,69 @@ def test_calculate_calmar(testdatadir):
calmar = calculate_calmar(
bt_data,
bt_data['open_date'].min(),
bt_data['close_date'].max(),
bt_data["open_date"].min(),
bt_data["close_date"].max(),
0.01,
)
assert isinstance(calmar, float)
assert pytest.approx(calmar) == 559.040508
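
Sortino, Sharpe and Calmar share one shape, an annualized return divided by a risk measure (downside deviation, full deviation, and max drawdown respectively), which is why all three tests pass the same date bounds and risk-free rate. A rough Sharpe sketch under a daily-returns assumption (the freqtrade implementations differ in detail):

import numpy as np

def sharpe_sketch(daily_returns: np.ndarray, risk_free_rate: float = 0.0) -> float:
    # Mean excess daily return over its volatility, annualized to 365 days.
    excess = daily_returns - risk_free_rate / 365
    return float(excess.mean() / (excess.std() + 1e-12) * np.sqrt(365))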
@pytest.mark.parametrize('start,end,days, expected', [
@pytest.mark.parametrize(
"start,end,days, expected",
[
(64900, 176000, 3 * 365, 0.3945),
(64900, 176000, 365, 1.7119),
(1000, 1000, 365, 0.0),
(1000, 1500, 365, 0.5),
(1000, 1500, 100, 3.3927), # sub year
(0.01000000, 0.01762792, 120, 4.6087), # sub year BTC values
])
],
)
def test_calculate_cagr(start, end, days, expected):
assert round(calculate_cagr(days, start, end), 4) == expected
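
The parametrized cases pin the CAGR formula down to total growth raised to (365 / days), minus one; the sub-year rows annualize upward. A sketch reproducing two of the expectations above:

def cagr_sketch(days: float, start: float, end: float) -> float:
    # Compound annual growth rate over a 365-day year.
    return (end / start) ** (365 / days) - 1

assert round(cagr_sketch(3 * 365, 64900, 176000), 4) == 0.3945
assert round(cagr_sketch(100, 1000, 1500), 4) == 3.3927  # 50% in 100 days, annualized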
def test_calculate_max_drawdown2():
values = [0.011580, 0.010048, 0.011340, 0.012161, 0.010416, 0.010009, 0.020024,
-0.024662, -0.022350, 0.020496, -0.029859, -0.030511, 0.010041, 0.010872,
-0.025782, 0.010400, 0.012374, 0.012467, 0.114741, 0.010303, 0.010088,
-0.033961, 0.010680, 0.010886, -0.029274, 0.011178, 0.010693, 0.010711]
values = [
0.011580,
0.010048,
0.011340,
0.012161,
0.010416,
0.010009,
0.020024,
-0.024662,
-0.022350,
0.020496,
-0.029859,
-0.030511,
0.010041,
0.010872,
-0.025782,
0.010400,
0.012374,
0.012467,
0.114741,
0.010303,
0.010088,
-0.033961,
0.010680,
0.010886,
-0.029274,
0.011178,
0.010693,
0.010711,
]
dates = [dt_utc(2020, 1, 1) + timedelta(days=i) for i in range(len(values))]
df = DataFrame(zip(values, dates), columns=['profit', 'open_date'])
df = DataFrame(zip(values, dates), columns=["profit", "open_date"])
# sort by profit and reset index
df = df.sort_values('profit').reset_index(drop=True)
df = df.sort_values("profit").reset_index(drop=True)
df1 = df.copy()
drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
df, date_col='open_date', value_col='profit')
df, date_col="open_date", value_col="profit"
)
# Ensure df has not been altered.
assert df.equals(df1)
@@ -486,23 +523,26 @@ def test_calculate_max_drawdown2():
assert hval > lval
assert drawdown == 0.091755
df = DataFrame(zip(values[:5], dates[:5]), columns=['profit', 'open_date'])
with pytest.raises(ValueError, match='No losing trade, therefore no drawdown.'):
calculate_max_drawdown(df, date_col='open_date', value_col='profit')
df = DataFrame(zip(values[:5], dates[:5]), columns=["profit", "open_date"])
with pytest.raises(ValueError, match="No losing trade, therefore no drawdown."):
calculate_max_drawdown(df, date_col="open_date", value_col="profit")
df1 = DataFrame(zip(values[:5], dates[:5]), columns=['profit', 'open_date'])
df1.loc[:, 'profit'] = df1['profit'] * -1
df1 = DataFrame(zip(values[:5], dates[:5]), columns=["profit", "open_date"])
df1.loc[:, "profit"] = df1["profit"] * -1
# No winning trade ...
drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
df1, date_col='open_date', value_col='profit')
df1, date_col="open_date", value_col="profit"
)
assert drawdown == 0.043965
@pytest.mark.parametrize('profits,relative,highd,lowdays,result,result_rel', [
@pytest.mark.parametrize(
"profits,relative,highd,lowdays,result,result_rel",
[
([0.0, -500.0, 500.0, 10000.0, -1000.0], False, 3, 4, 1000.0, 0.090909),
([0.0, -500.0, 500.0, 10000.0, -1000.0], True, 0, 1, 500.0, 0.5),
])
],
)
def test_calculate_max_drawdown_abs(profits, relative, highd, lowdays, result, result_rel):
"""
Test case from issue https://github.com/freqtrade/freqtrade/issues/6655
@@ -511,12 +551,13 @@ def test_calculate_max_drawdown_abs(profits, relative, highd, lowdays, result, r
"""
init_date = datetime(2020, 1, 1, tzinfo=timezone.utc)
dates = [init_date + timedelta(days=i) for i in range(len(profits))]
df = DataFrame(zip(profits, dates), columns=['profit_abs', 'open_date'])
df = DataFrame(zip(profits, dates), columns=["profit_abs", "open_date"])
# sort by profit and reset index
df = df.sort_values('profit_abs').reset_index(drop=True)
df = df.sort_values("profit_abs").reset_index(drop=True)
df1 = df.copy()
drawdown, hdate, ldate, hval, lval, drawdown_rel = calculate_max_drawdown(
df, date_col='open_date', starting_balance=1000, relative=relative)
df, date_col="open_date", starting_balance=1000, relative=relative
)
# Ensure df has not been altered.
assert df.equals(df1)
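
The issue-6655 rows above make the relative flag concrete: the drawdown series is the gap between the running balance and its high-water mark, the relative series divides that gap by the peak value (including the 1000 starting balance), and the flag only changes which maximum gets picked. A sketch reproducing both rows:

import pandas as pd

profits = [0.0, -500.0, 500.0, 10000.0, -1000.0]
balance = pd.Series(profits).cumsum()   # running profit
peak = balance.cummax()                 # high-water mark
drawdown = peak - balance               # absolute gap per day
rel = drawdown / (peak + 1000)          # gap relative to peak account value

i = drawdown.idxmax()                   # relative=False: largest absolute gap (day 4)
assert drawdown[i] == 1000.0 and round(rel[i], 6) == 0.090909
j = rel.idxmax()                        # relative=True: largest relative gap (day 1)
assert drawdown[j] == 500.0 and rel[j] == 0.5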

View File

@@ -34,102 +34,105 @@ from tests.data.test_history import _clean_test_file
def test_dataframe_correct_columns(dataframe_1m):
assert dataframe_1m.columns.tolist() == ['date', 'open', 'high', 'low', 'close', 'volume']
assert dataframe_1m.columns.tolist() == ["date", "open", "high", "low", "close", "volume"]
def test_ohlcv_to_dataframe(ohlcv_history_list, caplog):
columns = ['date', 'open', 'high', 'low', 'close', 'volume']
columns = ["date", "open", "high", "low", "close", "volume"]
caplog.set_level(logging.DEBUG)
# Test file with BV data
dataframe = ohlcv_to_dataframe(ohlcv_history_list, '5m', pair="UNITTEST/BTC",
fill_missing=True)
dataframe = ohlcv_to_dataframe(ohlcv_history_list, "5m", pair="UNITTEST/BTC", fill_missing=True)
assert dataframe.columns.tolist() == columns
assert log_has('Converting candle (OHLCV) data to dataframe for pair UNITTEST/BTC.', caplog)
assert log_has("Converting candle (OHLCV) data to dataframe for pair UNITTEST/BTC.", caplog)
def test_trades_to_ohlcv(trades_history_df, caplog):
caplog.set_level(logging.DEBUG)
with pytest.raises(ValueError, match="Trade-list empty."):
trades_to_ohlcv(pd.DataFrame(columns=trades_history_df.columns), '1m')
trades_to_ohlcv(pd.DataFrame(columns=trades_history_df.columns), "1m")
df = trades_to_ohlcv(trades_history_df, '1m')
df = trades_to_ohlcv(trades_history_df, "1m")
assert not df.empty
assert len(df) == 1
assert 'open' in df.columns
assert 'high' in df.columns
assert 'low' in df.columns
assert 'close' in df.columns
assert df.iloc[0, :]['high'] == 0.019627
assert df.iloc[0, :]['low'] == 0.019626
assert df.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:00+0000')
assert "open" in df.columns
assert "high" in df.columns
assert "low" in df.columns
assert "close" in df.columns
assert df.iloc[0, :]["high"] == 0.019627
assert df.iloc[0, :]["low"] == 0.019626
assert df.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:59:00+0000")
df_1h = trades_to_ohlcv(trades_history_df, '1h')
df_1h = trades_to_ohlcv(trades_history_df, "1h")
assert len(df_1h) == 1
assert df_1h.iloc[0, :]['high'] == 0.019627
assert df_1h.iloc[0, :]['low'] == 0.019626
assert df_1h.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:00:00+0000')
assert df_1h.iloc[0, :]["high"] == 0.019627
assert df_1h.iloc[0, :]["low"] == 0.019626
assert df_1h.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:00:00+0000")
df_1s = trades_to_ohlcv(trades_history_df, '1s')
df_1s = trades_to_ohlcv(trades_history_df, "1s")
assert len(df_1s) == 2
assert df_1s.iloc[0, :]['high'] == 0.019627
assert df_1s.iloc[0, :]['low'] == 0.019627
assert df_1s.iloc[0, :]['date'] == pd.Timestamp('2019-08-14 15:59:49+0000')
assert df_1s.iloc[-1, :]['date'] == pd.Timestamp('2019-08-14 15:59:59+0000')
assert df_1s.iloc[0, :]["high"] == 0.019627
assert df_1s.iloc[0, :]["low"] == 0.019627
assert df_1s.iloc[0, :]["date"] == pd.Timestamp("2019-08-14 15:59:49+0000")
assert df_1s.iloc[-1, :]["date"] == pd.Timestamp("2019-08-14 15:59:59+0000")
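
The 1m/1h/1s expectations above are plain price resampling. A hedged pandas sketch of the conversion, using the trade columns (timestamp, price, amount) these fixtures carry; the real trades_to_ohlcv handles more edge cases:

import pandas as pd

def trades_to_ohlcv_sketch(trades: pd.DataFrame, freq: str = "1min") -> pd.DataFrame:
    if trades.empty:
        raise ValueError("Trade-list empty.")
    ts = pd.to_datetime(trades["timestamp"], unit="ms", utc=True)
    ohlcv = trades.set_index(ts)["price"].resample(freq).ohlc()
    ohlcv["volume"] = trades.set_index(ts)["amount"].resample(freq).sum()
    # Drop buckets with no trades; candle filling is a separate step.
    return ohlcv.dropna().reset_index(names="date")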
@pytest.mark.parametrize('timeframe,rows,days,candles,start,end,weekday', [
('1s', 20_000, 5, 19522, '2020-01-01 00:00:05', '2020-01-05 23:59:27', None),
('1m', 20_000, 5, 6745, '2020-01-01 00:00:00', '2020-01-05 23:59:00', None),
('5m', 20_000, 5, 1440, '2020-01-01 00:00:00', '2020-01-05 23:55:00', None),
('15m', 20_000, 5, 480, '2020-01-01 00:00:00', '2020-01-05 23:45:00', None),
('1h', 20_000, 5, 120, '2020-01-01 00:00:00', '2020-01-05 23:00:00', None),
('2h', 20_000, 5, 60, '2020-01-01 00:00:00', '2020-01-05 22:00:00', None),
('4h', 20_000, 5, 30, '2020-01-01 00:00:00', '2020-01-05 20:00:00', None),
('8h', 20_000, 5, 15, '2020-01-01 00:00:00', '2020-01-05 16:00:00', None),
('12h', 20_000, 5, 10, '2020-01-01 00:00:00', '2020-01-05 12:00:00', None),
('1d', 20_000, 5, 5, '2020-01-01 00:00:00', '2020-01-05 00:00:00', 'Sunday'),
('7d', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'),
('1w', 20_000, 37, 6, '2020-01-06 00:00:00', '2020-02-10 00:00:00', 'Monday'),
('1M', 20_000, 74, 3, '2020-01-01 00:00:00', '2020-03-01 00:00:00', None),
('3M', 20_000, 100, 2, '2020-01-01 00:00:00', '2020-04-01 00:00:00', None),
('1y', 20_000, 1000, 3, '2020-01-01 00:00:00', '2022-01-01 00:00:00', None),
])
@pytest.mark.parametrize(
"timeframe,rows,days,candles,start,end,weekday",
[
("1s", 20_000, 5, 19522, "2020-01-01 00:00:05", "2020-01-05 23:59:27", None),
("1m", 20_000, 5, 6745, "2020-01-01 00:00:00", "2020-01-05 23:59:00", None),
("5m", 20_000, 5, 1440, "2020-01-01 00:00:00", "2020-01-05 23:55:00", None),
("15m", 20_000, 5, 480, "2020-01-01 00:00:00", "2020-01-05 23:45:00", None),
("1h", 20_000, 5, 120, "2020-01-01 00:00:00", "2020-01-05 23:00:00", None),
("2h", 20_000, 5, 60, "2020-01-01 00:00:00", "2020-01-05 22:00:00", None),
("4h", 20_000, 5, 30, "2020-01-01 00:00:00", "2020-01-05 20:00:00", None),
("8h", 20_000, 5, 15, "2020-01-01 00:00:00", "2020-01-05 16:00:00", None),
("12h", 20_000, 5, 10, "2020-01-01 00:00:00", "2020-01-05 12:00:00", None),
("1d", 20_000, 5, 5, "2020-01-01 00:00:00", "2020-01-05 00:00:00", "Sunday"),
("7d", 20_000, 37, 6, "2020-01-06 00:00:00", "2020-02-10 00:00:00", "Monday"),
("1w", 20_000, 37, 6, "2020-01-06 00:00:00", "2020-02-10 00:00:00", "Monday"),
("1M", 20_000, 74, 3, "2020-01-01 00:00:00", "2020-03-01 00:00:00", None),
("3M", 20_000, 100, 2, "2020-01-01 00:00:00", "2020-04-01 00:00:00", None),
("1y", 20_000, 1000, 3, "2020-01-01 00:00:00", "2022-01-01 00:00:00", None),
],
)
def test_trades_to_ohlcv_multi(timeframe, rows, days, candles, start, end, weekday):
trades_history = generate_trades_history(n_rows=rows, days=days)
df = trades_to_ohlcv(trades_history, timeframe)
assert not df.empty
assert len(df) == candles
assert df.iloc[0, :]['date'] == pd.Timestamp(f'{start}+0000')
assert df.iloc[-1, :]['date'] == pd.Timestamp(f'{end}+0000')
assert df.iloc[0, :]["date"] == pd.Timestamp(f"{start}+0000")
assert df.iloc[-1, :]["date"] == pd.Timestamp(f"{end}+0000")
if weekday:
# Weekday is only relevant for daily and weekly candles.
assert df.iloc[-1, :]['date'].day_name() == weekday
assert df.iloc[-1, :]["date"].day_name() == weekday
def test_ohlcv_fill_up_missing_data(testdatadir, caplog):
data = load_pair_history(datadir=testdatadir,
timeframe='1m',
pair='UNITTEST/BTC',
fill_up_missing=False)
data = load_pair_history(
datadir=testdatadir, timeframe="1m", pair="UNITTEST/BTC", fill_up_missing=False
)
caplog.set_level(logging.DEBUG)
data2 = ohlcv_fill_up_missing_data(data, '1m', 'UNITTEST/BTC')
data2 = ohlcv_fill_up_missing_data(data, "1m", "UNITTEST/BTC")
assert len(data2) > len(data)
# Column names should not change
assert (data.columns == data2.columns).all()
assert log_has_re(f"Missing data fillup for UNITTEST/BTC, 1m: before: "
f"{len(data)} - after: {len(data2)}.*", caplog)
assert log_has_re(
f"Missing data fillup for UNITTEST/BTC, 1m: before: "
f"{len(data)} - after: {len(data2)}.*",
caplog,
)
# Test fillup actually fixes invalid backtest data
min_date, max_date = get_timerange({'UNITTEST/BTC': data})
assert validate_backtest_data(data, 'UNITTEST/BTC', min_date, max_date, 1)
assert not validate_backtest_data(data2, 'UNITTEST/BTC', min_date, max_date, 1)
min_date, max_date = get_timerange({"UNITTEST/BTC": data})
assert validate_backtest_data(data, "UNITTEST/BTC", min_date, max_date, 1)
assert not validate_backtest_data(data2, "UNITTEST/BTC", min_date, max_date, 1)
def test_ohlcv_fill_up_missing_data2(caplog):
timeframe = '5m'
timeframe = "5m"
ticks = [
[
1511686200000, # 8:50:00
@@ -153,7 +156,7 @@ def test_ohlcv_fill_up_missing_data2(caplog):
8.893e-05,
8.875e-05,
8.877e-05,
2251
2251,
],
[
1511687400000, # 9:10:00
@@ -161,51 +164,54 @@ def test_ohlcv_fill_up_missing_data2(caplog):
8.883e-05,
8.895e-05,
8.817e-05,
123551
]
123551,
],
]
# Generate test-data without filling missing
data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC",
fill_missing=False)
data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False)
assert len(data) == 3
caplog.set_level(logging.DEBUG)
data2 = ohlcv_fill_up_missing_data(data, timeframe, "UNITTEST/BTC")
assert len(data2) == 4
# 3rd candle has been filled
row = data2.loc[2, :]
assert row['volume'] == 0
assert row["volume"] == 0
# close should match close of previous candle
assert row['close'] == data.loc[1, 'close']
assert row['open'] == row['close']
assert row['high'] == row['close']
assert row['low'] == row['close']
assert row["close"] == data.loc[1, "close"]
assert row["open"] == row["close"]
assert row["high"] == row["close"]
assert row["low"] == row["close"]
# Column names should not change
assert (data.columns == data2.columns).all()
assert log_has_re(f"Missing data fillup for UNITTEST/BTC, {timeframe}: before: "
f"{len(data)} - after: {len(data2)}.*", caplog)
assert log_has_re(
f"Missing data fillup for UNITTEST/BTC, {timeframe}: before: "
f"{len(data)} - after: {len(data2)}.*",
caplog,
)
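
The assertions above fully describe the fill: a missing candle gets volume 0 and a flat OHLC carried from the previous close. A sketch built on the same OHLCV resampling aggregation that appears further down in this file (assumed approach; the real function works on an existing candle grid):

import pandas as pd

OHLCV_AGG = {"open": "first", "high": "max", "low": "min", "close": "last", "volume": "sum"}

def fill_missing_sketch(df: pd.DataFrame, freq: str = "5min") -> pd.DataFrame:
    out = df.resample(freq, on="date").agg(OHLCV_AGG).reset_index()
    out["close"] = out["close"].ffill()            # carry the last close forward
    for col in ("open", "high", "low"):
        out[col] = out[col].fillna(out["close"])   # flat candle at the carried close
    out["volume"] = out["volume"].fillna(0)
    return out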
@pytest.mark.parametrize('timeframe', [
'1s', '1m', '5m', '15m', '1h', '2h', '4h', '8h', '12h', '1d', '7d', '1w', '1M', '3M', '1y'
])
@pytest.mark.parametrize(
"timeframe",
["1s", "1m", "5m", "15m", "1h", "2h", "4h", "8h", "12h", "1d", "7d", "1w", "1M", "3M", "1y"],
)
def test_ohlcv_to_dataframe_multi(timeframe):
data = generate_test_data(timeframe, 180)
assert len(data) == 180
df = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT')
df = ohlcv_to_dataframe(data, timeframe, "UNITTEST/USDT")
assert len(df) == len(data) - 1
df1 = ohlcv_to_dataframe(data, timeframe, 'UNITTEST/USDT', drop_incomplete=False)
df1 = ohlcv_to_dataframe(data, timeframe, "UNITTEST/USDT", drop_incomplete=False)
assert len(df1) == len(data)
assert data.equals(df1)
data1 = data.copy()
if timeframe in ('1M', '3M', '1y'):
data1.loc[:, 'date'] = data1.loc[:, 'date'] + pd.to_timedelta('1w')
if timeframe in ("1M", "3M", "1y"):
data1.loc[:, "date"] = data1.loc[:, "date"] + pd.to_timedelta("1w")
else:
# Shift by half a timeframe
data1.loc[:, 'date'] = data1.loc[:, 'date'] + (pd.to_timedelta(timeframe) / 2)
df2 = ohlcv_to_dataframe(data1, timeframe, 'UNITTEST/USDT')
data1.loc[:, "date"] = data1.loc[:, "date"] + (pd.to_timedelta(timeframe) / 2)
df2 = ohlcv_to_dataframe(data1, timeframe, "UNITTEST/USDT")
assert len(df2) == len(data) - 1
tfs = timeframe_to_seconds(timeframe)
@@ -213,21 +219,20 @@ def test_ohlcv_to_dataframe_multi(timeframe):
if 1 <= tfm < 10000:
# minute based resampling does not work on timeframes >= 1 week
ohlcv_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'
"open": "first",
"high": "max",
"low": "min",
"close": "last",
"volume": "sum",
}
dfs = data1.resample(f"{tfs}s", on='date').agg(ohlcv_dict).reset_index(drop=False)
dfm = data1.resample(f"{tfm}min", on='date').agg(ohlcv_dict).reset_index(drop=False)
dfs = data1.resample(f"{tfs}s", on="date").agg(ohlcv_dict).reset_index(drop=False)
dfm = data1.resample(f"{tfm}min", on="date").agg(ohlcv_dict).reset_index(drop=False)
assert dfs.equals(dfm)
assert dfs.equals(df1)
def test_ohlcv_to_dataframe_1M():
# Monthly ticks from 2019-09-01 to 2023-07-01
ticks = [
[1567296000000, 8042.08, 10475.54, 7700.67, 8041.96, 608742.1109999999],
@@ -276,25 +281,27 @@ def test_ohlcv_to_dataframe_1M():
[1680307200000, 28454.8, 31059.0, 26919.3, 29223.0, 14654208.219],
[1682899200000, 29223.0, 29840.0, 25751.0, 27201.1, 13328157.284],
[1685577600000, 27201.1, 31500.0, 24777.0, 30460.2, 14099299.273],
[1688169600000, 30460.2, 31850.0, 28830.0, 29338.8, 8760361.377]
[1688169600000, 30460.2, 31850.0, 28830.0, 29338.8, 8760361.377],
]
data = ohlcv_to_dataframe(ticks, '1M', pair="UNITTEST/USDT",
fill_missing=False, drop_incomplete=False)
data = ohlcv_to_dataframe(
ticks, "1M", pair="UNITTEST/USDT", fill_missing=False, drop_incomplete=False
)
assert len(data) == len(ticks)
assert data.iloc[0]['date'].strftime('%Y-%m-%d') == '2019-09-01'
assert data.iloc[-1]['date'].strftime('%Y-%m-%d') == '2023-07-01'
assert data.iloc[0]["date"].strftime("%Y-%m-%d") == "2019-09-01"
assert data.iloc[-1]["date"].strftime("%Y-%m-%d") == "2023-07-01"
# Test with filling missing data
data = ohlcv_to_dataframe(ticks, '1M', pair="UNITTEST/USDT",
fill_missing=True, drop_incomplete=False)
data = ohlcv_to_dataframe(
ticks, "1M", pair="UNITTEST/USDT", fill_missing=True, drop_incomplete=False
)
assert len(data) == len(ticks)
assert data.iloc[0]['date'].strftime('%Y-%m-%d') == '2019-09-01'
assert data.iloc[-1]['date'].strftime('%Y-%m-%d') == '2023-07-01'
assert data.iloc[0]["date"].strftime("%Y-%m-%d") == "2019-09-01"
assert data.iloc[-1]["date"].strftime("%Y-%m-%d") == "2023-07-01"
def test_ohlcv_drop_incomplete(caplog):
timeframe = '1d'
timeframe = "1d"
ticks = [
[
1559750400000, # 2019-06-04
@@ -318,7 +325,7 @@ def test_ohlcv_drop_incomplete(caplog):
8.893e-05,
8.875e-05,
8.877e-05,
2251
2251,
],
[
1560009600000, # 2019-06-07
@@ -326,35 +333,33 @@ def test_ohlcv_drop_incomplete(caplog):
8.883e-05,
8.895e-05,
8.817e-05,
123551
]
123551,
],
]
caplog.set_level(logging.DEBUG)
data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC",
fill_missing=False, drop_incomplete=False)
data = ohlcv_to_dataframe(
ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False, drop_incomplete=False
)
assert len(data) == 4
assert not log_has("Dropping last candle", caplog)
# Drop last candle
data = ohlcv_to_dataframe(ticks, timeframe, pair="UNITTEST/BTC",
fill_missing=False, drop_incomplete=True)
data = ohlcv_to_dataframe(
ticks, timeframe, pair="UNITTEST/BTC", fill_missing=False, drop_incomplete=True
)
assert len(data) == 3
assert log_has("Dropping last candle", caplog)
def test_trim_dataframe(testdatadir) -> None:
data = load_data(
datadir=testdatadir,
timeframe='1m',
pairs=['UNITTEST/BTC']
)['UNITTEST/BTC']
min_date = int(data.iloc[0]['date'].timestamp())
max_date = int(data.iloc[-1]['date'].timestamp())
data = load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"])["UNITTEST/BTC"]
min_date = int(data.iloc[0]["date"].timestamp())
max_date = int(data.iloc[-1]["date"].timestamp())
data_modify = data.copy()
# Remove first 30 minutes (1800 s)
tr = TimeRange('date', None, min_date + 1800, 0)
tr = TimeRange("date", None, min_date + 1800, 0)
data_modify = trim_dataframe(data_modify, tr)
assert not data_modify.equals(data)
assert len(data_modify) < len(data)
@@ -363,7 +368,7 @@ def test_trim_dataframe(testdatadir) -> None:
assert all(data_modify.iloc[0] == data.iloc[30])
data_modify = data.copy()
tr = TimeRange('date', None, min_date + 1800, 0)
tr = TimeRange("date", None, min_date + 1800, 0)
# Remove first 20 candles - ignores min date
data_modify = trim_dataframe(data_modify, tr, startup_candles=20)
assert not data_modify.equals(data)
@@ -374,7 +379,7 @@ def test_trim_dataframe(testdatadir) -> None:
data_modify = data.copy()
# Remove last 30 minutes (1800 s)
tr = TimeRange(None, 'date', 0, max_date - 1800)
tr = TimeRange(None, "date", 0, max_date - 1800)
data_modify = trim_dataframe(data_modify, tr)
assert not data_modify.equals(data)
assert len(data_modify) < len(data)
@@ -384,7 +389,7 @@ def test_trim_dataframe(testdatadir) -> None:
data_modify = data.copy()
# Remove first 25 and last 30 minutes (1800 s)
tr = TimeRange('date', 'date', min_date + 1500, max_date - 1800)
tr = TimeRange("date", "date", min_date + 1500, max_date - 1800)
data_modify = trim_dataframe(data_modify, tr)
assert not data_modify.equals(data)
assert len(data_modify) < len(data)
@@ -394,7 +399,8 @@ def test_trim_dataframe(testdatadir) -> None:
def test_trades_df_remove_duplicates(trades_history_df):
trades_history1 = pd.concat([trades_history_df, trades_history_df, trades_history_df]
trades_history1 = pd.concat(
[trades_history_df, trades_history_df, trades_history_df]
).reset_index(drop=True)
assert len(trades_history1) == len(trades_history_df) * 3
res = trades_df_remove_duplicates(trades_history1)
@@ -407,55 +413,55 @@ def test_trades_dict_to_list(fetch_trades_result):
assert isinstance(res, list)
assert isinstance(res[0], list)
for i, t in enumerate(res):
assert t[0] == fetch_trades_result[i]['timestamp']
assert t[1] == fetch_trades_result[i]['id']
assert t[2] == fetch_trades_result[i]['type']
assert t[3] == fetch_trades_result[i]['side']
assert t[4] == fetch_trades_result[i]['price']
assert t[5] == fetch_trades_result[i]['amount']
assert t[6] == fetch_trades_result[i]['cost']
assert t[0] == fetch_trades_result[i]["timestamp"]
assert t[1] == fetch_trades_result[i]["id"]
assert t[2] == fetch_trades_result[i]["type"]
assert t[3] == fetch_trades_result[i]["side"]
assert t[4] == fetch_trades_result[i]["price"]
assert t[5] == fetch_trades_result[i]["amount"]
assert t[6] == fetch_trades_result[i]["cost"]
def test_convert_trades_format(default_conf, testdatadir, tmp_path):
files = [{'old': tmp_path / "XRP_ETH-trades.json.gz",
'new': tmp_path / "XRP_ETH-trades.json"},
{'old': tmp_path / "XRP_OLD-trades.json.gz",
'new': tmp_path / "XRP_OLD-trades.json"},
files = [
{"old": tmp_path / "XRP_ETH-trades.json.gz", "new": tmp_path / "XRP_ETH-trades.json"},
{"old": tmp_path / "XRP_OLD-trades.json.gz", "new": tmp_path / "XRP_OLD-trades.json"},
]
for file in files:
copyfile(testdatadir / file['old'].name, file['old'])
assert not file['new'].exists()
copyfile(testdatadir / file["old"].name, file["old"])
assert not file["new"].exists()
default_conf['datadir'] = tmp_path
default_conf["datadir"] = tmp_path
convert_trades_format(default_conf, convert_from='jsongz',
convert_to='json', erase=False)
convert_trades_format(default_conf, convert_from="jsongz", convert_to="json", erase=False)
for file in files:
assert file['new'].exists()
assert file['old'].exists()
assert file["new"].exists()
assert file["old"].exists()
# Remove original file
file['old'].unlink()
file["old"].unlink()
# Convert back
convert_trades_format(default_conf, convert_from='json',
convert_to='jsongz', erase=True)
convert_trades_format(default_conf, convert_from="json", convert_to="jsongz", erase=True)
for file in files:
assert file['old'].exists()
assert not file['new'].exists()
assert file["old"].exists()
assert not file["new"].exists()
_clean_test_file(file['old'])
if file['new'].exists():
file['new'].unlink()
_clean_test_file(file["old"])
if file["new"].exists():
file["new"].unlink()
@pytest.mark.parametrize('file_base,candletype', [
(['XRP_ETH-5m', 'XRP_ETH-1m'], CandleType.SPOT),
(['UNITTEST_USDT_USDT-1h-mark', 'XRP_USDT_USDT-1h-mark'], CandleType.MARK),
(['XRP_USDT_USDT-1h-futures'], CandleType.FUTURES),
])
@pytest.mark.parametrize(
"file_base,candletype",
[
(["XRP_ETH-5m", "XRP_ETH-1m"], CandleType.SPOT),
(["UNITTEST_USDT_USDT-1h-mark", "XRP_USDT_USDT-1h-mark"], CandleType.MARK),
(["XRP_USDT_USDT-1h-futures"], CandleType.FUTURES),
],
)
def test_convert_ohlcv_format(default_conf, testdatadir, tmp_path, file_base, candletype):
prependix = '' if candletype == CandleType.SPOT else 'futures/'
prependix = "" if candletype == CandleType.SPOT else "futures/"
files_orig = []
files_temp = []
files_new = []
@@ -470,77 +476,77 @@ def test_convert_ohlcv_format(default_conf, testdatadir, tmp_path, file_base, ca
files_temp.append(file_temp)
files_new.append(file_new)
default_conf['datadir'] = tmp_path
default_conf['candle_types'] = [candletype]
default_conf["datadir"] = tmp_path
default_conf["candle_types"] = [candletype]
if candletype == CandleType.SPOT:
default_conf['pairs'] = ['XRP/ETH', 'XRP/USDT', 'UNITTEST/USDT']
default_conf["pairs"] = ["XRP/ETH", "XRP/USDT", "UNITTEST/USDT"]
else:
default_conf['pairs'] = ['XRP/ETH:ETH', 'XRP/USDT:USDT', 'UNITTEST/USDT:USDT']
default_conf['timeframes'] = ['1m', '5m', '1h']
default_conf["pairs"] = ["XRP/ETH:ETH", "XRP/USDT:USDT", "UNITTEST/USDT:USDT"]
default_conf["timeframes"] = ["1m", "5m", "1h"]
assert not file_new.exists()
convert_ohlcv_format(
default_conf,
convert_from='feather',
convert_to='jsongz',
convert_from="feather",
convert_to="jsongz",
erase=False,
)
for file in (files_temp + files_new):
for file in files_temp + files_new:
assert file.exists()
# Remove original files
for file in (files_temp):
for file in files_temp:
file.unlink()
# Convert back
convert_ohlcv_format(
default_conf,
convert_from='jsongz',
convert_to='feather',
convert_from="jsongz",
convert_to="feather",
erase=True,
)
for file in (files_temp):
for file in files_temp:
assert file.exists()
for file in (files_new):
for file in files_new:
assert not file.exists()
def test_reduce_dataframe_footprint():
data = generate_test_data('15m', 40)
data = generate_test_data("15m", 40)
data['open_copy'] = data['open']
data['close_copy'] = data['close']
data['close_copy'] = data['close']
data["open_copy"] = data["open"]
data["close_copy"] = data["close"]
data["close_copy"] = data["close"]
assert data['open'].dtype == np.float64
assert data['open_copy'].dtype == np.float64
assert data['close_copy'].dtype == np.float64
assert data["open"].dtype == np.float64
assert data["open_copy"].dtype == np.float64
assert data["close_copy"].dtype == np.float64
df2 = reduce_dataframe_footprint(data)
# Does not modify original dataframe
assert data['open'].dtype == np.float64
assert data['open_copy'].dtype == np.float64
assert data['close_copy'].dtype == np.float64
assert data["open"].dtype == np.float64
assert data["open_copy"].dtype == np.float64
assert data["close_copy"].dtype == np.float64
# skips ohlcv columns
assert df2['open'].dtype == np.float64
assert df2['high'].dtype == np.float64
assert df2['low'].dtype == np.float64
assert df2['close'].dtype == np.float64
assert df2['volume'].dtype == np.float64
assert df2["open"].dtype == np.float64
assert df2["high"].dtype == np.float64
assert df2["low"].dtype == np.float64
assert df2["close"].dtype == np.float64
assert df2["volume"].dtype == np.float64
# Changes dtype of returned dataframe
assert df2['open_copy'].dtype == np.float32
assert df2['close_copy'].dtype == np.float32
assert df2["open_copy"].dtype == np.float32
assert df2["close_copy"].dtype == np.float32
def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog):
pair = 'XRP/ETH'
file1 = tmp_path / 'XRP_ETH-1m.feather'
file5 = tmp_path / 'XRP_ETH-5m.feather'
filetrades = tmp_path / 'XRP_ETH-trades.json.gz'
pair = "XRP/ETH"
file1 = tmp_path / "XRP_ETH-1m.feather"
file5 = tmp_path / "XRP_ETH-5m.feather"
filetrades = tmp_path / "XRP_ETH-trades.json.gz"
copyfile(testdatadir / file1.name, file1)
copyfile(testdatadir / file5.name, file5)
copyfile(testdatadir / filetrades.name, filetrades)
@@ -549,13 +555,18 @@ def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog):
dfbak_1m = load_pair_history(datadir=tmp_path, timeframe="1m", pair=pair)
dfbak_5m = load_pair_history(datadir=tmp_path, timeframe="5m", pair=pair)
tr = TimeRange.parse_timerange('20191011-20191012')
tr = TimeRange.parse_timerange("20191011-20191012")
convert_trades_to_ohlcv([pair], timeframes=['1m', '5m'],
data_format_trades='jsongz',
datadir=tmp_path, timerange=tr, erase=True,
data_format_ohlcv='feather',
candle_type=CandleType.SPOT)
convert_trades_to_ohlcv(
[pair],
timeframes=["1m", "5m"],
data_format_trades="jsongz",
datadir=tmp_path,
timerange=tr,
erase=True,
data_format_ohlcv="feather",
candle_type=CandleType.SPOT,
)
assert log_has("Deleting existing data for pair XRP/ETH, interval 1m.", caplog)
# Load new data
@@ -564,12 +575,17 @@ def test_convert_trades_to_ohlcv(testdatadir, tmp_path, caplog):
assert_frame_equal(dfbak_1m, df_1m, check_exact=True)
assert_frame_equal(dfbak_5m, df_5m, check_exact=True)
msg = 'Could not convert NoDatapair to OHLCV.'
msg = "Could not convert NoDatapair to OHLCV."
assert not log_has(msg, caplog)
convert_trades_to_ohlcv(['NoDatapair'], timeframes=['1m', '5m'],
data_format_trades='jsongz',
datadir=tmp_path, timerange=tr, erase=True,
data_format_ohlcv='feather',
candle_type=CandleType.SPOT)
convert_trades_to_ohlcv(
["NoDatapair"],
timeframes=["1m", "5m"],
data_format_trades="jsongz",
datadir=tmp_path,
timerange=tr,
erase=True,
data_format_ohlcv="feather",
candle_type=CandleType.SPOT,
)
assert log_has(msg, caplog)

View File

@@ -25,39 +25,53 @@ from tests.conftest import log_has, log_has_re
def test_datahandler_ohlcv_get_pairs(testdatadir):
pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, '5m', candle_type=CandleType.SPOT)
pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, "5m", candle_type=CandleType.SPOT)
# Convert to set to avoid failures due to sorting
assert set(pairs) == {'UNITTEST/BTC', 'XLM/BTC', 'ETH/BTC', 'TRX/BTC', 'LTC/BTC',
'XMR/BTC', 'ZEC/BTC', 'ADA/BTC', 'ETC/BTC', 'NXT/BTC',
'DASH/BTC', 'XRP/ETH'}
assert set(pairs) == {
"UNITTEST/BTC",
"XLM/BTC",
"ETH/BTC",
"TRX/BTC",
"LTC/BTC",
"XMR/BTC",
"ZEC/BTC",
"ADA/BTC",
"ETC/BTC",
"NXT/BTC",
"DASH/BTC",
"XRP/ETH",
}
pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, '8m', candle_type=CandleType.SPOT)
assert set(pairs) == {'UNITTEST/BTC'}
pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, "8m", candle_type=CandleType.SPOT)
assert set(pairs) == {"UNITTEST/BTC"}
pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, '5m', candle_type=CandleType.SPOT)
assert set(pairs) == {'UNITTEST/BTC'}
pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, "5m", candle_type=CandleType.SPOT)
assert set(pairs) == {"UNITTEST/BTC"}
pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.MARK)
assert set(pairs) == {'UNITTEST/USDT:USDT', 'XRP/USDT:USDT'}
pairs = FeatherDataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.MARK)
assert set(pairs) == {"UNITTEST/USDT:USDT", "XRP/USDT:USDT"}
pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.FUTURES)
assert set(pairs) == {'XRP/USDT:USDT'}
pairs = JsonGzDataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.FUTURES)
assert set(pairs) == {"XRP/USDT:USDT"}
pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, '1h', candle_type=CandleType.MARK)
assert set(pairs) == {'UNITTEST/USDT:USDT'}
pairs = HDF5DataHandler.ohlcv_get_pairs(testdatadir, "1h", candle_type=CandleType.MARK)
assert set(pairs) == {"UNITTEST/USDT:USDT"}
@pytest.mark.parametrize('filename,pair,timeframe,candletype', [
('XMR_BTC-5m.json', 'XMR_BTC', '5m', ''),
('XMR_USDT-1h.h5', 'XMR_USDT', '1h', ''),
('BTC-PERP-1h.h5', 'BTC-PERP', '1h', ''),
('BTC_USDT-2h.jsongz', 'BTC_USDT', '2h', ''),
('BTC_USDT-2h-mark.jsongz', 'BTC_USDT', '2h', 'mark'),
('XMR_USDT-1h-mark.h5', 'XMR_USDT', '1h', 'mark'),
('XMR_USDT-1h-random.h5', 'XMR_USDT', '1h', 'random'),
('BTC-PERP-1h-index.h5', 'BTC-PERP', '1h', 'index'),
('XMR_USDT_USDT-1h-mark.h5', 'XMR_USDT_USDT', '1h', 'mark'),
])
@pytest.mark.parametrize(
"filename,pair,timeframe,candletype",
[
("XMR_BTC-5m.json", "XMR_BTC", "5m", ""),
("XMR_USDT-1h.h5", "XMR_USDT", "1h", ""),
("BTC-PERP-1h.h5", "BTC-PERP", "1h", ""),
("BTC_USDT-2h.jsongz", "BTC_USDT", "2h", ""),
("BTC_USDT-2h-mark.jsongz", "BTC_USDT", "2h", "mark"),
("XMR_USDT-1h-mark.h5", "XMR_USDT", "1h", "mark"),
("XMR_USDT-1h-random.h5", "XMR_USDT", "1h", "random"),
("BTC-PERP-1h-index.h5", "BTC-PERP", "1h", "index"),
("XMR_USDT_USDT-1h-mark.h5", "XMR_USDT_USDT", "1h", "mark"),
],
)
def test_datahandler_ohlcv_regex(filename, pair, timeframe, candletype):
regex = JsonDataHandler._OHLCV_REGEX
@@ -68,18 +82,20 @@ def test_datahandler_ohlcv_regex(filename, pair, timeframe, candletype):
assert match[3] == candletype
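
The cases above characterize the filename grammar: pair, dash, timeframe, optional candle-type suffix. A hypothetical pattern satisfying all nine rows (the real _OHLCV_REGEX in JsonDataHandler may differ, e.g. it yields "" rather than None for spot files):

import re

OHLCV_RE = re.compile(r"^(.+?)-(\d+[smhdwMy])(?:-([a-z_]+))?\.")

m = OHLCV_RE.match("BTC-PERP-1h-index.h5")
assert m.group(1) == "BTC-PERP" and m.group(2) == "1h" and m.group(3) == "index"
m = OHLCV_RE.match("XMR_BTC-5m.json")
assert m.group(1) == "XMR_BTC" and m.group(2) == "5m" and m.group(3) is None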
@pytest.mark.parametrize('input,expected', [
('XMR_USDT', 'XMR/USDT'),
('BTC_USDT', 'BTC/USDT'),
('USDT_BUSD', 'USDT/BUSD'),
('BTC_USDT_USDT', 'BTC/USDT:USDT'), # Futures
('XRP_USDT_USDT', 'XRP/USDT:USDT'), # futures
('BTC-PERP', 'BTC-PERP'),
('BTC-PERP_USDT', 'BTC-PERP:USDT'),
('UNITTEST_USDT', 'UNITTEST/USDT'),
])
@pytest.mark.parametrize(
"input,expected",
[
("XMR_USDT", "XMR/USDT"),
("BTC_USDT", "BTC/USDT"),
("USDT_BUSD", "USDT/BUSD"),
("BTC_USDT_USDT", "BTC/USDT:USDT"), # Futures
("XRP_USDT_USDT", "XRP/USDT:USDT"), # futures
("BTC-PERP", "BTC-PERP"),
("BTC-PERP_USDT", "BTC-PERP:USDT"),
("UNITTEST_USDT", "UNITTEST/USDT"),
],
)
def test_rebuild_pair_from_filename(input, expected):
assert IDataHandler.rebuild_pair_from_filename(input) == expected
@@ -87,63 +103,63 @@ def test_datahandler_ohlcv_get_available_data(testdatadir):
paircombs = FeatherDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT)
# Convert to set to avoid failures due to sorting
assert set(paircombs) == {
('UNITTEST/BTC', '5m', CandleType.SPOT),
('ETH/BTC', '5m', CandleType.SPOT),
('XLM/BTC', '5m', CandleType.SPOT),
('TRX/BTC', '5m', CandleType.SPOT),
('LTC/BTC', '5m', CandleType.SPOT),
('XMR/BTC', '5m', CandleType.SPOT),
('ZEC/BTC', '5m', CandleType.SPOT),
('UNITTEST/BTC', '1m', CandleType.SPOT),
('ADA/BTC', '5m', CandleType.SPOT),
('ETC/BTC', '5m', CandleType.SPOT),
('NXT/BTC', '5m', CandleType.SPOT),
('DASH/BTC', '5m', CandleType.SPOT),
('XRP/ETH', '1m', CandleType.SPOT),
('XRP/ETH', '5m', CandleType.SPOT),
('UNITTEST/BTC', '30m', CandleType.SPOT),
('UNITTEST/BTC', '8m', CandleType.SPOT),
("UNITTEST/BTC", "5m", CandleType.SPOT),
("ETH/BTC", "5m", CandleType.SPOT),
("XLM/BTC", "5m", CandleType.SPOT),
("TRX/BTC", "5m", CandleType.SPOT),
("LTC/BTC", "5m", CandleType.SPOT),
("XMR/BTC", "5m", CandleType.SPOT),
("ZEC/BTC", "5m", CandleType.SPOT),
("UNITTEST/BTC", "1m", CandleType.SPOT),
("ADA/BTC", "5m", CandleType.SPOT),
("ETC/BTC", "5m", CandleType.SPOT),
("NXT/BTC", "5m", CandleType.SPOT),
("DASH/BTC", "5m", CandleType.SPOT),
("XRP/ETH", "1m", CandleType.SPOT),
("XRP/ETH", "5m", CandleType.SPOT),
("UNITTEST/BTC", "30m", CandleType.SPOT),
("UNITTEST/BTC", "8m", CandleType.SPOT),
}
paircombs = FeatherDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.FUTURES)
# Convert to set to avoid failures due to sorting
assert set(paircombs) == {
('UNITTEST/USDT:USDT', '1h', 'mark'),
('XRP/USDT:USDT', '5m', 'futures'),
('XRP/USDT:USDT', '1h', 'futures'),
('XRP/USDT:USDT', '1h', 'mark'),
('XRP/USDT:USDT', '8h', 'mark'),
('XRP/USDT:USDT', '8h', 'funding_rate'),
("UNITTEST/USDT:USDT", "1h", "mark"),
("XRP/USDT:USDT", "5m", "futures"),
("XRP/USDT:USDT", "1h", "futures"),
("XRP/USDT:USDT", "1h", "mark"),
("XRP/USDT:USDT", "8h", "mark"),
("XRP/USDT:USDT", "8h", "funding_rate"),
}
paircombs = JsonGzDataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT)
assert set(paircombs) == {('UNITTEST/BTC', '8m', CandleType.SPOT)}
assert set(paircombs) == {("UNITTEST/BTC", "8m", CandleType.SPOT)}
paircombs = HDF5DataHandler.ohlcv_get_available_data(testdatadir, TradingMode.SPOT)
assert set(paircombs) == {('UNITTEST/BTC', '5m', CandleType.SPOT)}
assert set(paircombs) == {("UNITTEST/BTC", "5m", CandleType.SPOT)}
def test_jsondatahandler_ohlcv_purge(mocker, testdatadir):
mocker.patch.object(Path, "exists", MagicMock(return_value=False))
unlinkmock = mocker.patch.object(Path, "unlink", MagicMock())
dh = JsonGzDataHandler(testdatadir)
assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '')
assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark')
assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "")
assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark")
assert unlinkmock.call_count == 0
mocker.patch.object(Path, "exists", MagicMock(return_value=True))
assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '')
assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark')
assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "")
assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark")
assert unlinkmock.call_count == 2
def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
dh = JsonDataHandler(testdatadir)
df = dh.ohlcv_load('UNITTEST/BTC', '1m', 'spot')
df = dh.ohlcv_load("UNITTEST/BTC", "1m", "spot")
assert len(df) > 0
# Failure case (empty array)
df1 = dh.ohlcv_load('NOPAIR/XXX', '4m', 'spot')
df1 = dh.ohlcv_load("NOPAIR/XXX", "4m", "spot")
assert len(df1) == 0
assert log_has("Could not load data for NOPAIR/XXX.", caplog)
assert df.columns.equals(df1.columns)
@@ -151,22 +167,22 @@ def test_jsondatahandler_ohlcv_load(testdatadir, caplog):
def test_datahandler_ohlcv_data_min_max(testdatadir):
dh = JsonDataHandler(testdatadir)
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '5m', 'spot')
min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "5m", "spot")
assert len(min_max) == 3
# Empty pair
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '8m', 'spot')
min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "8m", "spot")
assert len(min_max) == 3
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
assert min_max[0] == min_max[1]
# Empty pair2
min_max = dh.ohlcv_data_min_max('NOPAIR/XXX', '41m', 'spot')
min_max = dh.ohlcv_data_min_max("NOPAIR/XXX", "41m", "spot")
assert len(min_max) == 3
assert min_max[0] == datetime.fromtimestamp(0, tz=timezone.utc)
assert min_max[0] == min_max[1]
# Existing pair ...
min_max = dh.ohlcv_data_min_max('UNITTEST/BTC', '1m', 'spot')
min_max = dh.ohlcv_data_min_max("UNITTEST/BTC", "1m", "spot")
assert len(min_max) == 3
assert min_max[0] == datetime(2017, 11, 4, 23, 2, tzinfo=timezone.utc)
assert min_max[1] == datetime(2017, 11, 14, 22, 59, tzinfo=timezone.utc)
@@ -175,7 +191,8 @@ def test_datahandler_ohlcv_data_min_max(testdatadir):
def test_datahandler__check_empty_df(testdatadir, caplog):
dh = JsonDataHandler(testdatadir)
expected_text = r"Price jump in UNITTEST/USDT, 1h, spot between"
df = DataFrame([
df = DataFrame(
[
[
1511686200000, # 8:50:00
8.794, # open
@@ -198,7 +215,7 @@ def test_datahandler__check_empty_df(testdatadir, caplog):
8.893,
8.875,
8.877,
2251
2251,
],
[
1511687400000, # 9:10:00
@@ -206,13 +223,16 @@ def test_datahandler__check_empty_df(testdatadir, caplog):
8.883,
8.895,
8.817,
123551
]
], columns=['date', 'open', 'high', 'low', 'close', 'volume'])
123551,
],
],
columns=["date", "open", "high", "low", "close", "volume"],
)
dh._check_empty_df(df, 'UNITTEST/USDT', '1h', CandleType.SPOT, True, True)
dh._check_empty_df(df, "UNITTEST/USDT", "1h", CandleType.SPOT, True, True)
assert not log_has_re(expected_text, caplog)
df = DataFrame([
df = DataFrame(
[
[
1511686200000, # 8:50:00
8.794, # open
@@ -235,7 +255,7 @@ def test_datahandler__check_empty_df(testdatadir, caplog):
889.3,
887.5,
887.7,
2251
2251,
],
[
1511687400000, # 9:10:00
@@ -243,113 +263,120 @@ def test_datahandler__check_empty_df(testdatadir, caplog):
8.883,
8.895,
8.817,
123551
]
], columns=['date', 'open', 'high', 'low', 'close', 'volume'])
123551,
],
],
columns=["date", "open", "high", "low", "close", "volume"],
)
dh._check_empty_df(df, 'UNITTEST/USDT', '1h', CandleType.SPOT, True, True)
dh._check_empty_df(df, "UNITTEST/USDT", "1h", CandleType.SPOT, True, True)
assert log_has_re(expected_text, caplog)
# @pytest.mark.parametrize('datahandler', [])
@pytest.mark.skip("All datahandlers currently support trades data.")
def test_datahandler_trades_not_supported(datahandler, testdatadir, ):
def test_datahandler_trades_not_supported(
datahandler,
testdatadir,
):
# Currently disabled. Re-enable should a new provider not support trades data.
dh = get_datahandler(testdatadir, datahandler)
with pytest.raises(NotImplementedError):
dh.trades_load('UNITTEST/ETH')
dh.trades_load("UNITTEST/ETH")
with pytest.raises(NotImplementedError):
dh.trades_store('UNITTEST/ETH', MagicMock())
dh.trades_store("UNITTEST/ETH", MagicMock())
def test_jsondatahandler_trades_load(testdatadir, caplog):
dh = JsonGzDataHandler(testdatadir)
logmsg = "Old trades format detected - converting"
dh.trades_load('XRP/ETH', TradingMode.SPOT)
dh.trades_load("XRP/ETH", TradingMode.SPOT)
assert not log_has(logmsg, caplog)
# Test conversion is happening
dh.trades_load('XRP/OLD', TradingMode.SPOT)
dh.trades_load("XRP/OLD", TradingMode.SPOT)
assert log_has(logmsg, caplog)
@pytest.mark.parametrize('datahandler', AVAILABLE_DATAHANDLERS)
def test_datahandler_ohlcv_append(datahandler, testdatadir, ):
@pytest.mark.parametrize("datahandler", AVAILABLE_DATAHANDLERS)
def test_datahandler_ohlcv_append(
datahandler,
testdatadir,
):
dh = get_datahandler(testdatadir, datahandler)
with pytest.raises(NotImplementedError):
dh.ohlcv_append('UNITTEST/ETH', '5m', DataFrame(), CandleType.SPOT)
dh.ohlcv_append("UNITTEST/ETH", "5m", DataFrame(), CandleType.SPOT)
with pytest.raises(NotImplementedError):
dh.ohlcv_append('UNITTEST/ETH', '5m', DataFrame(), CandleType.MARK)
dh.ohlcv_append("UNITTEST/ETH", "5m", DataFrame(), CandleType.MARK)
@pytest.mark.parametrize('datahandler', AVAILABLE_DATAHANDLERS)
@pytest.mark.parametrize("datahandler", AVAILABLE_DATAHANDLERS)
def test_datahandler_trades_append(datahandler, testdatadir):
dh = get_datahandler(testdatadir, datahandler)
with pytest.raises(NotImplementedError):
dh.trades_append('UNITTEST/ETH', DataFrame())
dh.trades_append("UNITTEST/ETH", DataFrame())
@pytest.mark.parametrize('datahandler,expected', [
('jsongz', {'XRP/ETH', 'XRP/OLD'}),
('hdf5', {'XRP/ETH'}),
('feather', {'XRP/ETH'}),
('parquet', {'XRP/ETH'}),
])
@pytest.mark.parametrize(
"datahandler,expected",
[
("jsongz", {"XRP/ETH", "XRP/OLD"}),
("hdf5", {"XRP/ETH"}),
("feather", {"XRP/ETH"}),
("parquet", {"XRP/ETH"}),
],
)
def test_datahandler_trades_get_pairs(testdatadir, datahandler, expected):
pairs = get_datahandlerclass(datahandler).trades_get_pairs(testdatadir)
# Convert to set to avoid failures due to sorting
assert set(pairs) == expected
def test_hdf5datahandler_trades_load(testdatadir):
dh = get_datahandler(testdatadir, 'hdf5')
trades = dh.trades_load('XRP/ETH', TradingMode.SPOT)
dh = get_datahandler(testdatadir, "hdf5")
trades = dh.trades_load("XRP/ETH", TradingMode.SPOT)
assert isinstance(trades, DataFrame)
trades1 = dh.trades_load('UNITTEST/NONEXIST', TradingMode.SPOT)
trades1 = dh.trades_load("UNITTEST/NONEXIST", TradingMode.SPOT)
assert isinstance(trades1, DataFrame)
assert trades1.empty
# data goes from 2019-10-11 - 2019-10-13
timerange = TimeRange.parse_timerange('20191011-20191012')
timerange = TimeRange.parse_timerange("20191011-20191012")
trades2 = dh._trades_load('XRP/ETH', TradingMode.SPOT, timerange)
trades2 = dh._trades_load("XRP/ETH", TradingMode.SPOT, timerange)
assert len(trades) > len(trades2)
# Check that the "type" column is None (if it's nan, it's wrong)
assert trades2.iloc[0]['type'] is None
assert trades2.iloc[0]["type"] is None
# unfiltered load has trades before starttime
assert len(trades.loc[trades['timestamp'] < timerange.startts * 1000]) >= 0
assert len(trades.loc[trades["timestamp"] < timerange.startts * 1000]) >= 0
# filtered list does not have trades before starttime
assert len(trades2.loc[trades2['timestamp'] < timerange.startts * 1000]) == 0
assert len(trades2.loc[trades2["timestamp"] < timerange.startts * 1000]) == 0
# unfiltered load has trades after endtime
assert len(trades.loc[trades['timestamp'] > timerange.stopts * 1000]) >= 0
assert len(trades.loc[trades["timestamp"] > timerange.stopts * 1000]) >= 0
# filtered list does not have trades after endtime
assert len(trades2.loc[trades2['timestamp'] > timerange.stopts * 1000]) == 0
assert len(trades2.loc[trades2["timestamp"] > timerange.stopts * 1000]) == 0
# assert len([t for t in trades2 if t[0] > timerange.stopts * 1000]) == 0
@pytest.mark.parametrize('pair,timeframe,candle_type,candle_append,startdt,enddt', [
@pytest.mark.parametrize(
"pair,timeframe,candle_type,candle_append,startdt,enddt",
[
# Data goes from 2018-01-10 - 2018-01-30
('UNITTEST/BTC', '5m', 'spot', '', '2018-01-15', '2018-01-19'),
("UNITTEST/BTC", "5m", "spot", "", "2018-01-15", "2018-01-19"),
# Mark data goes from 2021-11-15 to 2021-11-19
('UNITTEST/USDT:USDT', '1h', 'mark', '-mark', '2021-11-16', '2021-11-18'),
])
("UNITTEST/USDT:USDT", "1h", "mark", "-mark", "2021-11-16", "2021-11-18"),
],
)
def test_hdf5datahandler_ohlcv_load_and_resave(
testdatadir,
tmp_path,
pair,
timeframe,
candle_type,
candle_append,
startdt, enddt
testdatadir, tmp_path, pair, timeframe, candle_type, candle_append, startdt, enddt
):
tmpdir2 = tmp_path
if candle_type not in ('', 'spot'):
tmpdir2 = tmp_path / 'futures'
if candle_type not in ("", "spot"):
tmpdir2 = tmp_path / "futures"
tmpdir2.mkdir()
dh = get_datahandler(testdatadir, 'hdf5')
dh = get_datahandler(testdatadir, "hdf5")
ohlcv = dh._ohlcv_load(pair, timeframe, None, candle_type=candle_type)
assert isinstance(ohlcv, DataFrame)
assert len(ohlcv) > 0
@ -357,50 +384,46 @@ def test_hdf5datahandler_ohlcv_load_and_resave(
file = tmpdir2 / f"UNITTEST_NEW-{timeframe}{candle_append}.h5"
assert not file.is_file()
dh1 = get_datahandler(tmp_path, 'hdf5')
dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type)
dh1 = get_datahandler(tmp_path, "hdf5")
dh1.ohlcv_store("UNITTEST/NEW", timeframe, ohlcv, candle_type=candle_type)
assert file.is_file()
assert not ohlcv[ohlcv['date'] < startdt].empty
assert not ohlcv[ohlcv["date"] < startdt].empty
timerange = TimeRange.parse_timerange(f"{startdt.replace('-', '')}-{enddt.replace('-', '')}")
# Call private function to ensure timerange is filtered in hdf5
ohlcv = dh._ohlcv_load(pair, timeframe, timerange, candle_type=candle_type)
ohlcv1 = dh1._ohlcv_load('UNITTEST/NEW', timeframe, timerange, candle_type=candle_type)
ohlcv1 = dh1._ohlcv_load("UNITTEST/NEW", timeframe, timerange, candle_type=candle_type)
assert len(ohlcv) == len(ohlcv1)
assert ohlcv.equals(ohlcv1)
assert ohlcv[ohlcv['date'] < startdt].empty
assert ohlcv[ohlcv['date'] > enddt].empty
assert ohlcv[ohlcv["date"] < startdt].empty
assert ohlcv[ohlcv["date"] > enddt].empty
# Try loading a nonexistent file
ohlcv = dh.ohlcv_load('UNITTEST/NONEXIST', timeframe, candle_type=candle_type)
ohlcv = dh.ohlcv_load("UNITTEST/NONEXIST", timeframe, candle_type=candle_type)
assert ohlcv.empty
@pytest.mark.parametrize('pair,timeframe,candle_type,candle_append,startdt,enddt', [
@pytest.mark.parametrize(
"pair,timeframe,candle_type,candle_append,startdt,enddt",
[
# Data goes from 2018-01-10 - 2018-01-30
('UNITTEST/BTC', '5m', 'spot', '', '2018-01-15', '2018-01-19'),
("UNITTEST/BTC", "5m", "spot", "", "2018-01-15", "2018-01-19"),
# Mark data goes from 2021-11-15 to 2021-11-19
('UNITTEST/USDT:USDT', '1h', 'mark', '-mark', '2021-11-16', '2021-11-18'),
])
@pytest.mark.parametrize('datahandler', ['hdf5', 'feather', 'parquet'])
("UNITTEST/USDT:USDT", "1h", "mark", "-mark", "2021-11-16", "2021-11-18"),
],
)
@pytest.mark.parametrize("datahandler", ["hdf5", "feather", "parquet"])
def test_generic_datahandler_ohlcv_load_and_resave(
datahandler,
testdatadir,
tmp_path,
pair,
timeframe,
candle_type,
candle_append,
startdt, enddt
datahandler, testdatadir, tmp_path, pair, timeframe, candle_type, candle_append, startdt, enddt
):
tmpdir2 = tmp_path
if candle_type not in ('', 'spot'):
tmpdir2 = tmp_path / 'futures'
if candle_type not in ("", "spot"):
tmpdir2 = tmp_path / "futures"
tmpdir2.mkdir()
# Load data from one common file
dhbase = get_datahandler(testdatadir, 'feather')
dhbase = get_datahandler(testdatadir, "feather")
ohlcv = dhbase._ohlcv_load(pair, timeframe, None, candle_type=candle_type)
assert isinstance(ohlcv, DataFrame)
assert len(ohlcv) > 0
@ -412,122 +435,123 @@ def test_generic_datahandler_ohlcv_load_and_resave(
assert not file.is_file()
dh1 = get_datahandler(tmp_path, datahandler)
dh1.ohlcv_store('UNITTEST/NEW', timeframe, ohlcv, candle_type=candle_type)
dh1.ohlcv_store("UNITTEST/NEW", timeframe, ohlcv, candle_type=candle_type)
assert file.is_file()
assert not ohlcv[ohlcv['date'] < startdt].empty
assert not ohlcv[ohlcv["date"] < startdt].empty
timerange = TimeRange.parse_timerange(f"{startdt.replace('-', '')}-{enddt.replace('-', '')}")
ohlcv = dhbase.ohlcv_load(pair, timeframe, timerange=timerange, candle_type=candle_type)
if datahandler == 'hdf5':
ohlcv1 = dh1._ohlcv_load('UNITTEST/NEW', timeframe, timerange, candle_type=candle_type)
if candle_type == 'mark':
ohlcv1['volume'] = 0.0
if datahandler == "hdf5":
ohlcv1 = dh1._ohlcv_load("UNITTEST/NEW", timeframe, timerange, candle_type=candle_type)
if candle_type == "mark":
ohlcv1["volume"] = 0.0
else:
ohlcv1 = dh1.ohlcv_load('UNITTEST/NEW', timeframe,
timerange=timerange, candle_type=candle_type)
ohlcv1 = dh1.ohlcv_load(
"UNITTEST/NEW", timeframe, timerange=timerange, candle_type=candle_type
)
assert len(ohlcv) == len(ohlcv1)
assert ohlcv.equals(ohlcv1)
assert ohlcv[ohlcv['date'] < startdt].empty
assert ohlcv[ohlcv['date'] > enddt].empty
assert ohlcv[ohlcv["date"] < startdt].empty
assert ohlcv[ohlcv["date"] > enddt].empty
# Try loading a nonexistent file
ohlcv = dh.ohlcv_load('UNITTEST/NONEXIST', timeframe, candle_type=candle_type)
ohlcv = dh.ohlcv_load("UNITTEST/NONEXIST", timeframe, candle_type=candle_type)
assert ohlcv.empty
def test_hdf5datahandler_ohlcv_purge(mocker, testdatadir):
mocker.patch.object(Path, "exists", MagicMock(return_value=False))
unlinkmock = mocker.patch.object(Path, "unlink", MagicMock())
dh = get_datahandler(testdatadir, 'hdf5')
assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '')
assert not dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark')
dh = get_datahandler(testdatadir, "hdf5")
assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "")
assert not dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark")
assert unlinkmock.call_count == 0
mocker.patch.object(Path, "exists", MagicMock(return_value=True))
assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', '')
assert dh.ohlcv_purge('UNITTEST/NONEXIST', '5m', candle_type='mark')
assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", "")
assert dh.ohlcv_purge("UNITTEST/NONEXIST", "5m", candle_type="mark")
assert unlinkmock.call_count == 2
@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet'])
@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"])
def test_datahandler_trades_load(testdatadir, datahandler):
dh = get_datahandler(testdatadir, datahandler)
trades = dh.trades_load('XRP/ETH', TradingMode.SPOT)
trades = dh.trades_load("XRP/ETH", TradingMode.SPOT)
assert isinstance(trades, DataFrame)
assert trades.iloc[0]['timestamp'] == 1570752011620
assert trades.iloc[0]['date'] == Timestamp('2019-10-11 00:00:11.620000+0000')
assert trades.iloc[-1]['cost'] == 0.1986231
assert trades.iloc[0]["timestamp"] == 1570752011620
assert trades.iloc[0]["date"] == Timestamp("2019-10-11 00:00:11.620000+0000")
assert trades.iloc[-1]["cost"] == 0.1986231
trades1 = dh.trades_load('UNITTEST/NONEXIST', TradingMode.SPOT)
trades1 = dh.trades_load("UNITTEST/NONEXIST", TradingMode.SPOT)
assert isinstance(trades1, DataFrame)
assert trades1.empty
@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet'])
@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"])
def test_datahandler_trades_store(testdatadir, tmp_path, datahandler):
dh = get_datahandler(testdatadir, datahandler)
trades = dh.trades_load('XRP/ETH', TradingMode.SPOT)
trades = dh.trades_load("XRP/ETH", TradingMode.SPOT)
dh1 = get_datahandler(tmp_path, datahandler)
dh1.trades_store('XRP/NEW', trades, TradingMode.SPOT)
dh1.trades_store("XRP/NEW", trades, TradingMode.SPOT)
file = tmp_path / f'XRP_NEW-trades.{dh1._get_file_extension()}'
file = tmp_path / f"XRP_NEW-trades.{dh1._get_file_extension()}"
assert file.is_file()
# Load trades back
trades_new = dh1.trades_load('XRP/NEW', TradingMode.SPOT)
trades_new = dh1.trades_load("XRP/NEW", TradingMode.SPOT)
assert_frame_equal(trades, trades_new, check_exact=True)
assert len(trades_new) == len(trades)
@pytest.mark.parametrize('datahandler', ['jsongz', 'hdf5', 'feather', 'parquet'])
@pytest.mark.parametrize("datahandler", ["jsongz", "hdf5", "feather", "parquet"])
def test_datahandler_trades_purge(mocker, testdatadir, datahandler):
mocker.patch.object(Path, "exists", MagicMock(return_value=False))
unlinkmock = mocker.patch.object(Path, "unlink", MagicMock())
dh = get_datahandler(testdatadir, datahandler)
assert not dh.trades_purge('UNITTEST/NONEXIST', TradingMode.SPOT)
assert not dh.trades_purge("UNITTEST/NONEXIST", TradingMode.SPOT)
assert unlinkmock.call_count == 0
mocker.patch.object(Path, "exists", MagicMock(return_value=True))
assert dh.trades_purge('UNITTEST/NONEXIST', TradingMode.SPOT)
assert dh.trades_purge("UNITTEST/NONEXIST", TradingMode.SPOT)
assert unlinkmock.call_count == 1
def test_gethandlerclass():
cl = get_datahandlerclass('json')
cl = get_datahandlerclass("json")
assert cl == JsonDataHandler
assert issubclass(cl, IDataHandler)
cl = get_datahandlerclass('jsongz')
cl = get_datahandlerclass("jsongz")
assert cl == JsonGzDataHandler
assert issubclass(cl, IDataHandler)
assert issubclass(cl, JsonDataHandler)
cl = get_datahandlerclass('hdf5')
cl = get_datahandlerclass("hdf5")
assert cl == HDF5DataHandler
assert issubclass(cl, IDataHandler)
cl = get_datahandlerclass('feather')
cl = get_datahandlerclass("feather")
assert cl == FeatherDataHandler
assert issubclass(cl, IDataHandler)
cl = get_datahandlerclass('parquet')
cl = get_datahandlerclass("parquet")
assert cl == ParquetDataHandler
assert issubclass(cl, IDataHandler)
with pytest.raises(ValueError, match=r"No datahandler for .*"):
get_datahandlerclass('DeadBeef')
get_datahandlerclass("DeadBeef")
def test_get_datahandler(testdatadir):
dh = get_datahandler(testdatadir, 'json')
dh = get_datahandler(testdatadir, "json")
assert isinstance(dh, JsonDataHandler)
dh = get_datahandler(testdatadir, 'jsongz')
dh = get_datahandler(testdatadir, "jsongz")
assert isinstance(dh, JsonGzDataHandler)
dh1 = get_datahandler(testdatadir, 'jsongz', dh)
dh1 = get_datahandler(testdatadir, "jsongz", dh)
assert id(dh1) == id(dh)
dh = get_datahandler(testdatadir, 'hdf5')
dh = get_datahandler(testdatadir, "hdf5")
assert isinstance(dh, HDF5DataHandler)


@ -11,10 +11,13 @@ from freqtrade.plugins.pairlistmanager import PairListManager
from tests.conftest import EXMS, generate_test_data, get_patched_exchange
@pytest.mark.parametrize('candle_type', [
'mark',
'',
])
@pytest.mark.parametrize(
"candle_type",
[
"mark",
"",
],
)
def test_dp_ohlcv(mocker, default_conf, ohlcv_history, candle_type):
default_conf["runmode"] = RunMode.DRY_RUN
timeframe = default_conf["timeframe"]
@ -33,11 +36,9 @@ def test_dp_ohlcv(mocker, default_conf, ohlcv_history, candle_type):
assert dp.ohlcv("NONSENSE/AAA", timeframe, candle_type=candletype).empty
# Test with and without parameter
assert dp.ohlcv(
"UNITTEST/BTC",
timeframe,
candle_type=candletype
).equals(dp.ohlcv("UNITTEST/BTC", candle_type=candle_type))
assert dp.ohlcv("UNITTEST/BTC", timeframe, candle_type=candletype).equals(
dp.ohlcv("UNITTEST/BTC", candle_type=candle_type)
)
default_conf["runmode"] = RunMode.LIVE
dp = DataProvider(default_conf, exchange)
@ -66,10 +67,12 @@ def test_historic_ohlcv_dataformat(mocker, default_conf, ohlcv_history):
featherloadmock = MagicMock(return_value=ohlcv_history)
mocker.patch(
"freqtrade.data.history.datahandlers.hdf5datahandler.HDF5DataHandler._ohlcv_load",
hdf5loadmock)
hdf5loadmock,
)
mocker.patch(
"freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load",
featherloadmock)
featherloadmock,
)
default_conf["runmode"] = RunMode.BACKTEST
exchange = get_patched_exchange(mocker, default_conf)
@ -90,11 +93,14 @@ def test_historic_ohlcv_dataformat(mocker, default_conf, ohlcv_history):
featherloadmock.assert_not_called()
@pytest.mark.parametrize('candle_type', [
'mark',
'futures',
'',
])
@pytest.mark.parametrize(
"candle_type",
[
"mark",
"futures",
"",
],
)
def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type):
default_conf["runmode"] = RunMode.DRY_RUN
timeframe = default_conf["timeframe"]
@ -105,26 +111,33 @@ def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type):
dp = DataProvider(default_conf, exchange)
assert dp.runmode == RunMode.DRY_RUN
assert ohlcv_history.equals(dp.get_pair_dataframe(
"UNITTEST/BTC", timeframe, candle_type=candle_type))
assert ohlcv_history.equals(dp.get_pair_dataframe(
"UNITTEST/BTC", timeframe, candle_type=candletype))
assert isinstance(dp.get_pair_dataframe(
"UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame)
assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe,
candle_type=candle_type) is not ohlcv_history
assert ohlcv_history.equals(
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type)
)
assert ohlcv_history.equals(
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candletype)
)
assert isinstance(
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame
)
assert (
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type)
is not ohlcv_history
)
assert not dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type).empty
assert dp.get_pair_dataframe("NONSENSE/AAA", timeframe, candle_type=candle_type).empty
# Test with and without parameter
assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type)\
.equals(dp.get_pair_dataframe("UNITTEST/BTC", candle_type=candle_type))
assert dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type).equals(
dp.get_pair_dataframe("UNITTEST/BTC", candle_type=candle_type)
)
default_conf["runmode"] = RunMode.LIVE
dp = DataProvider(default_conf, exchange)
assert dp.runmode == RunMode.LIVE
assert isinstance(dp.get_pair_dataframe(
"UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame)
assert isinstance(
dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type), DataFrame
)
assert dp.get_pair_dataframe("NONSENSE/AAA", timeframe, candle_type=candle_type).empty
historymock = MagicMock(return_value=ohlcv_history)
@ -136,7 +149,7 @@ def test_get_pair_dataframe(mocker, default_conf, ohlcv_history, candle_type):
assert isinstance(df, DataFrame)
assert len(df) == 3 # ohlcv_history mock has just 3 rows
dp._set_dataframe_max_date(ohlcv_history.iloc[-1]['date'])
dp._set_dataframe_max_date(ohlcv_history.iloc[-1]["date"])
df = dp.get_pair_dataframe("UNITTEST/BTC", timeframe, candle_type=candle_type)
assert isinstance(df, DataFrame)
assert len(df) == 2 # ohlcv_history is limited to 2 rows now
@ -150,7 +163,10 @@ def test_available_pairs(mocker, default_conf, ohlcv_history):
dp = DataProvider(default_conf, exchange)
assert len(dp.available_pairs) == 2
assert dp.available_pairs == [("XRP/BTC", timeframe), ("UNITTEST/BTC", timeframe), ]
assert dp.available_pairs == [
("XRP/BTC", timeframe),
("UNITTEST/BTC", timeframe),
]
def test_producer_pairs(default_conf):
@ -172,9 +188,9 @@ def test_producer_pairs(default_conf):
def test_get_producer_df(default_conf):
dataprovider = DataProvider(default_conf, None)
ohlcv_history = generate_test_data('5m', 150)
pair = 'BTC/USDT'
timeframe = default_conf['timeframe']
ohlcv_history = generate_test_data("5m", 150)
pair = "BTC/USDT"
timeframe = default_conf["timeframe"]
candle_type = CandleType.SPOT
empty_la = datetime.fromtimestamp(0, tz=timezone.utc)
@ -192,20 +208,20 @@ def test_get_producer_df(default_conf):
assert la > empty_la
# no data on this producer, should return empty dataframe
dataframe, la = dataprovider.get_producer_df(pair, producer_name='bad')
dataframe, la = dataprovider.get_producer_df(pair, producer_name="bad")
assert dataframe.empty
assert la == empty_la
# non-existent timeframe, empty dataframe
_dataframe, la = dataprovider.get_producer_df(pair, timeframe='1h')
_dataframe, la = dataprovider.get_producer_df(pair, timeframe="1h")
assert _dataframe.empty
assert la == empty_la
def test_emit_df(mocker, default_conf, ohlcv_history):
mocker.patch('freqtrade.rpc.rpc_manager.RPCManager.__init__', MagicMock())
rpc_mock = mocker.patch('freqtrade.rpc.rpc_manager.RPCManager', MagicMock())
send_mock = mocker.patch('freqtrade.rpc.rpc_manager.RPCManager.send_msg', MagicMock())
mocker.patch("freqtrade.rpc.rpc_manager.RPCManager.__init__", MagicMock())
rpc_mock = mocker.patch("freqtrade.rpc.rpc_manager.RPCManager", MagicMock())
send_mock = mocker.patch("freqtrade.rpc.rpc_manager.RPCManager.send_msg", MagicMock())
dataprovider = DataProvider(default_conf, exchange=None, rpc=rpc_mock)
dataprovider_no_rpc = DataProvider(default_conf, exchange=None)
@ -262,14 +278,14 @@ def test_orderbook(mocker, default_conf, order_book_l2):
exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock)
dp = DataProvider(default_conf, exchange)
res = dp.orderbook('ETH/BTC', 5)
res = dp.orderbook("ETH/BTC", 5)
assert order_book_l2.call_count == 1
assert order_book_l2.call_args_list[0][0][0] == 'ETH/BTC'
assert order_book_l2.call_args_list[0][0][0] == "ETH/BTC"
assert order_book_l2.call_args_list[0][0][1] >= 5
assert isinstance(res, dict)
assert 'bids' in res
assert 'asks' in res
assert "bids" in res
assert "asks" in res
def test_market(mocker, default_conf, markets):
@ -278,41 +294,39 @@ def test_market(mocker, default_conf, markets):
exchange = get_patched_exchange(mocker, default_conf, api_mock=api_mock)
dp = DataProvider(default_conf, exchange)
res = dp.market('ETH/BTC')
res = dp.market("ETH/BTC")
assert isinstance(res, dict)
assert 'symbol' in res
assert res['symbol'] == 'ETH/BTC'
assert "symbol" in res
assert res["symbol"] == "ETH/BTC"
res = dp.market('UNITTEST/BTC')
res = dp.market("UNITTEST/BTC")
assert res is None
def test_ticker(mocker, default_conf, tickers):
ticker_mock = MagicMock(return_value=tickers()['ETH/BTC'])
ticker_mock = MagicMock(return_value=tickers()["ETH/BTC"])
mocker.patch(f"{EXMS}.fetch_ticker", ticker_mock)
exchange = get_patched_exchange(mocker, default_conf)
dp = DataProvider(default_conf, exchange)
res = dp.ticker('ETH/BTC')
res = dp.ticker("ETH/BTC")
assert isinstance(res, dict)
assert 'symbol' in res
assert res['symbol'] == 'ETH/BTC'
assert "symbol" in res
assert res["symbol"] == "ETH/BTC"
ticker_mock = MagicMock(side_effect=ExchangeError('Pair not found'))
ticker_mock = MagicMock(side_effect=ExchangeError("Pair not found"))
mocker.patch(f"{EXMS}.fetch_ticker", ticker_mock)
exchange = get_patched_exchange(mocker, default_conf)
dp = DataProvider(default_conf, exchange)
res = dp.ticker('UNITTEST/BTC')
res = dp.ticker("UNITTEST/BTC")
assert res == {}
def test_current_whitelist(mocker, default_conf, tickers):
# patch default conf to volumepairlist
default_conf['pairlists'][0] = {'method': 'VolumePairList', "number_assets": 5}
default_conf["pairlists"][0] = {"method": "VolumePairList", "number_assets": 5}
mocker.patch.multiple(EXMS,
exchange_has=MagicMock(return_value=True),
get_tickers=tickers)
mocker.patch.multiple(EXMS, exchange_has=MagicMock(return_value=True), get_tickers=tickers)
exchange = get_patched_exchange(mocker, default_conf)
pairlist = PairListManager(exchange, default_conf)
@ -331,7 +345,6 @@ def test_current_whitelist(mocker, default_conf, tickers):
def test_get_analyzed_dataframe(mocker, default_conf, ohlcv_history):
default_conf["runmode"] = RunMode.DRY_RUN
timeframe = default_conf["timeframe"]
@ -384,28 +397,27 @@ def test_no_exchange_mode(default_conf):
dp.refresh([()])
with pytest.raises(OperationalException, match=message):
dp.ohlcv('XRP/USDT', '5m', '')
dp.ohlcv("XRP/USDT", "5m", "")
with pytest.raises(OperationalException, match=message):
dp.market('XRP/USDT')
dp.market("XRP/USDT")
with pytest.raises(OperationalException, match=message):
dp.ticker('XRP/USDT')
dp.ticker("XRP/USDT")
with pytest.raises(OperationalException, match=message):
dp.orderbook('XRP/USDT', 20)
dp.orderbook("XRP/USDT", 20)
with pytest.raises(OperationalException, match=message):
dp.available_pairs()
def test_dp_send_msg(default_conf):
default_conf["runmode"] = RunMode.DRY_RUN
default_conf["timeframe"] = '1h'
default_conf["timeframe"] = "1h"
dp = DataProvider(default_conf, None)
msg = 'Test message'
msg = "Test message"
dp.send_msg(msg)
assert msg in dp._msg_queue
@ -424,81 +436,81 @@ def test_dp_send_msg(default_conf):
def test_dp__add_external_df(default_conf_usdt):
timeframe = '1h'
timeframe = "1h"
default_conf_usdt["timeframe"] = timeframe
dp = DataProvider(default_conf_usdt, None)
df = generate_test_data(timeframe, 24, '2022-01-01 00:00:00+00:00')
df = generate_test_data(timeframe, 24, "2022-01-01 00:00:00+00:00")
last_analyzed = datetime.now(timezone.utc)
res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT)
res = dp._add_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is False
# Why 1000 ??
assert res[1] == 1000
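# Presumably 1000 is the number of candles requested from the producer when the pair
# is not yet stored - an unverified guess at the open question above.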
# Hard add dataframe
dp._replace_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT)
dp._replace_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT)
# BTC is not stored yet
res = dp._add_external_df('BTC/USDT', df, last_analyzed, timeframe, CandleType.SPOT)
res = dp._add_external_df("BTC/USDT", df, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is False
df_res, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df_res, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
assert len(df_res) == 24
# Add the same dataframe again - dataframe size shall not change.
res = dp._add_external_df('ETH/USDT', df, last_analyzed, timeframe, CandleType.SPOT)
res = dp._add_external_df("ETH/USDT", df, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is True
assert isinstance(res[1], int)
assert res[1] == 0
df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
assert len(df) == 24
# Add a new day.
df2 = generate_test_data(timeframe, 24, '2022-01-02 00:00:00+00:00')
df2 = generate_test_data(timeframe, 24, "2022-01-02 00:00:00+00:00")
res = dp._add_external_df('ETH/USDT', df2, last_analyzed, timeframe, CandleType.SPOT)
res = dp._add_external_df("ETH/USDT", df2, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is True
assert isinstance(res[1], int)
assert res[1] == 0
df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
assert len(df) == 48
# Add a dataframe with a 12 hour offset - so 12 candles are overlapping, and 12 valid.
df3 = generate_test_data(timeframe, 24, '2022-01-02 12:00:00+00:00')
df3 = generate_test_data(timeframe, 24, "2022-01-02 12:00:00+00:00")
res = dp._add_external_df('ETH/USDT', df3, last_analyzed, timeframe, CandleType.SPOT)
res = dp._add_external_df("ETH/USDT", df3, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is True
assert isinstance(res[1], int)
assert res[1] == 0
df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
# New length = 48 + 12 (since we have a 12 hour offset).
assert len(df) == 60
assert df.iloc[-1]['date'] == df3.iloc[-1]['date']
assert df.iloc[-1]['date'] == Timestamp('2022-01-03 11:00:00+00:00')
assert df.iloc[-1]["date"] == df3.iloc[-1]["date"]
assert df.iloc[-1]["date"] == Timestamp("2022-01-03 11:00:00+00:00")
# Generate 1 new candle
df4 = generate_test_data(timeframe, 1, '2022-01-03 12:00:00+00:00')
res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT)
df4 = generate_test_data(timeframe, 1, "2022-01-03 12:00:00+00:00")
res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT)
# assert res[0] is True
# assert res[1] == 0
df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
# New length = 60 + 1
assert len(df) == 61
assert df.iloc[-2]['date'] == Timestamp('2022-01-03 11:00:00+00:00')
assert df.iloc[-1]['date'] == Timestamp('2022-01-03 12:00:00+00:00')
assert df.iloc[-2]["date"] == Timestamp("2022-01-03 11:00:00+00:00")
assert df.iloc[-1]["date"] == Timestamp("2022-01-03 12:00:00+00:00")
# Gap in the data ...
df4 = generate_test_data(timeframe, 1, '2022-01-05 00:00:00+00:00')
res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT)
df4 = generate_test_data(timeframe, 1, "2022-01-05 00:00:00+00:00")
res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is False
# 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00
assert isinstance(res[1], int)
assert res[1] == 36
df, _ = dp.get_producer_df('ETH/USDT', timeframe, CandleType.SPOT)
df, _ = dp.get_producer_df("ETH/USDT", timeframe, CandleType.SPOT)
# Length unchanged at 61 - the gapped candle was not added
assert len(df) == 61
# Empty dataframe
df4 = generate_test_data(timeframe, 0, '2022-01-05 00:00:00+00:00')
res = dp._add_external_df('ETH/USDT', df4, last_analyzed, timeframe, CandleType.SPOT)
df4 = generate_test_data(timeframe, 0, "2022-01-05 00:00:00+00:00")
res = dp._add_external_df("ETH/USDT", df4, last_analyzed, timeframe, CandleType.SPOT)
assert res[0] is False
# 36 hours - from 2022-01-03 12:00:00+00:00 to 2022-01-05 00:00:00+00:00
assert isinstance(res[1], int)
@ -506,59 +518,59 @@ def test_dp__add_external_df(default_conf_usdt):
def test_dp_get_required_startup(default_conf_usdt):
timeframe = '1h'
timeframe = "1h"
default_conf_usdt["timeframe"] = timeframe
dp = DataProvider(default_conf_usdt, None)
# No FreqAI config
assert dp.get_required_startup('5m') == 0
assert dp.get_required_startup('1h') == 0
assert dp.get_required_startup('1d') == 0
assert dp.get_required_startup("5m") == 0
assert dp.get_required_startup("1h") == 0
assert dp.get_required_startup("1d") == 0
dp._config['startup_candle_count'] = 20
assert dp.get_required_startup('5m') == 20
assert dp.get_required_startup('1h') == 20
assert dp.get_required_startup('1d') == 20
dp._config["startup_candle_count"] = 20
assert dp.get_required_startup("5m") == 20
assert dp.get_required_startup("1h") == 20
assert dp.get_required_startup("1h") == 20
# With freqAI config
dp._config['freqai'] = {
'enabled': True,
'train_period_days': 20,
'feature_parameters': {
'indicator_periods_candles': [
dp._config["freqai"] = {
"enabled": True,
"train_period_days": 20,
"feature_parameters": {
"indicator_periods_candles": [
5,
20,
]
},
}
}
assert dp.get_required_startup('5m') == 5780
assert dp.get_required_startup('1h') == 500
assert dp.get_required_startup('1d') == 40
assert dp.get_required_startup("5m") == 5780
assert dp.get_required_startup("1h") == 500
assert dp.get_required_startup("1d") == 40
# FreqAI effectively ignores startup_candle_count if it's below indicator_periods_candles
dp._config['startup_candle_count'] = 0
assert dp.get_required_startup('5m') == 5780
assert dp.get_required_startup('1h') == 500
assert dp.get_required_startup('1d') == 40
dp._config["startup_candle_count"] = 0
assert dp.get_required_startup("5m") == 5780
assert dp.get_required_startup("1h") == 500
assert dp.get_required_startup("1d") == 40
dp._config['freqai']['feature_parameters']['indicator_periods_candles'][1] = 50
assert dp.get_required_startup('5m') == 5810
assert dp.get_required_startup('1h') == 530
assert dp.get_required_startup('1d') == 70
dp._config["freqai"]["feature_parameters"]["indicator_periods_candles"][1] = 50
assert dp.get_required_startup("5m") == 5810
assert dp.get_required_startup("1h") == 530
assert dp.get_required_startup("1d") == 70
# scenario from issue https://github.com/freqtrade/freqtrade/issues/9432
dp._config['freqai'] = {
'enabled': True,
'train_period_days': 180,
'feature_parameters': {
'indicator_periods_candles': [
dp._config["freqai"] = {
"enabled": True,
"train_period_days": 180,
"feature_parameters": {
"indicator_periods_candles": [
10,
20,
]
},
}
}
dp._config['startup_candle_count'] = 40
assert dp.get_required_startup('5m') == 51880
assert dp.get_required_startup('1h') == 4360
assert dp.get_required_startup('1d') == 220
dp._config["startup_candle_count"] = 40
assert dp.get_required_startup("5m") == 51880
assert dp.get_required_startup("1h") == 4360
assert dp.get_required_startup("1d") == 220


@ -10,83 +10,84 @@ from tests.conftest import EXMS, log_has, patch_exchange
def test_download_data_main_no_markets(mocker, caplog):
dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
patch_exchange(mocker, id='binance')
mocker.patch(f'{EXMS}.get_markets', return_value={})
dl_mock = mocker.patch(
"freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data",
MagicMock(return_value=["ETH/BTC", "XRP/BTC"]),
)
patch_exchange(mocker, id="binance")
mocker.patch(f"{EXMS}.get_markets", return_value={})
config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
config.update({
"days": 20,
"pairs": ["ETH/BTC", "XRP/BTC"],
"timeframes": ["5m", "1h"]
})
config.update({"days": 20, "pairs": ["ETH/BTC", "XRP/BTC"], "timeframes": ["5m", "1h"]})
download_data_main(config)
assert dl_mock.call_args[1]['timerange'].starttype == "date"
assert dl_mock.call_args[1]["timerange"].starttype == "date"
assert log_has("Pairs [ETH/BTC,XRP/BTC] not available on exchange Binance.", caplog)
def test_download_data_main_all_pairs(mocker, markets):
dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data',
MagicMock(return_value=["ETH/BTC", "XRP/BTC"]))
dl_mock = mocker.patch(
"freqtrade.data.history.history_utils.refresh_backtest_ohlcv_data",
MagicMock(return_value=["ETH/BTC", "XRP/BTC"]),
)
patch_exchange(mocker)
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets))
config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
config.update({
"pairs": [".*/USDT"],
"timeframes": ["5m", "1h"]
})
config.update({"pairs": [".*/USDT"], "timeframes": ["5m", "1h"]})
download_data_main(config)
expected = set(['BTC/USDT', 'ETH/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'])
assert set(dl_mock.call_args_list[0][1]['pairs']) == expected
expected = set(["BTC/USDT", "ETH/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"])
assert set(dl_mock.call_args_list[0][1]["pairs"]) == expected
assert dl_mock.call_count == 1
dl_mock.reset_mock()
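# LTC/USDT is absent above - presumably inactive in the markets fixture;
# include_inactive below should add it to the expected set.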
config.update({
"pairs": [".*/USDT"],
"timeframes": ["5m", "1h"],
"include_inactive": True
})
config.update({"pairs": [".*/USDT"], "timeframes": ["5m", "1h"], "include_inactive": True})
download_data_main(config)
expected = set(['BTC/USDT', 'ETH/USDT', 'LTC/USDT', 'XRP/USDT', 'NEO/USDT', 'TKN/USDT'])
assert set(dl_mock.call_args_list[0][1]['pairs']) == expected
expected = set(["BTC/USDT", "ETH/USDT", "LTC/USDT", "XRP/USDT", "NEO/USDT", "TKN/USDT"])
assert set(dl_mock.call_args_list[0][1]["pairs"]) == expected
def test_download_data_main_trades(mocker):
dl_mock = mocker.patch('freqtrade.data.history.history_utils.refresh_backtest_trades_data',
MagicMock(return_value=[]))
convert_mock = mocker.patch('freqtrade.data.history.history_utils.convert_trades_to_ohlcv',
MagicMock(return_value=[]))
dl_mock = mocker.patch(
"freqtrade.data.history.history_utils.refresh_backtest_trades_data",
MagicMock(return_value=[]),
)
convert_mock = mocker.patch(
"freqtrade.data.history.history_utils.convert_trades_to_ohlcv", MagicMock(return_value=[])
)
patch_exchange(mocker)
mocker.patch(f'{EXMS}.get_markets', return_value={})
mocker.patch(f"{EXMS}.get_markets", return_value={})
config = setup_utils_configuration({"exchange": "binance"}, RunMode.UTIL_EXCHANGE)
config.update({
config.update(
{
"days": 20,
"pairs": ["ETH/BTC", "XRP/BTC"],
"timeframes": ["5m", "1h"],
"download_trades": True,
})
}
)
download_data_main(config)
assert dl_mock.call_args[1]['timerange'].starttype == "date"
assert dl_mock.call_args[1]["timerange"].starttype == "date"
assert dl_mock.call_count == 1
assert convert_mock.call_count == 1
config.update({
config.update(
{
"download_trades": True,
"trading_mode": "futures",
})
}
)
def test_download_data_main_data_invalid(mocker):
patch_exchange(mocker, id="kraken")
mocker.patch(f'{EXMS}.get_markets', return_value={})
mocker.patch(f"{EXMS}.get_markets", return_value={})
config = setup_utils_configuration({"exchange": "kraken"}, RunMode.UTIL_EXCHANGE)
config.update({
config.update(
{
"days": 20,
"pairs": ["ETH/BTC", "XRP/BTC"],
"timeframes": ["5m", "1h"],
})
}
)
with pytest.raises(OperationalException, match=r"Historic klines not available for .*"):
download_data_main(config)


@ -20,198 +20,228 @@ def entryexitanalysis_cleanup() -> None:
def test_backtest_analysis_nomock(default_conf, mocker, caplog, testdatadir, user_dir, capsys):
caplog.set_level(logging.INFO)
(user_dir / 'backtest_results').mkdir(parents=True, exist_ok=True)
(user_dir / "backtest_results").mkdir(parents=True, exist_ok=True)
default_conf.update({
default_conf.update(
{
"use_exit_signal": True,
"exit_profit_only": False,
"exit_profit_offset": 0.0,
"ignore_roi_if_entry_signal": False,
})
}
)
patch_exchange(mocker)
result1 = pd.DataFrame({'pair': ['ETH/BTC', 'LTC/BTC', 'ETH/BTC', 'LTC/BTC'],
'profit_ratio': [0.025, 0.05, -0.1, -0.05],
'profit_abs': [0.5, 2.0, -4.0, -2.0],
'open_date': pd.to_datetime(['2018-01-29 18:40:00',
'2018-01-30 03:30:00',
'2018-01-30 08:10:00',
'2018-01-31 13:30:00', ], utc=True
result1 = pd.DataFrame(
{
"pair": ["ETH/BTC", "LTC/BTC", "ETH/BTC", "LTC/BTC"],
"profit_ratio": [0.025, 0.05, -0.1, -0.05],
"profit_abs": [0.5, 2.0, -4.0, -2.0],
"open_date": pd.to_datetime(
[
"2018-01-29 18:40:00",
"2018-01-30 03:30:00",
"2018-01-30 08:10:00",
"2018-01-31 13:30:00",
],
utc=True,
),
'close_date': pd.to_datetime(['2018-01-29 20:45:00',
'2018-01-30 05:35:00',
'2018-01-30 09:10:00',
'2018-01-31 15:00:00', ], utc=True),
'trade_duration': [235, 40, 60, 90],
'is_open': [False, False, False, False],
'stake_amount': [0.01, 0.01, 0.01, 0.01],
'open_rate': [0.104445, 0.10302485, 0.10302485, 0.10302485],
'close_rate': [0.104969, 0.103541, 0.102041, 0.102541],
"close_date": pd.to_datetime(
[
"2018-01-29 20:45:00",
"2018-01-30 05:35:00",
"2018-01-30 09:10:00",
"2018-01-31 15:00:00",
],
utc=True,
),
"trade_duration": [235, 40, 60, 90],
"is_open": [False, False, False, False],
"stake_amount": [0.01, 0.01, 0.01, 0.01],
"open_rate": [0.104445, 0.10302485, 0.10302485, 0.10302485],
"close_rate": [0.104969, 0.103541, 0.102041, 0.102541],
"is_short": [False, False, False, False],
'enter_tag': ["enter_tag_long_a",
"enter_tag": [
"enter_tag_long_a",
"enter_tag_long_b",
"enter_tag_long_a",
"enter_tag_long_b"],
'exit_reason': [ExitType.ROI,
"enter_tag_long_b",
],
"exit_reason": [
ExitType.ROI,
ExitType.EXIT_SIGNAL,
ExitType.STOP_LOSS,
ExitType.TRAILING_STOP_LOSS]
})
backtestmock = MagicMock(side_effect=[
{
'results': result1,
'config': default_conf,
'locks': [],
'rejected_signals': 20,
'timedout_entry_orders': 0,
'timedout_exit_orders': 0,
'canceled_trade_entries': 0,
'canceled_entry_orders': 0,
'replaced_entry_orders': 0,
'final_balance': 1000,
ExitType.TRAILING_STOP_LOSS,
],
}
])
mocker.patch('freqtrade.plugins.pairlistmanager.PairListManager.whitelist',
PropertyMock(return_value=['ETH/BTC', 'LTC/BTC', 'DASH/BTC']))
mocker.patch('freqtrade.optimize.backtesting.Backtesting.backtest', backtestmock)
)
backtestmock = MagicMock(
side_effect=[
{
"results": result1,
"config": default_conf,
"locks": [],
"rejected_signals": 20,
"timedout_entry_orders": 0,
"timedout_exit_orders": 0,
"canceled_trade_entries": 0,
"canceled_entry_orders": 0,
"replaced_entry_orders": 0,
"final_balance": 1000,
}
]
)
mocker.patch(
"freqtrade.plugins.pairlistmanager.PairListManager.whitelist",
PropertyMock(return_value=["ETH/BTC", "LTC/BTC", "DASH/BTC"]),
)
mocker.patch("freqtrade.optimize.backtesting.Backtesting.backtest", backtestmock)
patched_configuration_load_config_file(mocker, default_conf)
args = [
'backtesting',
'--config', 'config.json',
'--datadir', str(testdatadir),
'--user-data-dir', str(user_dir),
'--timeframe', '5m',
'--timerange', '1515560100-1517287800',
'--export', 'signals',
'--cache', 'none',
"backtesting",
"--config",
"config.json",
"--datadir",
str(testdatadir),
"--user-data-dir",
str(user_dir),
"--timeframe",
"5m",
"--timerange",
"1515560100-1517287800",
"--export",
"signals",
"--cache",
"none",
]
args = get_args(args)
start_backtesting(args)
captured = capsys.readouterr()
assert 'BACKTESTING REPORT' in captured.out
assert 'EXIT REASON STATS' in captured.out
assert 'LEFT OPEN TRADES REPORT' in captured.out
assert "BACKTESTING REPORT" in captured.out
assert "EXIT REASON STATS" in captured.out
assert "LEFT OPEN TRADES REPORT" in captured.out
base_args = [
'backtesting-analysis',
'--config', 'config.json',
'--datadir', str(testdatadir),
'--user-data-dir', str(user_dir),
"backtesting-analysis",
"--config",
"config.json",
"--datadir",
str(testdatadir),
"--user-data-dir",
str(user_dir),
]
# test group 0 and indicator list
args = get_args(base_args +
['--analysis-groups', "0",
'--indicator-list', "close", "rsi", "profit_abs"]
args = get_args(
base_args + ["--analysis-groups", "0", "--indicator-list", "close", "rsi", "profit_abs"]
)
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'LTC/BTC' in captured.out
assert 'ETH/BTC' in captured.out
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' in captured.out
assert 'exit_signal' in captured.out
assert 'roi' in captured.out
assert 'stop_loss' in captured.out
assert 'trailing_stop_loss' in captured.out
assert '0.5' in captured.out
assert '-4' in captured.out
assert '-2' in captured.out
assert '-3.5' in captured.out
assert '50' in captured.out
assert '0' in captured.out
assert '0.01616' in captured.out
assert '34.049' in captured.out
assert '0.104411' in captured.out
assert '52.8292' in captured.out
assert "LTC/BTC" in captured.out
assert "ETH/BTC" in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" in captured.out
assert "exit_signal" in captured.out
assert "roi" in captured.out
assert "stop_loss" in captured.out
assert "trailing_stop_loss" in captured.out
assert "0.5" in captured.out
assert "-4" in captured.out
assert "-2" in captured.out
assert "-3.5" in captured.out
assert "50" in captured.out
assert "0" in captured.out
assert "0.01616" in captured.out
assert "34.049" in captured.out
assert "0.104411" in captured.out
assert "52.8292" in captured.out
# test group 1
args = get_args(base_args + ['--analysis-groups', "1"])
args = get_args(base_args + ["--analysis-groups", "1"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' in captured.out
assert 'total_profit_pct' in captured.out
assert '-3.5' in captured.out
assert '-1.75' in captured.out
assert '-7.5' in captured.out
assert '-3.75' in captured.out
assert '0' in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" in captured.out
assert "total_profit_pct" in captured.out
assert "-3.5" in captured.out
assert "-1.75" in captured.out
assert "-7.5" in captured.out
assert "-3.75" in captured.out
assert "0" in captured.out
# test group 2
args = get_args(base_args + ['--analysis-groups', "2"])
args = get_args(base_args + ["--analysis-groups", "2"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' in captured.out
assert 'exit_signal' in captured.out
assert 'roi' in captured.out
assert 'stop_loss' in captured.out
assert 'trailing_stop_loss' in captured.out
assert 'total_profit_pct' in captured.out
assert '-10' in captured.out
assert '-5' in captured.out
assert '2.5' in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" in captured.out
assert "exit_signal" in captured.out
assert "roi" in captured.out
assert "stop_loss" in captured.out
assert "trailing_stop_loss" in captured.out
assert "total_profit_pct" in captured.out
assert "-10" in captured.out
assert "-5" in captured.out
assert "2.5" in captured.out
# test group 3
args = get_args(base_args + ['--analysis-groups', "3"])
args = get_args(base_args + ["--analysis-groups", "3"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'LTC/BTC' in captured.out
assert 'ETH/BTC' in captured.out
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' in captured.out
assert 'total_profit_pct' in captured.out
assert '-7.5' in captured.out
assert '-3.75' in captured.out
assert '-1.75' in captured.out
assert '0' in captured.out
assert '2' in captured.out
assert "LTC/BTC" in captured.out
assert "ETH/BTC" in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" in captured.out
assert "total_profit_pct" in captured.out
assert "-7.5" in captured.out
assert "-3.75" in captured.out
assert "-1.75" in captured.out
assert "0" in captured.out
assert "2" in captured.out
# test group 4
args = get_args(base_args + ['--analysis-groups', "4"])
args = get_args(base_args + ["--analysis-groups", "4"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'LTC/BTC' in captured.out
assert 'ETH/BTC' in captured.out
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' in captured.out
assert 'exit_signal' in captured.out
assert 'roi' in captured.out
assert 'stop_loss' in captured.out
assert 'trailing_stop_loss' in captured.out
assert 'total_profit_pct' in captured.out
assert '-10' in captured.out
assert '-5' in captured.out
assert '-4' in captured.out
assert '0.5' in captured.out
assert '1' in captured.out
assert '2.5' in captured.out
assert "LTC/BTC" in captured.out
assert "ETH/BTC" in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" in captured.out
assert "exit_signal" in captured.out
assert "roi" in captured.out
assert "stop_loss" in captured.out
assert "trailing_stop_loss" in captured.out
assert "total_profit_pct" in captured.out
assert "-10" in captured.out
assert "-5" in captured.out
assert "-4" in captured.out
assert "0.5" in captured.out
assert "1" in captured.out
assert "2.5" in captured.out
# test group 5
args = get_args(base_args + ['--analysis-groups', "5"])
args = get_args(base_args + ["--analysis-groups", "5"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'exit_signal' in captured.out
assert 'roi' in captured.out
assert 'stop_loss' in captured.out
assert 'trailing_stop_loss' in captured.out
assert "exit_signal" in captured.out
assert "roi" in captured.out
assert "stop_loss" in captured.out
assert "trailing_stop_loss" in captured.out
# test date filtering
args = get_args(base_args +
['--analysis-groups', "0", "1", "2",
'--timerange', "20180129-20180130"]
args = get_args(
base_args + ["--analysis-groups", "0", "1", "2", "--timerange", "20180129-20180130"]
)
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'enter_tag_long_a' in captured.out
assert 'enter_tag_long_b' not in captured.out
assert "enter_tag_long_a" in captured.out
assert "enter_tag_long_b" not in captured.out
# Due to the backtest mock, there are no rejected signals generated.
args = get_args(base_args + ['--rejected-signals'])
args = get_args(base_args + ["--rejected-signals"])
start_analysis_entries_exits(args)
captured = capsys.readouterr()
assert 'no rejected signals' in captured.out
assert "no rejected signals" in captured.out


@ -50,7 +50,7 @@ def _clean_test_file(file: Path) -> None:
:param file: complete path to the file
:return: None
"""
file_swp = Path(str(file) + '.swp')
file_swp = Path(str(file) + ".swp")
# 1. Delete file from the test
if file.is_file():
file.unlink()
@ -61,96 +61,108 @@ def _clean_test_file(file: Path) -> None:
def test_load_data_30min_timeframe(caplog, testdatadir) -> None:
ld = load_pair_history(pair='UNITTEST/BTC', timeframe='30m', datadir=testdatadir)
ld = load_pair_history(pair="UNITTEST/BTC", timeframe="30m", datadir=testdatadir)
assert isinstance(ld, DataFrame)
assert not log_has(
'Download history data for pair: "UNITTEST/BTC", timeframe: 30m '
'and store in None.', caplog
'Download history data for pair: "UNITTEST/BTC", timeframe: 30m ' "and store in None.",
caplog,
)
def test_load_data_7min_timeframe(caplog, testdatadir) -> None:
ld = load_pair_history(pair='UNITTEST/BTC', timeframe='7m', datadir=testdatadir)
ld = load_pair_history(pair="UNITTEST/BTC", timeframe="7m", datadir=testdatadir)
assert isinstance(ld, DataFrame)
assert ld.empty
assert log_has(
'No history for UNITTEST/BTC, spot, 7m found. '
'Use `freqtrade download-data` to download the data', caplog
"No history for UNITTEST/BTC, spot, 7m found. "
"Use `freqtrade download-data` to download the data",
caplog,
)
def test_load_data_1min_timeframe(ohlcv_history, mocker, caplog, testdatadir) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history)
file = testdatadir / 'UNITTEST_BTC-1m.feather'
load_data(datadir=testdatadir, timeframe='1m', pairs=['UNITTEST/BTC'])
mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history)
file = testdatadir / "UNITTEST_BTC-1m.feather"
load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"])
assert file.is_file()
assert not log_has(
'Download history data for pair: "UNITTEST/BTC", interval: 1m '
'and store in None.', caplog
'Download history data for pair: "UNITTEST/BTC", interval: 1m ' "and store in None.", caplog
)
def test_load_data_mark(ohlcv_history, mocker, caplog, testdatadir) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history)
file = testdatadir / 'futures/UNITTEST_USDT_USDT-1h-mark.feather'
load_data(datadir=testdatadir, timeframe='1h', pairs=['UNITTEST/BTC'], candle_type='mark')
mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history)
file = testdatadir / "futures/UNITTEST_USDT_USDT-1h-mark.feather"
load_data(datadir=testdatadir, timeframe="1h", pairs=["UNITTEST/BTC"], candle_type="mark")
assert file.is_file()
assert not log_has(
'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m '
'and store in None.', caplog
'Download history data for pair: "UNITTEST/USDT:USDT", interval: 1m ' "and store in None.",
caplog,
)
def test_load_data_startup_candles(mocker, testdatadir) -> None:
ltfmock = mocker.patch(
'freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load',
MagicMock(return_value=DataFrame()))
timerange = TimeRange('date', None, 1510639620, 0)
load_pair_history(pair='UNITTEST/BTC', timeframe='1m',
datadir=testdatadir, timerange=timerange,
startup_candles=20,)
"freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler._ohlcv_load",
MagicMock(return_value=DataFrame()),
)
timerange = TimeRange("date", None, 1510639620, 0)
load_pair_history(
pair="UNITTEST/BTC",
timeframe="1m",
datadir=testdatadir,
timerange=timerange,
startup_candles=20,
)
assert ltfmock.call_count == 1
assert ltfmock.call_args_list[0][1]['timerange'] != timerange
assert ltfmock.call_args_list[0][1]["timerange"] != timerange
# startts is 20 minutes earlier
assert ltfmock.call_args_list[0][1]['timerange'].startts == timerange.startts - 20 * 60
assert ltfmock.call_args_list[0][1]["timerange"].startts == timerange.startts - 20 * 60
@pytest.mark.parametrize('candle_type', ['mark', ''])
def test_load_data_with_new_pair_1min(ohlcv_history_list, mocker, caplog,
default_conf, tmp_path, candle_type) -> None:
@pytest.mark.parametrize("candle_type", ["mark", ""])
def test_load_data_with_new_pair_1min(
ohlcv_history_list, mocker, caplog, default_conf, tmp_path, candle_type
) -> None:
"""
Test load_pair_history() with 1 min timeframe
"""
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list)
mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history_list)
exchange = get_patched_exchange(mocker, default_conf)
file = tmp_path / 'MEME_BTC-1m.feather'
file = tmp_path / "MEME_BTC-1m.feather"
# do not download a new pair if refresh_pairs isn't set
load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type)
load_pair_history(datadir=tmp_path, timeframe="1m", pair="MEME/BTC", candle_type=candle_type)
assert not file.is_file()
assert log_has(
f"No history for MEME/BTC, {candle_type}, 1m found. "
"Use `freqtrade download-data` to download the data", caplog
"Use `freqtrade download-data` to download the data",
caplog,
)
# download a new pair if refresh_pairs is set
refresh_data(datadir=tmp_path, timeframe='1m', pairs=['MEME/BTC'],
exchange=exchange, candle_type=CandleType.SPOT
refresh_data(
datadir=tmp_path,
timeframe="1m",
pairs=["MEME/BTC"],
exchange=exchange,
candle_type=CandleType.SPOT,
)
load_pair_history(datadir=tmp_path, timeframe='1m', pair='MEME/BTC', candle_type=candle_type)
load_pair_history(datadir=tmp_path, timeframe="1m", pair="MEME/BTC", candle_type=candle_type)
assert file.is_file()
assert log_has_re(
r'\(0/1\) - Download history data for "MEME/BTC", 1m, '
r'spot and store in .*', caplog
r'\(0/1\) - Download history data for "MEME/BTC", 1m, ' r"spot and store in .*", caplog
)
def test_testdata_path(testdatadir) -> None:
assert str(Path('tests') / 'testdata') in str(testdatadir)
assert str(Path("tests") / "testdata") in str(testdatadir)
@pytest.mark.parametrize("pair,timeframe,expected_result,candle_type", [
@pytest.mark.parametrize(
"pair,timeframe,expected_result,candle_type",
[
("ETH/BTC", "5m", "freqtrade/hello/world/ETH_BTC-5m.json", ""),
("ETH/USDT", "1M", "freqtrade/hello/world/ETH_USDT-1Mo.json", ""),
("Fabric Token/ETH", "5m", "freqtrade/hello/world/Fabric_Token_ETH-5m.json", ""),
@ -160,82 +172,86 @@ def test_testdata_path(testdatadir) -> None:
("ACC_OLD/BTC", "5m", "freqtrade/hello/world/ACC_OLD_BTC-5m.json", ""),
("ETH/BTC", "5m", "freqtrade/hello/world/futures/ETH_BTC-5m-mark.json", "mark"),
("ACC_OLD/BTC", "5m", "freqtrade/hello/world/futures/ACC_OLD_BTC-5m-index.json", "index"),
])
],
)
def test_json_pair_data_filename(pair, timeframe, expected_result, candle_type):
fn = JsonDataHandler._pair_data_filename(
Path('freqtrade/hello/world'),
pair,
timeframe,
CandleType.from_string(candle_type)
Path("freqtrade/hello/world"), pair, timeframe, CandleType.from_string(candle_type)
)
assert isinstance(fn, Path)
assert fn == Path(expected_result)
fn = JsonGzDataHandler._pair_data_filename(
Path('freqtrade/hello/world'),
Path("freqtrade/hello/world"),
pair,
timeframe,
candle_type=CandleType.from_string(candle_type)
candle_type=CandleType.from_string(candle_type),
)
assert isinstance(fn, Path)
assert fn == Path(expected_result + '.gz')
assert fn == Path(expected_result + ".gz")
@pytest.mark.parametrize("pair,trading_mode,expected_result", [
("ETH/BTC", '', 'freqtrade/hello/world/ETH_BTC-trades.json'),
("ETH/USDT:USDT", 'futures', 'freqtrade/hello/world/futures/ETH_USDT_USDT-trades.json'),
("Fabric Token/ETH", '', 'freqtrade/hello/world/Fabric_Token_ETH-trades.json'),
("ETHH20", '', 'freqtrade/hello/world/ETHH20-trades.json'),
(".XBTBON2H", '', 'freqtrade/hello/world/_XBTBON2H-trades.json'),
("ETHUSD.d", '', 'freqtrade/hello/world/ETHUSD_d-trades.json'),
("ACC_OLD_BTC", '', 'freqtrade/hello/world/ACC_OLD_BTC-trades.json'),
])
@pytest.mark.parametrize(
"pair,trading_mode,expected_result",
[
("ETH/BTC", "", "freqtrade/hello/world/ETH_BTC-trades.json"),
("ETH/USDT:USDT", "futures", "freqtrade/hello/world/futures/ETH_USDT_USDT-trades.json"),
("Fabric Token/ETH", "", "freqtrade/hello/world/Fabric_Token_ETH-trades.json"),
("ETHH20", "", "freqtrade/hello/world/ETHH20-trades.json"),
(".XBTBON2H", "", "freqtrade/hello/world/_XBTBON2H-trades.json"),
("ETHUSD.d", "", "freqtrade/hello/world/ETHUSD_d-trades.json"),
("ACC_OLD_BTC", "", "freqtrade/hello/world/ACC_OLD_BTC-trades.json"),
],
)
def test_json_pair_trades_filename(pair, trading_mode, expected_result):
fn = JsonDataHandler._pair_trades_filename(Path('freqtrade/hello/world'), pair, trading_mode)
fn = JsonDataHandler._pair_trades_filename(Path("freqtrade/hello/world"), pair, trading_mode)
assert isinstance(fn, Path)
assert fn == Path(expected_result)
fn = JsonGzDataHandler._pair_trades_filename(Path('freqtrade/hello/world'), pair, trading_mode)
fn = JsonGzDataHandler._pair_trades_filename(Path("freqtrade/hello/world"), pair, trading_mode)
assert isinstance(fn, Path)
assert fn == Path(expected_result + '.gz')
assert fn == Path(expected_result + ".gz")
def test_load_cached_data_for_updating(mocker, testdatadir) -> None:
data_handler = get_datahandler(testdatadir, 'json')
data_handler = get_datahandler(testdatadir, "json")
test_data = None
test_filename = testdatadir.joinpath('UNITTEST_BTC-1m.json')
test_filename = testdatadir.joinpath("UNITTEST_BTC-1m.json")
with test_filename.open("rt") as file:
test_data = json.load(file)
test_data_df = ohlcv_to_dataframe(test_data, '1m', 'UNITTEST/BTC',
fill_missing=False, drop_incomplete=False)
test_data_df = ohlcv_to_dataframe(
test_data, "1m", "UNITTEST/BTC", fill_missing=False, drop_incomplete=False
)
# now = last cached item + 1 hour
now_ts = test_data[-1][0] / 1000 + 60 * 60
# timeframe starts earlier than the cached data
# should fully update data
timerange = TimeRange('date', None, test_data[0][0] / 1000 - 1, 0)
timerange = TimeRange("date", None, test_data[0][0] / 1000 - 1, 0)
data, start_ts, end_ts = _load_cached_data_for_updating(
'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT)
"UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT
)
assert data.empty
assert start_ts == test_data[0][0] - 1000
assert end_ts is None
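# Timestamps are in ms: the requested start (1 s before the first cached candle)
# comes back as first_candle_ms - 1000.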
# timeframe starts earlier than the cached data - prepending
timerange = TimeRange('date', None, test_data[0][0] / 1000 - 1, 0)
timerange = TimeRange("date", None, test_data[0][0] / 1000 - 1, 0)
data, start_ts, end_ts = _load_cached_data_for_updating(
'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT, True)
"UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT, True
)
assert_frame_equal(data, test_data_df.iloc[:-1])
assert start_ts == test_data[0][0] - 1000
assert end_ts == test_data[0][0]
# timeframe starts in the center of the cached data
# should return the cached data w/o the last item
timerange = TimeRange('date', None, test_data[0][0] / 1000 + 1, 0)
timerange = TimeRange("date", None, test_data[0][0] / 1000 + 1, 0)
data, start_ts, end_ts = _load_cached_data_for_updating(
'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT)
"UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT
)
assert_frame_equal(data, test_data_df.iloc[:-1])
assert test_data[-2][0] <= start_ts < test_data[-1][0]
@@ -243,27 +259,30 @@ def test_load_cached_data_for_updating(mocker, testdatadir) -> None:
# timeframe starts after the cached data
# should return the cached data w/o the last item
timerange = TimeRange('date', None, test_data[-1][0] / 1000 + 100, 0)
timerange = TimeRange("date", None, test_data[-1][0] / 1000 + 100, 0)
data, start_ts, end_ts = _load_cached_data_for_updating(
'UNITTEST/BTC', '1m', timerange, data_handler, CandleType.SPOT)
"UNITTEST/BTC", "1m", timerange, data_handler, CandleType.SPOT
)
assert_frame_equal(data, test_data_df.iloc[:-1])
assert test_data[-2][0] <= start_ts < test_data[-1][0]
assert end_ts is None
# no datafile exists
# should return timestamp start time
timerange = TimeRange('date', None, now_ts - 10000, 0)
timerange = TimeRange("date", None, now_ts - 10000, 0)
data, start_ts, end_ts = _load_cached_data_for_updating(
'NONEXIST/BTC', '1m', timerange, data_handler, CandleType.SPOT)
"NONEXIST/BTC", "1m", timerange, data_handler, CandleType.SPOT
)
assert data.empty
assert start_ts == (now_ts - 10000) * 1000
assert end_ts is None
# no datafile exists
# should return timestamp start and end time
timerange = TimeRange('date', 'date', now_ts - 1000000, now_ts - 100000)
timerange = TimeRange("date", "date", now_ts - 1000000, now_ts - 100000)
data, start_ts, end_ts = _load_cached_data_for_updating(
'NONEXIST/BTC', '1m', timerange, data_handler, CandleType.SPOT)
"NONEXIST/BTC", "1m", timerange, data_handler, CandleType.SPOT
)
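# With no cached file, both requested boundaries are echoed back,
# converted from seconds to milliseconds.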
assert data.empty
assert start_ts == (now_ts - 1000000) * 1000
assert end_ts == (now_ts - 100000) * 1000
@@ -271,43 +290,43 @@ def test_load_cached_data_for_updating(mocker, testdatadir) -> None:
# no datafile exists, no timeframe is set
# should return an empty DataFrame and None
data, start_ts, end_ts = _load_cached_data_for_updating(
'NONEXIST/BTC', '1m', None, data_handler, CandleType.SPOT)
"NONEXIST/BTC", "1m", None, data_handler, CandleType.SPOT
)
assert data.empty
assert start_ts is None
assert end_ts is None
@pytest.mark.parametrize('candle_type,subdir,file_tail', [
('mark', 'futures/', '-mark'),
('spot', '', ''),
])
@pytest.mark.parametrize(
"candle_type,subdir,file_tail",
[
("mark", "futures/", "-mark"),
("spot", "", ""),
],
)
def test_download_pair_history(
ohlcv_history_list,
mocker,
default_conf,
tmp_path,
candle_type,
subdir,
file_tail
ohlcv_history_list, mocker, default_conf, tmp_path, candle_type, subdir, file_tail
) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=ohlcv_history_list)
mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=ohlcv_history_list)
exchange = get_patched_exchange(mocker, default_conf)
file1_1 = tmp_path / f'{subdir}MEME_BTC-1m{file_tail}.feather'
file1_5 = tmp_path / f'{subdir}MEME_BTC-5m{file_tail}.feather'
file2_1 = tmp_path / f'{subdir}CFI_BTC-1m{file_tail}.feather'
file2_5 = tmp_path / f'{subdir}CFI_BTC-5m{file_tail}.feather'
file1_1 = tmp_path / f"{subdir}MEME_BTC-1m{file_tail}.feather"
file1_5 = tmp_path / f"{subdir}MEME_BTC-5m{file_tail}.feather"
file2_1 = tmp_path / f"{subdir}CFI_BTC-1m{file_tail}.feather"
file2_5 = tmp_path / f"{subdir}CFI_BTC-5m{file_tail}.feather"
assert not file1_1.is_file()
assert not file2_1.is_file()
assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC',
timeframe='1m',
candle_type=candle_type)
assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='CFI/BTC',
timeframe='1m',
candle_type=candle_type)
assert _download_pair_history(
datadir=tmp_path,
exchange=exchange,
pair="MEME/BTC",
timeframe="1m",
candle_type=candle_type,
)
assert _download_pair_history(
datadir=tmp_path, exchange=exchange, pair="CFI/BTC", timeframe="1m", candle_type=candle_type
)
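# The bulk download path presumably bypasses the live refresh cache,
# so _pairs_last_refresh_time stays empty.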
assert not exchange._pairs_last_refresh_time
assert file1_1.is_file()
assert file2_1.is_file()
@@ -319,14 +338,16 @@ def test_download_pair_history(
assert not file1_5.is_file()
assert not file2_5.is_file()
assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC',
timeframe='5m',
candle_type=candle_type)
assert _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='CFI/BTC',
timeframe='5m',
candle_type=candle_type)
assert _download_pair_history(
datadir=tmp_path,
exchange=exchange,
pair="MEME/BTC",
timeframe="5m",
candle_type=candle_type,
)
assert _download_pair_history(
datadir=tmp_path, exchange=exchange, pair="CFI/BTC", timeframe="5m", candle_type=candle_type
)
assert not exchange._pairs_last_refresh_time
assert file1_5.is_file()
assert file2_5.is_file()
@@ -335,30 +356,45 @@ def test_download_pair_history(
def test_download_pair_history2(mocker, default_conf, testdatadir) -> None:
tick = [
[1509836520000, 0.00162008, 0.00162008, 0.00162008, 0.00162008, 108.14853839],
[1509836580000, 0.00161, 0.00161, 0.00161, 0.00161, 82.390199]
[1509836580000, 0.00161, 0.00161, 0.00161, 0.00161, 82.390199],
]
json_dump_mock = mocker.patch(
'freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler.ohlcv_store',
return_value=None)
mocker.patch(f'{EXMS}.get_historic_ohlcv', return_value=tick)
"freqtrade.data.history.datahandlers.featherdatahandler.FeatherDataHandler.ohlcv_store",
return_value=None,
)
mocker.patch(f"{EXMS}.get_historic_ohlcv", return_value=tick)
exchange = get_patched_exchange(mocker, default_conf)
_download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/BTC",
timeframe='1m', candle_type='spot')
_download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/BTC",
timeframe='3m', candle_type='spot')
_download_pair_history(datadir=testdatadir, exchange=exchange, pair="UNITTEST/USDT",
timeframe='1h', candle_type='mark')
_download_pair_history(
datadir=testdatadir,
exchange=exchange,
pair="UNITTEST/BTC",
timeframe="1m",
candle_type="spot",
)
_download_pair_history(
datadir=testdatadir,
exchange=exchange,
pair="UNITTEST/BTC",
timeframe="3m",
candle_type="spot",
)
_download_pair_history(
datadir=testdatadir,
exchange=exchange,
pair="UNITTEST/USDT",
timeframe="1h",
candle_type="mark",
)
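# One ohlcv_store call per downloaded pair/timeframe combination above.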
assert json_dump_mock.call_count == 3
def test_download_backtesting_data_exception(mocker, caplog, default_conf, tmp_path) -> None:
mocker.patch(f'{EXMS}.get_historic_ohlcv',
side_effect=Exception('File Error'))
mocker.patch(f"{EXMS}.get_historic_ohlcv", side_effect=Exception("File Error"))
exchange = get_patched_exchange(mocker, default_conf)
assert not _download_pair_history(datadir=tmp_path, exchange=exchange,
pair='MEME/BTC',
timeframe='1m', candle_type='spot')
assert not _download_pair_history(
datadir=tmp_path, exchange=exchange, pair="MEME/BTC", timeframe="1m", candle_type="spot"
)
assert log_has('Failed to download history data for pair: "MEME/BTC", timeframe: 1m.', caplog)
@@ -366,41 +402,46 @@ def test_load_partial_missing(testdatadir, caplog) -> None:
# Make sure we start fresh - test missing data at start
start = dt_utc(2018, 1, 1)
end = dt_utc(2018, 1, 11)
data = load_data(testdatadir, '5m', ['UNITTEST/BTC'], startup_candles=20,
timerange=TimeRange('date', 'date', start.timestamp(), end.timestamp()))
assert log_has(
'Using indicator startup period: 20 ...', caplog
data = load_data(
testdatadir,
"5m",
["UNITTEST/BTC"],
startup_candles=20,
timerange=TimeRange("date", "date", start.timestamp(), end.timestamp()),
)
assert log_has("Using indicator startup period: 20 ...", caplog)
# time difference in 5-minute intervals
td = ((end - start).total_seconds() // 60 // 5) + 1
assert td != len(data['UNITTEST/BTC'])
start_real = data['UNITTEST/BTC'].iloc[0, 0]
assert log_has(f'UNITTEST/BTC, spot, 5m, '
f'data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}',
caplog)
assert td != len(data["UNITTEST/BTC"])
start_real = data["UNITTEST/BTC"].iloc[0, 0]
assert log_has(
f"UNITTEST/BTC, spot, 5m, " f"data starts at {start_real.strftime(DATETIME_PRINT_FORMAT)}",
caplog,
)
# Make sure we start fresh - test missing data at end
caplog.clear()
start = dt_utc(2018, 1, 10)
end = dt_utc(2018, 2, 20)
data = load_data(datadir=testdatadir, timeframe='5m', pairs=['UNITTEST/BTC'],
timerange=TimeRange('date', 'date', start.timestamp(), end.timestamp()))
data = load_data(
datadir=testdatadir,
timeframe="5m",
pairs=["UNITTEST/BTC"],
timerange=TimeRange("date", "date", start.timestamp(), end.timestamp()),
)
# time difference in 5-minute intervals
td = ((end - start).total_seconds() // 60 // 5) + 1
assert td != len(data['UNITTEST/BTC'])
assert td != len(data["UNITTEST/BTC"])
# Shift endtime by +5
end_real = data['UNITTEST/BTC'].iloc[-1, 0].to_pydatetime()
assert log_has(f'UNITTEST/BTC, spot, 5m, '
f'data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}',
caplog)
end_real = data["UNITTEST/BTC"].iloc[-1, 0].to_pydatetime()
assert log_has(
f"UNITTEST/BTC, spot, 5m, " f"data ends at {end_real.strftime(DATETIME_PRINT_FORMAT)}",
caplog,
)
def test_init(default_conf) -> None:
assert {} == load_data(
datadir=Path(),
pairs=[],
timeframe=default_conf['timeframe']
)
assert {} == load_data(datadir=Path(), pairs=[], timeframe=default_conf["timeframe"])
def test_init_with_refresh(default_conf, mocker) -> None:
@@ -408,20 +449,16 @@ def test_init_with_refresh(default_conf, mocker) -> None:
refresh_data(
datadir=Path(),
pairs=[],
timeframe=default_conf['timeframe'],
timeframe=default_conf["timeframe"],
exchange=exchange,
candle_type=CandleType.SPOT
)
assert {} == load_data(
datadir=Path(),
pairs=[],
timeframe=default_conf['timeframe']
candle_type=CandleType.SPOT,
)
assert {} == load_data(datadir=Path(), pairs=[], timeframe=default_conf["timeframe"])
def test_file_dump_json_tofile(testdatadir) -> None:
file = testdatadir / f'test_{uuid.uuid4()}.json'
data = {'bar': 'foo'}
file = testdatadir / f"test_{uuid.uuid4()}.json"
data = {"bar": "foo"}
# check the file we will create does not exist
assert not file.is_file()
@@ -436,8 +473,8 @@ def test_file_dump_json_tofile(testdatadir) -> None:
with file.open() as data_file:
json_from_file = json.load(data_file)
assert 'bar' in json_from_file
assert json_from_file['bar'] == 'foo'
assert "bar" in json_from_file
assert json_from_file["bar"] == "foo"
# Remove the file
_clean_test_file(file)
@@ -446,112 +483,115 @@ def test_file_dump_json_tofile(testdatadir) -> None:
def test_get_timerange(default_conf, mocker, testdatadir) -> None:
patch_exchange(mocker)
default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
default_conf.update({"strategy": CURRENT_TEST_STRATEGY})
strategy = StrategyResolver.load_strategy(default_conf)
data = strategy.advise_all_indicators(
load_data(
datadir=testdatadir,
timeframe='1m',
pairs=['UNITTEST/BTC']
)
load_data(datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"])
)
min_date, max_date = get_timerange(data)
assert min_date.isoformat() == '2017-11-04T23:02:00+00:00'
assert max_date.isoformat() == '2017-11-14T22:59:00+00:00'
assert min_date.isoformat() == "2017-11-04T23:02:00+00:00"
assert max_date.isoformat() == "2017-11-14T22:59:00+00:00"
def test_validate_backtest_data_warn(default_conf, mocker, caplog, testdatadir) -> None:
patch_exchange(mocker)
default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
default_conf.update({"strategy": CURRENT_TEST_STRATEGY})
strategy = StrategyResolver.load_strategy(default_conf)
data = strategy.advise_all_indicators(
load_data(
datadir=testdatadir,
timeframe='1m',
pairs=['UNITTEST/BTC'],
fill_up_missing=False
datadir=testdatadir, timeframe="1m", pairs=["UNITTEST/BTC"], fill_up_missing=False
)
)
min_date, max_date = get_timerange(data)
caplog.clear()
assert validate_backtest_data(data['UNITTEST/BTC'], 'UNITTEST/BTC',
min_date, max_date, timeframe_to_minutes('1m'))
assert validate_backtest_data(
data["UNITTEST/BTC"], "UNITTEST/BTC", min_date, max_date, timeframe_to_minutes("1m")
)
assert len(caplog.record_tuples) == 1
assert log_has(
"UNITTEST/BTC has missing frames: expected 14397, got 13681, that's 716 missing values",
caplog)
caplog,
)
def test_validate_backtest_data(default_conf, mocker, caplog, testdatadir) -> None:
patch_exchange(mocker)
default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
default_conf.update({"strategy": CURRENT_TEST_STRATEGY})
strategy = StrategyResolver.load_strategy(default_conf)
timerange = TimeRange()
data = strategy.advise_all_indicators(
load_data(
datadir=testdatadir,
timeframe='5m',
pairs=['UNITTEST/BTC'],
timerange=timerange
)
load_data(datadir=testdatadir, timeframe="5m", pairs=["UNITTEST/BTC"], timerange=timerange)
)
min_date, max_date = get_timerange(data)
caplog.clear()
assert not validate_backtest_data(data['UNITTEST/BTC'], 'UNITTEST/BTC',
min_date, max_date, timeframe_to_minutes('5m'))
assert not validate_backtest_data(
data["UNITTEST/BTC"], "UNITTEST/BTC", min_date, max_date, timeframe_to_minutes("5m")
)
assert len(caplog.record_tuples) == 0
@pytest.mark.parametrize('trademode,callcount', [
('spot', 4),
('margin', 4),
('futures', 8), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls
])
@pytest.mark.parametrize(
"trademode,callcount",
[
("spot", 4),
("margin", 4),
("futures", 8), # Called 8 times - 4 normal, 2 funding and 2 mark/index calls
],
)
def test_refresh_backtest_ohlcv_data(
mocker, default_conf, markets, caplog, testdatadir, trademode, callcount):
mocker, default_conf, markets, caplog, testdatadir, trademode, callcount
):
caplog.set_level(logging.DEBUG)
dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history')
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
dl_mock = mocker.patch("freqtrade.data.history.history_utils._download_pair_history")
mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets))
mocker.patch.object(Path, "exists", MagicMock(return_value=True))
mocker.patch.object(Path, "unlink", MagicMock())
default_conf['trading_mode'] = trademode
default_conf["trading_mode"] = trademode
ex = get_patched_exchange(mocker, default_conf, id='bybit')
ex = get_patched_exchange(mocker, default_conf, id="bybit")
timerange = TimeRange.parse_timerange("20190101-20190102")
refresh_backtest_ohlcv_data(exchange=ex, pairs=["ETH/BTC", "XRP/BTC"],
timeframes=["1m", "5m"], datadir=testdatadir,
timerange=timerange, erase=True,
trading_mode=trademode
refresh_backtest_ohlcv_data(
exchange=ex,
pairs=["ETH/BTC", "XRP/BTC"],
timeframes=["1m", "5m"],
datadir=testdatadir,
timerange=timerange,
erase=True,
trading_mode=trademode,
)
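# spot/margin: 2 pairs x 2 timeframes = 4 downloads; futures additionally
# fetches funding-rate and mark candles per pair (see parametrize above).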
assert dl_mock.call_count == callcount
assert dl_mock.call_args[1]['timerange'].starttype == 'date'
assert dl_mock.call_args[1]["timerange"].starttype == "date"
assert log_has_re(r"Downloading pair ETH/BTC, .* interval 1m\.", caplog)
if trademode == 'futures':
if trademode == "futures":
assert log_has_re(r"Downloading pair ETH/BTC, funding_rate, interval 8h\.", caplog)
assert log_has_re(r"Downloading pair ETH/BTC, mark, interval 4h\.", caplog)
def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir):
dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_pair_history',
MagicMock())
dl_mock = mocker.patch(
"freqtrade.data.history.history_utils._download_pair_history", MagicMock()
)
ex = get_patched_exchange(mocker, default_conf)
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value={}))
mocker.patch(f"{EXMS}.markets", PropertyMock(return_value={}))
timerange = TimeRange.parse_timerange("20190101-20190102")
unav_pairs = refresh_backtest_ohlcv_data(exchange=ex, pairs=["BTT/BTC", "LTC/USDT"],
unav_pairs = refresh_backtest_ohlcv_data(
exchange=ex,
pairs=["BTT/BTC", "LTC/USDT"],
timeframes=["1m", "5m"],
datadir=testdatadir,
timerange=timerange, erase=False,
trading_mode='spot'
timerange=timerange,
erase=False,
trading_mode="spot",
)
assert dl_mock.call_count == 0
@@ -561,90 +601,104 @@ def test_download_data_no_markets(mocker, default_conf, caplog, testdatadir):
def test_refresh_backtest_trades_data(mocker, default_conf, markets, caplog, testdatadir):
dl_mock = mocker.patch('freqtrade.data.history.history_utils._download_trades_history',
MagicMock())
mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=markets))
dl_mock = mocker.patch(
"freqtrade.data.history.history_utils._download_trades_history", MagicMock()
)
mocker.patch(f"{EXMS}.markets", PropertyMock(return_value=markets))
mocker.patch.object(Path, "exists", MagicMock(return_value=True))
mocker.patch.object(Path, "unlink", MagicMock())
ex = get_patched_exchange(mocker, default_conf)
timerange = TimeRange.parse_timerange("20190101-20190102")
unavailable_pairs = refresh_backtest_trades_data(exchange=ex,
unavailable_pairs = refresh_backtest_trades_data(
exchange=ex,
pairs=["ETH/BTC", "XRP/BTC", "XRP/ETH"],
datadir=testdatadir,
timerange=timerange, erase=True,
timerange=timerange,
erase=True,
trading_mode=TradingMode.SPOT,
)
assert dl_mock.call_count == 2
assert dl_mock.call_args[1]['timerange'].starttype == 'date'
assert dl_mock.call_args[1]["timerange"].starttype == "date"
assert log_has("Downloading trades for pair ETH/BTC.", caplog)
assert unavailable_pairs == ["XRP/ETH"]
assert log_has("Skipping pair XRP/ETH...", caplog)
def test_download_trades_history(trades_history, mocker, default_conf, testdatadir, caplog,
tmp_path, time_machine) -> None:
def test_download_trades_history(
trades_history, mocker, default_conf, testdatadir, caplog, tmp_path, time_machine
) -> None:
start_dt = dt_utc(2023, 1, 1)
time_machine.move_to(start_dt, tick=False)
ght_mock = MagicMock(side_effect=lambda pair, *args, **kwargs: (pair, trades_history))
mocker.patch(f'{EXMS}.get_historic_trades', ght_mock)
mocker.patch(f"{EXMS}.get_historic_trades", ght_mock)
exchange = get_patched_exchange(mocker, default_conf)
file1 = tmp_path / 'ETH_BTC-trades.json.gz'
data_handler = get_datahandler(tmp_path, data_format='jsongz')
file1 = tmp_path / "ETH_BTC-trades.json.gz"
data_handler = get_datahandler(tmp_path, data_format="jsongz")
assert not file1.is_file()
assert _download_trades_history(data_handler=data_handler, exchange=exchange,
pair='ETH/BTC', trading_mode=TradingMode.SPOT)
assert _download_trades_history(
data_handler=data_handler, exchange=exchange, pair="ETH/BTC", trading_mode=TradingMode.SPOT
)
assert log_has("Current Amount of trades: 0", caplog)
assert log_has("New Amount of trades: 6", caplog)
assert ght_mock.call_count == 1
# Default "since" - 30 days before current day.
assert ght_mock.call_args_list[0][1]['since'] == dt_ts(start_dt - timedelta(days=30))
assert ght_mock.call_args_list[0][1]["since"] == dt_ts(start_dt - timedelta(days=30))
assert file1.is_file()
caplog.clear()
ght_mock.reset_mock()
since_time = int(trades_history[-3][0] // 1000)
since_time2 = int(trades_history[-1][0] // 1000)
timerange = TimeRange('date', None, since_time, 0)
timerange = TimeRange("date", None, since_time, 0)
assert _download_trades_history(
data_handler=data_handler, exchange=exchange, pair='ETH/BTC',
timerange=timerange, trading_mode=TradingMode.SPOT)
data_handler=data_handler,
exchange=exchange,
pair="ETH/BTC",
timerange=timerange,
trading_mode=TradingMode.SPOT,
)
assert ght_mock.call_count == 1
# Check this in seconds - since we had to convert to seconds above too.
assert int(ght_mock.call_args_list[0][1]['since'] // 1000) == since_time2 - 5
assert ght_mock.call_args_list[0][1]['from_id'] is not None
assert int(ght_mock.call_args_list[0][1]["since"] // 1000) == since_time2 - 5
assert ght_mock.call_args_list[0][1]["from_id"] is not None
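# The 5 second rewind past the newest cached trade presumably creates a small
# overlap, with duplicates filtered via the from_id continuation.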
file1.unlink()
mocker.patch(f'{EXMS}.get_historic_trades', MagicMock(side_effect=ValueError))
mocker.patch(f"{EXMS}.get_historic_trades", MagicMock(side_effect=ValueError))
caplog.clear()
assert not _download_trades_history(data_handler=data_handler, exchange=exchange,
pair='ETH/BTC', trading_mode=TradingMode.SPOT)
assert not _download_trades_history(
data_handler=data_handler, exchange=exchange, pair="ETH/BTC", trading_mode=TradingMode.SPOT
)
assert log_has_re('Failed to download historic trades for pair: "ETH/BTC".*', caplog)
file2 = tmp_path / 'XRP_ETH-trades.json.gz'
file2 = tmp_path / "XRP_ETH-trades.json.gz"
copyfile(testdatadir / file2.name, file2)
ght_mock.reset_mock()
mocker.patch(f'{EXMS}.get_historic_trades', ght_mock)
mocker.patch(f"{EXMS}.get_historic_trades", ght_mock)
# Since before first start date
since_time = int(trades_history[0][0] // 1000) - 500
timerange = TimeRange('date', None, since_time, 0)
timerange = TimeRange("date", None, since_time, 0)
assert _download_trades_history(
data_handler=data_handler, exchange=exchange, pair='XRP/ETH',
timerange=timerange, trading_mode=TradingMode.SPOT)
data_handler=data_handler,
exchange=exchange,
pair="XRP/ETH",
timerange=timerange,
trading_mode=TradingMode.SPOT,
)
assert ght_mock.call_count == 1
assert int(ght_mock.call_args_list[0][1]['since'] // 1000) == since_time
assert ght_mock.call_args_list[0][1]['from_id'] is None
assert log_has_re(r'Start .* earlier than available data. Redownloading trades for.*', caplog)
assert int(ght_mock.call_args_list[0][1]["since"] // 1000) == since_time
assert ght_mock.call_args_list[0][1]["from_id"] is None
assert log_has_re(r"Start .* earlier than available data. Redownloading trades for.*", caplog)
_clean_test_file(file2)