<!-- Archived-copy metadata (not part of the original page):
freqtrade_origin/en/2024.3/freqai-reinforcement-learning/index.html
2024 lines, 77 KiB, HTML
-->

<!doctype html>
<html lang="en" class="no-js">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<link rel="canonical" href="https://www.freqtrade.io/2024.3/freqai-reinforcement-learning/">
<link rel="prev" href="../freqai-running/">
<link rel="next" href="../freqai-developers/">
<link rel="icon" href="../images/logo.png">
<meta name="generator" content="mkdocs-1.5.3, mkdocs-material-9.5.15">
<title>Reinforcement Learning - Freqtrade</title>
<link rel="stylesheet" href="../assets/stylesheets/main.7e359304.min.css">
<link rel="stylesheet" href="../assets/stylesheets/palette.06af60db.min.css">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300i,400,400i,700,700i%7CRoboto+Mono:400,400i,700,700i&display=fallback">
<style>:root{--md-text-font:"Roboto";--md-code-font:"Roboto Mono"}</style>
<link rel="stylesheet" href="../stylesheets/ft.extra.css">
<script>__md_scope=new URL("..",location),__md_hash=e=>[...e].reduce((e,_)=>(e<<5)-e+_.charCodeAt(0),0),__md_get=(e,_=localStorage,t=__md_scope)=>JSON.parse(_.getItem(t.pathname+"."+e)),__md_set=(e,_,t=localStorage,a=__md_scope)=>{try{t.setItem(a.pathname+"."+e,JSON.stringify(_))}catch(e){}}</script>
</head>
<body dir="ltr" data-md-color-scheme="default" data-md-color-primary="blue-grey" data-md-color-accent="tear">
<input class="md-toggle" data-md-toggle="drawer" type="checkbox" id="__drawer" autocomplete="off">
<input class="md-toggle" data-md-toggle="search" type="checkbox" id="__search" autocomplete="off">
<label class="md-overlay" for="__drawer"></label>
<div data-md-component="skip">
<a href="#reinforcement-learning" class="md-skip">
Skip to content
</a>
</div>
<div data-md-component="announce">
</div>
<div data-md-color-scheme="default" data-md-component="outdated" hidden>
</div>
<header class="md-header md-header--shadow" data-md-component="header">
<nav class="md-header__inner md-grid" aria-label="Header">
<a href=".." title="Freqtrade" class="md-header__button md-logo" aria-label="Freqtrade" data-md-component="logo">
<img src="../images/logo.png" alt="logo">
</a>
<label class="md-header__button md-icon" for="__drawer">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2Z"/></svg>
</label>
<div class="md-header__title" data-md-component="header-title">
<div class="md-header__ellipsis">
<div class="md-header__topic">
<span class="md-ellipsis">
Freqtrade
</span>
</div>
<div class="md-header__topic" data-md-component="header-topic">
<span class="md-ellipsis">
Reinforcement Learning
</span>
</div>
</div>
</div>
<form class="md-header__option" data-md-component="palette">
<input class="md-option" data-md-color-media="" data-md-color-scheme="default" data-md-color-primary="blue-grey" data-md-color-accent="tear" aria-label="Switch to dark mode" type="radio" name="__palette" id="__palette_0">
<label class="md-header__button md-icon" title="Switch to dark mode" for="__palette_1" hidden>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M17 6H7c-3.31 0-6 2.69-6 6s2.69 6 6 6h10c3.31 0 6-2.69 6-6s-2.69-6-6-6zm0 10H7c-2.21 0-4-1.79-4-4s1.79-4 4-4h10c2.21 0 4 1.79 4 4s-1.79 4-4 4zM7 9c-1.66 0-3 1.34-3 3s1.34 3 3 3 3-1.34 3-3-1.34-3-3-3z"/></svg>
</label>
<input class="md-option" data-md-color-media="" data-md-color-scheme="slate" data-md-color-primary="blue-grey" data-md-color-accent="tear" aria-label="Switch to light mode" type="radio" name="__palette" id="__palette_1">
<label class="md-header__button md-icon" title="Switch to light mode" for="__palette_0" hidden>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M17 7H7a5 5 0 0 0-5 5 5 5 0 0 0 5 5h10a5 5 0 0 0 5-5 5 5 0 0 0-5-5m0 8a3 3 0 0 1-3-3 3 3 0 0 1 3-3 3 3 0 0 1 3 3 3 3 0 0 1-3 3Z"/></svg>
</label>
</form>
<script>var media,input,key,value,palette=__md_get("__palette");if(palette&&palette.color){"(prefers-color-scheme)"===palette.color.media&&(media=matchMedia("(prefers-color-scheme: light)"),input=document.querySelector(media.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']"),palette.color.media=input.getAttribute("data-md-color-media"),palette.color.scheme=input.getAttribute("data-md-color-scheme"),palette.color.primary=input.getAttribute("data-md-color-primary"),palette.color.accent=input.getAttribute("data-md-color-accent"));for([key,value]of Object.entries(palette.color))document.body.setAttribute("data-md-color-"+key,value)}</script>
<label class="md-header__button md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg>
</label>
<div class="md-search" data-md-component="search" role="dialog">
<label class="md-search__overlay" for="__search"></label>
<div class="md-search__inner" role="search">
<form class="md-search__form" name="search">
<input type="text" class="md-search__input" name="query" aria-label="Search" placeholder="Search" autocapitalize="off" autocorrect="off" autocomplete="off" spellcheck="false" data-md-component="search-query" required>
<label class="md-search__icon md-icon" for="__search">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0 1 16 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 0 1 9.5 16 6.5 6.5 0 0 1 3 9.5 6.5 6.5 0 0 1 9.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5Z"/></svg>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg>
</label>
<nav class="md-search__options" aria-label="Search">
<a href="javascript:void(0)" class="md-search__icon md-icon" title="Share" aria-label="Share" data-clipboard data-clipboard-text="" data-md-component="search-share" tabindex="-1">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M18 16.08c-.76 0-1.44.3-1.96.77L8.91 12.7c.05-.23.09-.46.09-.7 0-.24-.04-.47-.09-.7l7.05-4.11c.54.5 1.25.81 2.04.81a3 3 0 0 0 3-3 3 3 0 0 0-3-3 3 3 0 0 0-3 3c0 .24.04.47.09.7L8.04 9.81C7.5 9.31 6.79 9 6 9a3 3 0 0 0-3 3 3 3 0 0 0 3 3c.79 0 1.5-.31 2.04-.81l7.12 4.15c-.05.21-.08.43-.08.66 0 1.61 1.31 2.91 2.92 2.91 1.61 0 2.92-1.3 2.92-2.91A2.92 2.92 0 0 0 18 16.08Z"/></svg>
</a>
<button type="reset" class="md-search__icon md-icon" title="Clear" aria-label="Clear" tabindex="-1">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M19 6.41 17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41Z"/></svg>
</button>
</nav>
</form>
<div class="md-search__output">
<div class="md-search__scrollwrap" data-md-scrollfix>
<div class="md-search-result" data-md-component="search-result">
<div class="md-search-result__meta">
Initializing search
</div>
<ol class="md-search-result__list" role="presentation"></ol>
</div>
</div>
</div>
</div>
</div>
<div class="md-header__source">
<a href="https://github.com/freqtrade/freqtrade" title="Go to repository" class="md-source" data-md-component="source">
<div class="md-source__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M439.55 236.05 244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z"/></svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
</nav>
</header>
<div class="md-container" data-md-component="container">
<main class="md-main" data-md-component="main">
<div class="md-main__inner md-grid">
<!-- Main navigation -->
<div class="md-sidebar md-sidebar--primary" data-md-component="sidebar" data-md-type="navigation" >
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--primary" aria-label="Navigation" data-md-level="0">
<label class="md-nav__title" for="__drawer">
<a href=".." title="Freqtrade" class="md-nav__button md-logo" aria-label="Freqtrade" data-md-component="logo">
<img src="../images/logo.png" alt="logo">
</a>
Freqtrade
</label>
<div class="md-nav__source">
<a href="https://github.com/freqtrade/freqtrade" title="Go to repository" class="md-source" data-md-component="source">
<div class="md-source__icon md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><!--! Font Awesome Free 6.5.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2023 Fonticons, Inc.--><path d="M439.55 236.05 244 40.45a28.87 28.87 0 0 0-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 0 1-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 0 0 0 40.81l195.61 195.6a28.86 28.86 0 0 0 40.8 0l194.69-194.69a28.86 28.86 0 0 0 0-40.81z"/></svg>
</div>
<div class="md-source__repository">
GitHub
</div>
</a>
</div>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href=".." class="md-nav__link">
<span class="md-ellipsis">
Home
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../docker_quickstart/" class="md-nav__link">
<span class="md-ellipsis">
Quickstart with Docker
</span>
</a>
</li>
<li class="md-nav__item md-nav__item--nested">
<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_3" >
<label class="md-nav__link" for="__nav_3" id="__nav_3_label" tabindex="0">
<span class="md-ellipsis">
Installation
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_3_label" aria-expanded="false">
<label class="md-nav__title" for="__nav_3">
<span class="md-nav__icon md-icon"></span>
Installation
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../installation/" class="md-nav__link">
<span class="md-ellipsis">
Linux/MacOS/Raspberry
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../windows_installation/" class="md-nav__link">
<span class="md-ellipsis">
Windows
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../bot-basics/" class="md-nav__link">
<span class="md-ellipsis">
Freqtrade Basics
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../configuration/" class="md-nav__link">
<span class="md-ellipsis">
Configuration
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../strategy-customization/" class="md-nav__link">
<span class="md-ellipsis">
Strategy Customization
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../strategy-callbacks/" class="md-nav__link">
<span class="md-ellipsis">
Strategy Callbacks
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../stoploss/" class="md-nav__link">
<span class="md-ellipsis">
Stoploss
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../plugins/" class="md-nav__link">
<span class="md-ellipsis">
Plugins
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../bot-usage/" class="md-nav__link">
<span class="md-ellipsis">
Start the bot
</span>
</a>
</li>
<li class="md-nav__item md-nav__item--nested">
<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_11" >
<label class="md-nav__link" for="__nav_11" id="__nav_11_label" tabindex="0">
<span class="md-ellipsis">
Control the bot
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_11_label" aria-expanded="false">
<label class="md-nav__title" for="__nav_11">
<span class="md-nav__icon md-icon"></span>
Control the bot
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../telegram-usage/" class="md-nav__link">
<span class="md-ellipsis">
Telegram
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../rest-api/" class="md-nav__link">
<span class="md-ellipsis">
REST API & FreqUI
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../webhook-config/" class="md-nav__link">
<span class="md-ellipsis">
Web Hook
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../data-download/" class="md-nav__link">
<span class="md-ellipsis">
Data Downloading
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../backtesting/" class="md-nav__link">
<span class="md-ellipsis">
Backtesting
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../hyperopt/" class="md-nav__link">
<span class="md-ellipsis">
Hyperopt
</span>
</a>
</li>
<li class="md-nav__item md-nav__item--active md-nav__item--nested">
<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_15" checked>
<label class="md-nav__link" for="__nav_15" id="__nav_15_label" tabindex="0">
<span class="md-ellipsis">
FreqAI
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_15_label" aria-expanded="true">
<label class="md-nav__title" for="__nav_15">
<span class="md-nav__icon md-icon"></span>
FreqAI
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../freqai/" class="md-nav__link">
<span class="md-ellipsis">
Introduction
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../freqai-configuration/" class="md-nav__link">
<span class="md-ellipsis">
Configuration
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../freqai-parameter-table/" class="md-nav__link">
<span class="md-ellipsis">
Parameter table
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../freqai-feature-engineering/" class="md-nav__link">
<span class="md-ellipsis">
Feature engineering
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../freqai-running/" class="md-nav__link">
<span class="md-ellipsis">
Running FreqAI
</span>
</a>
</li>
<li class="md-nav__item md-nav__item--active">
<input class="md-nav__toggle md-toggle" type="checkbox" id="__toc">
<label class="md-nav__link md-nav__link--active" for="__toc">
<span class="md-ellipsis">
Reinforcement Learning
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<a href="./" class="md-nav__link md-nav__link--active">
<span class="md-ellipsis">
Reinforcement Learning
</span>
</a>
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon"></span>
Table of contents
</label>
<ul class="md-nav__list" data-md-component="toc" data-md-scrollfix>
<li class="md-nav__item">
<a href="#background-and-terminology" class="md-nav__link">
<span class="md-ellipsis">
Background and terminology
</span>
</a>
<nav class="md-nav" aria-label="Background and terminology">
<ul class="md-nav__list">
<li class="md-nav__item">
<a href="#what-is-rl-and-why-does-freqai-need-it" class="md-nav__link">
<span class="md-ellipsis">
What is RL and why does FreqAI need it?
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#the-rl-interface" class="md-nav__link">
<span class="md-ellipsis">
The RL interface
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#important-considerations" class="md-nav__link">
<span class="md-ellipsis">
Important considerations
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="#running-reinforcement-learning" class="md-nav__link">
<span class="md-ellipsis">
Running Reinforcement Learning
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#configuring-the-reinforcement-learner" class="md-nav__link">
<span class="md-ellipsis">
Configuring the Reinforcement Learner
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#creating-a-custom-reward-function" class="md-nav__link">
<span class="md-ellipsis">
Creating a custom reward function
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#using-tensorboard" class="md-nav__link">
<span class="md-ellipsis">
Using Tensorboard
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#custom-logging" class="md-nav__link">
<span class="md-ellipsis">
Custom logging
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#choosing-a-base-environment" class="md-nav__link">
<span class="md-ellipsis">
Choosing a base environment
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../freqai-developers/" class="md-nav__link">
<span class="md-ellipsis">
Developer guide
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../leverage/" class="md-nav__link">
<span class="md-ellipsis">
Short / Leverage
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../utils/" class="md-nav__link">
<span class="md-ellipsis">
Utility Sub-commands
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../plotting/" class="md-nav__link">
<span class="md-ellipsis">
Plotting
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../exchanges/" class="md-nav__link">
<span class="md-ellipsis">
Exchange-specific Notes
</span>
</a>
</li>
<li class="md-nav__item md-nav__item--nested">
<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_20" >
<label class="md-nav__link" for="__nav_20" id="__nav_20_label" tabindex="0">
<span class="md-ellipsis">
Data Analysis
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_20_label" aria-expanded="false">
<label class="md-nav__title" for="__nav_20">
<span class="md-nav__icon md-icon"></span>
Data Analysis
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../data-analysis/" class="md-nav__link">
<span class="md-ellipsis">
Jupyter Notebooks
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../strategy_analysis_example/" class="md-nav__link">
<span class="md-ellipsis">
Strategy analysis
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../advanced-backtesting/" class="md-nav__link">
<span class="md-ellipsis">
Backtest analysis
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item md-nav__item--nested">
<input class="md-nav__toggle md-toggle " type="checkbox" id="__nav_21" >
<label class="md-nav__link" for="__nav_21" id="__nav_21_label" tabindex="0">
<span class="md-ellipsis">
Advanced Topics
</span>
<span class="md-nav__icon md-icon"></span>
</label>
<nav class="md-nav" data-md-level="1" aria-labelledby="__nav_21_label" aria-expanded="false">
<label class="md-nav__title" for="__nav_21">
<span class="md-nav__icon md-icon"></span>
Advanced Topics
</label>
<ul class="md-nav__list" data-md-scrollfix>
<li class="md-nav__item">
<a href="../advanced-setup/" class="md-nav__link">
<span class="md-ellipsis">
Advanced Post-installation Tasks
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../trade-object/" class="md-nav__link">
<span class="md-ellipsis">
Trade Object
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../lookahead-analysis/" class="md-nav__link">
<span class="md-ellipsis">
Lookahead analysis
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../recursive-analysis/" class="md-nav__link">
<span class="md-ellipsis">
Recursive analysis
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../strategy-advanced/" class="md-nav__link">
<span class="md-ellipsis">
Advanced Strategy
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../advanced-hyperopt/" class="md-nav__link">
<span class="md-ellipsis">
Advanced Hyperopt
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../producer-consumer/" class="md-nav__link">
<span class="md-ellipsis">
Producer/Consumer mode
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../edge/" class="md-nav__link">
<span class="md-ellipsis">
Edge Positioning
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="../faq/" class="md-nav__link">
<span class="md-ellipsis">
FAQ
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../sql_cheatsheet/" class="md-nav__link">
<span class="md-ellipsis">
SQL Cheat-sheet
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../strategy_migration/" class="md-nav__link">
<span class="md-ellipsis">
Strategy migration
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../updating/" class="md-nav__link">
<span class="md-ellipsis">
Updating Freqtrade
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../deprecated/" class="md-nav__link">
<span class="md-ellipsis">
Deprecated Features
</span>
</a>
</li>
<li class="md-nav__item">
<a href="../developer/" class="md-nav__link">
<span class="md-ellipsis">
Contributors Guide
</span>
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<!-- Table of contents -->
<div class="md-sidebar md-sidebar--secondary" data-md-component="sidebar" data-md-type="toc" >
<div class="md-sidebar__scrollwrap">
<div class="md-sidebar__inner">
<nav class="md-nav md-nav--secondary" aria-label="Table of contents">
<label class="md-nav__title" for="__toc">
<span class="md-nav__icon md-icon"></span>
Table of contents
</label>
<ul class="md-nav__list" data-md-component="toc" data-md-scrollfix>
<li class="md-nav__item">
<a href="#background-and-terminology" class="md-nav__link">
<span class="md-ellipsis">
Background and terminology
</span>
</a>
<nav class="md-nav" aria-label="Background and terminology">
<ul class="md-nav__list">
<li class="md-nav__item">
<a href="#what-is-rl-and-why-does-freqai-need-it" class="md-nav__link">
<span class="md-ellipsis">
What is RL and why does FreqAI need it?
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#the-rl-interface" class="md-nav__link">
<span class="md-ellipsis">
The RL interface
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#important-considerations" class="md-nav__link">
<span class="md-ellipsis">
Important considerations
</span>
</a>
</li>
</ul>
</nav>
</li>
<li class="md-nav__item">
<a href="#running-reinforcement-learning" class="md-nav__link">
<span class="md-ellipsis">
Running Reinforcement Learning
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#configuring-the-reinforcement-learner" class="md-nav__link">
<span class="md-ellipsis">
Configuring the Reinforcement Learner
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#creating-a-custom-reward-function" class="md-nav__link">
<span class="md-ellipsis">
Creating a custom reward function
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#using-tensorboard" class="md-nav__link">
<span class="md-ellipsis">
Using Tensorboard
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#custom-logging" class="md-nav__link">
<span class="md-ellipsis">
Custom logging
</span>
</a>
</li>
<li class="md-nav__item">
<a href="#choosing-a-base-environment" class="md-nav__link">
<span class="md-ellipsis">
Choosing a base environment
</span>
</a>
</li>
</ul>
</nav>
</div>
</div>
</div>
<div class="md-content" data-md-component="content">
<article class="md-content__inner md-typeset">
<h1 id="reinforcement-learning">Reinforcement Learning<a class="headerlink" href="#reinforcement-learning" title="Permanent link">&para;</a></h1>
<div class="admonition note">
<p class="admonition-title">Installation size</p>
<p>Reinforcement learning dependencies include large packages such as <code>torch</code>, which should be explicitly requested during <code>./setup.sh -i</code> by answering "y" to the question "Do you also want dependencies for freqai-rl (~700mb additional space required) [y/N]?".
Users who prefer docker should ensure they use the docker image appended with <code>_freqairl</code>.</p>
</div>
<h2 id="background-and-terminology">Background and terminology<a class="headerlink" href="#background-and-terminology" title="Permanent link">&para;</a></h2>
<h3 id="what-is-rl-and-why-does-freqai-need-it">What is RL and why does FreqAI need it?<a class="headerlink" href="#what-is-rl-and-why-does-freqai-need-it" title="Permanent link">&para;</a></h3>
<p>Reinforcement learning involves two important components, the <em>agent</em> and the training <em>environment</em>. During agent training, the agent moves through historical data candle by candle, always making 1 of a set of actions: long entry, long exit, short entry, short exit, or neutral. During this training process, the environment tracks the performance of these actions and rewards the agent according to a custom user made <code>calculate_reward()</code> (here we offer a default reward for users to build on if they wish <a href="#creating-a-custom-reward-function">details here</a>). The reward is used to train weights in a neural network.</p>
<p>A second important component of the FreqAI RL implementation is the use of <em>state</em> information. State information is fed into the network at each step, including current profit, current position, and current trade duration. These are used to train the agent in the training environment, and to reinforce the agent in dry/live (this functionality is not available in backtesting). <em>FreqAI + Freqtrade is a perfect match for this reinforcing mechanism since this information is readily available in live deployments.</em></p>
<p>Reinforcement learning is a natural progression for FreqAI, since it adds a new layer of adaptivity and market reactivity that Classifiers and Regressors cannot match. However, Classifiers and Regressors have strengths that RL does not have such as robust predictions. Improperly trained RL agents may find "cheats" and "tricks" to maximize reward without actually winning any trades. For this reason, RL is more complex and demands a higher level of understanding than typical Classifiers and Regressors.</p>
<h3 id="the-rl-interface">The RL interface<a class="headerlink" href="#the-rl-interface" title="Permanent link">&para;</a></h3>
<p>With the current framework, we aim to expose the training environment via the common "prediction model" file, which is a user inherited <code>BaseReinforcementLearner</code> object (e.g. <code>freqai/prediction_models/ReinforcementLearner</code>). Inside this user class, the RL environment is available and customized via <code>MyRLEnv</code> as <a href="#creating-a-custom-reward-function">shown below</a>.</p>
<p>We envision the majority of users focusing their effort on creative design of the <code>calculate_reward()</code> function <a href="#creating-a-custom-reward-function">details here</a>, while leaving the rest of the environment untouched. Other users may not touch the environment at all, and they will only play with the configuration settings and the powerful feature engineering that already exists in FreqAI. Meanwhile, we enable advanced users to create their own model classes entirely.</p>
<p>The framework is built on stable_baselines3 (torch) and OpenAI gym for the base environment class. But generally speaking, the model class is well isolated. Thus, the addition of competing libraries can be easily integrated into the existing framework. For the environment, it inherits from <code>gym.Env</code>, which means that it is necessary to write an entirely new environment in order to switch to a different library.</p>
<h3 id="important-considerations">Important considerations<a class="headerlink" href="#important-considerations" title="Permanent link">&para;</a></h3>
<p>As explained above, the agent is "trained" in an artificial trading "environment". In our case, that environment may seem quite similar to a real Freqtrade backtesting environment, but it is <em>NOT</em>. In fact, the RL training environment is much more simplified. It does not incorporate any of the complicated strategy logic, such as callbacks like <code>custom_exit</code>, <code>custom_stoploss</code>, leverage controls, etc. The RL environment is instead a very "raw" representation of the true market, where the agent has free will to learn the policy (read: stoploss, take profit, etc.) which is enforced by the <code>calculate_reward()</code>. Thus, it is important to consider that the agent training environment is not identical to the real world.</p>
<h2 id="running-reinforcement-learning">Running Reinforcement Learning<a class="headerlink" href="#running-reinforcement-learning" title="Permanent link">&para;</a></h2>
<p>Setting up and running a Reinforcement Learning model is the same as running a Regressor or Classifier. The same two flags, <code>--freqaimodel</code> and <code>--strategy</code>, must be defined on the command line:</p>
<div class="highlight"><pre><span></span><code>freqtrade<span class="w"> </span>trade<span class="w"> </span>--freqaimodel<span class="w"> </span>ReinforcementLearner<span class="w"> </span>--strategy<span class="w"> </span>MyRLStrategy<span class="w"> </span>--config<span class="w"> </span>config.json
</code></pre></div>
<p>where <code>ReinforcementLearner</code> will use the templated <code>ReinforcementLearner</code> from <code>freqai/prediction_models/ReinforcementLearner</code> (or a custom user defined one located in <code>user_data/freqaimodels</code>). The strategy, on the other hand, follows the same base <a href="../freqai-feature-engineering/">feature engineering</a> with <code>feature_engineering_*</code> as a typical Regressor. The difference lies in the creation of the targets; Reinforcement Learning doesn't require them. However, FreqAI requires a default (neutral) value to be set in the action column:</p>
<div class="highlight"><pre><span></span><code> <span class="k">def</span> <span class="nf">set_freqai_targets</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataframe</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">DataFrame</span><span class="p">:</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> *Only functional with FreqAI enabled strategies*</span>
<span class="sd"> Required function to set the targets for the model.</span>
<span class="sd"> All targets must be prepended with `&amp;` to be recognized by the FreqAI internals.</span>
<span class="sd"> More details about feature engineering available:</span>
<span class="sd"> https://www.freqtrade.io/en/latest/freqai-feature-engineering</span>
<span class="sd"> :param df: strategy dataframe which will receive the targets</span>
<span class="sd"> usage example: dataframe[&quot;&amp;-target&quot;] = dataframe[&quot;close&quot;].shift(-1) / dataframe[&quot;close&quot;]</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="c1"># For RL, there are no direct targets to set. This is filler (neutral)</span>
<span class="c1"># until the agent sends an action.</span>
<span class="n">dataframe</span><span class="p">[</span><span class="s2">&quot;&amp;-action&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span>
<span class="k">return</span> <span class="n">dataframe</span>
</code></pre></div>
<p>Most of the function remains the same as for typical Regressors, however, the function below shows how the strategy must pass the raw price data to the agent so that it has access to raw OHLCV in the training environment:</p>
<div class="highlight"><pre><span></span><code> <span class="k">def</span> <span class="nf">feature_engineering_standard</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">dataframe</span><span class="p">:</span> <span class="n">DataFrame</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">DataFrame</span><span class="p">:</span>
<span class="c1"># The following features are necessary for RL models</span>
<span class="n">dataframe</span><span class="p">[</span><span class="sa">f</span><span class="s2">&quot;%-raw_close&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">dataframe</span><span class="p">[</span><span class="s2">&quot;close&quot;</span><span class="p">]</span>
<span class="n">dataframe</span><span class="p">[</span><span class="sa">f</span><span class="s2">&quot;%-raw_open&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">dataframe</span><span class="p">[</span><span class="s2">&quot;open&quot;</span><span class="p">]</span>
<span class="n">dataframe</span><span class="p">[</span><span class="sa">f</span><span class="s2">&quot;%-raw_high&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">dataframe</span><span class="p">[</span><span class="s2">&quot;high&quot;</span><span class="p">]</span>
<span class="n">dataframe</span><span class="p">[</span><span class="sa">f</span><span class="s2">&quot;%-raw_low&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="n">dataframe</span><span class="p">[</span><span class="s2">&quot;low&quot;</span><span class="p">]</span>
<span class="k">return</span> <span class="n">dataframe</span>
</code></pre></div>
<p>Finally, there is no explicit "label" to make - instead it is necessary to assign the <code>&amp;-action</code> column which will contain the agent's actions when accessed in <code>populate_entry/exit_trend()</code>. In the present example, the neutral action is set to 0. This value should align with the environment used. FreqAI provides two environments, both use 0 as the neutral action.</p>
<p>After users realize there are no labels to set, they will soon understand that the agent is making its "own" entry and exit decisions. This makes strategy construction rather simple. The entry and exit signals come from the agent in the form of an integer - which is used directly to decide entries and exits in the strategy:</p>
<div class="highlight"><pre><span></span><code> <span class="k">def</span> <span class="nf">populate_entry_trend</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">df</span><span class="p">:</span> <span class="n">DataFrame</span><span class="p">,</span> <span class="n">metadata</span><span class="p">:</span> <span class="nb">dict</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">DataFrame</span><span class="p">:</span>
<span class="n">enter_long_conditions</span> <span class="o">=</span> <span class="p">[</span><span class="n">df</span><span class="p">[</span><span class="s2">&quot;do_predict&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">df</span><span class="p">[</span><span class="s2">&quot;&amp;-action&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">]</span>
<span class="k">if</span> <span class="n">enter_long_conditions</span><span class="p">:</span>
<span class="n">df</span><span class="o">.</span><span class="n">loc</span><span class="p">[</span>
<span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">&amp;</span> <span class="n">y</span><span class="p">,</span> <span class="n">enter_long_conditions</span><span class="p">),</span> <span class="p">[</span><span class="s2">&quot;enter_long&quot;</span><span class="p">,</span> <span class="s2">&quot;enter_tag&quot;</span><span class="p">]</span>
<span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="s2">&quot;long&quot;</span><span class="p">)</span>
<span class="n">enter_short_conditions</span> <span class="o">=</span> <span class="p">[</span><span class="n">df</span><span class="p">[</span><span class="s2">&quot;do_predict&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">df</span><span class="p">[</span><span class="s2">&quot;&amp;-action&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">3</span><span class="p">]</span>
<span class="k">if</span> <span class="n">enter_short_conditions</span><span class="p">:</span>
<span class="n">df</span><span class="o">.</span><span class="n">loc</span><span class="p">[</span>
<span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">&amp;</span> <span class="n">y</span><span class="p">,</span> <span class="n">enter_short_conditions</span><span class="p">),</span> <span class="p">[</span><span class="s2">&quot;enter_short&quot;</span><span class="p">,</span> <span class="s2">&quot;enter_tag&quot;</span><span class="p">]</span>
<span class="p">]</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="s2">&quot;short&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="n">df</span>
<span class="k">def</span> <span class="nf">populate_exit_trend</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">df</span><span class="p">:</span> <span class="n">DataFrame</span><span class="p">,</span> <span class="n">metadata</span><span class="p">:</span> <span class="nb">dict</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="n">DataFrame</span><span class="p">:</span>
<span class="n">exit_long_conditions</span> <span class="o">=</span> <span class="p">[</span><span class="n">df</span><span class="p">[</span><span class="s2">&quot;do_predict&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">df</span><span class="p">[</span><span class="s2">&quot;&amp;-action&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">2</span><span class="p">]</span>
<span class="k">if</span> <span class="n">exit_long_conditions</span><span class="p">:</span>
<span class="n">df</span><span class="o">.</span><span class="n">loc</span><span class="p">[</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">&amp;</span> <span class="n">y</span><span class="p">,</span> <span class="n">exit_long_conditions</span><span class="p">),</span> <span class="s2">&quot;exit_long&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="n">exit_short_conditions</span> <span class="o">=</span> <span class="p">[</span><span class="n">df</span><span class="p">[</span><span class="s2">&quot;do_predict&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">df</span><span class="p">[</span><span class="s2">&quot;&amp;-action&quot;</span><span class="p">]</span> <span class="o">==</span> <span class="mi">4</span><span class="p">]</span>
<span class="k">if</span> <span class="n">exit_short_conditions</span><span class="p">:</span>
<span class="n">df</span><span class="o">.</span><span class="n">loc</span><span class="p">[</span><span class="n">reduce</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">:</span> <span class="n">x</span> <span class="o">&amp;</span> <span class="n">y</span><span class="p">,</span> <span class="n">exit_short_conditions</span><span class="p">),</span> <span class="s2">&quot;exit_short&quot;</span><span class="p">]</span> <span class="o">=</span> <span class="mi">1</span>
<span class="k">return</span> <span class="n">df</span>
</code></pre></div>
<p>It is important to consider that <code>&amp;-action</code> depends on which environment you choose to use. The example above shows 5 actions, where 0 is neutral, 1 is enter long, 2 is exit long, 3 is enter short and 4 is exit short.</p>
<h2 id="configuring-the-reinforcement-learner">Configuring the Reinforcement Learner<a class="headerlink" href="#configuring-the-reinforcement-learner" title="Permanent link">&para;</a></h2>
<p>In order to configure the <code>Reinforcement Learner</code> the following dictionary must exist in the <code>freqai</code> config:</p>
<div class="highlight"><pre><span></span><code><span class="w"> </span><span class="nt">&quot;rl_config&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;train_cycles&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">25</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;add_state_info&quot;</span><span class="p">:</span><span class="w"> </span><span class="kc">true</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;max_trade_duration_candles&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">300</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;max_training_drawdown_pct&quot;</span><span class="p">:</span><span class="w"> </span><span class="mf">0.02</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;cpu_count&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">8</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;model_type&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;PPO&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;policy_type&quot;</span><span class="p">:</span><span class="w"> </span><span class="s2">&quot;MlpPolicy&quot;</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;model_reward_parameters&quot;</span><span class="p">:</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="nt">&quot;rr&quot;</span><span class="p">:</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span>
<span class="w"> </span><span class="nt">&quot;profit_aim&quot;</span><span class="p">:</span><span class="w"> </span><span class="mf">0.025</span>
<span class="w"> </span><span class="p">}</span>
<span class="w"> </span><span class="p">}</span>
</code></pre></div>
<p>Parameter details can be found <a href="../freqai-parameter-table/">here</a>, but in general the <code>train_cycles</code> decides how many times the agent should cycle through the candle data in its artificial environment to train weights in the model. <code>model_type</code> is a string which selects one of the available models in <a href="https://stable-baselines3.readthedocs.io/en/master/">stable_baselines</a> (external link).</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>If you would like to experiment with <code>continual_learning</code>, then you should set that value to <code>true</code> in the main <code>freqai</code> configuration dictionary. This will tell the Reinforcement Learning library to continue training new models from the final state of previous models, instead of retraining new models from scratch each time a retrain is initiated.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Remember that the general <code>model_training_parameters</code> dictionary should contain all the model hyperparameter customizations for the particular <code>model_type</code>. For example, <code>PPO</code> parameters can be found <a href="https://stable-baselines3.readthedocs.io/en/master/modules/ppo.html">here</a>.</p>
</div>
<h2 id="creating-a-custom-reward-function">Creating a custom reward function<a class="headerlink" href="#creating-a-custom-reward-function" title="Permanent link">&para;</a></h2>
<div class="admonition danger">
<p class="admonition-title">Not for production</p>
<p>Warning!
The reward function provided with the Freqtrade source code is a showcase of functionality designed to show/test as many possible environment control features as possible. It is also designed to run quickly on small computers. This is a benchmark, it is <em>not</em> for live production. Please beware that you will need to create your own custom <code>calculate_reward()</code> function or use a template built by other users outside of the Freqtrade source code.</p>
</div>
<p>As you begin to modify the strategy and the prediction model, you will quickly realize some important differences between the Reinforcement Learner and the Regressors/Classifiers. Firstly, the strategy does not set a target value (no labels!). Instead, you set the <code>calculate_reward()</code> function inside the <code>MyRLEnv</code> class (see below). A default <code>calculate_reward()</code> is provided inside <code>prediction_models/ReinforcementLearner.py</code> to demonstrate the necessary building blocks for creating rewards, but this is <em>not</em> designed for production. Users <em>must</em> create their own custom reinforcement learning model class or use a pre-built one from outside the Freqtrade source code and save it to <code>user_data/freqaimodels</code>. It is inside the <code>calculate_reward()</code> where creative theories about the market can be expressed. For example, you can reward your agent when it makes a winning trade, and penalize the agent when it makes a losing trade. Or perhaps, you wish to reward the agent for entering trades, and penalize the agent for sitting in trades too long. Below we show examples of how these rewards are all calculated:</p>
<div class="admonition note">
<p class="admonition-title">Hint</p>
<p>The best reward functions are ones that are continuously differentiable, and well scaled. In other words, adding a single large negative penalty to a rare event is not a good idea, and the neural net will not be able to learn that function. Instead, it is better to add a small negative penalty to a common event. This will help the agent learn faster. Not only this, but you can help improve the continuity of your rewards/penalties by having them scale with severity according to some linear/exponential functions. In other words, you'd slowly scale the penalty as the duration of the trade increases. This is better than a single large penalty occurring at a single point in time.</p>
</div>
<div class="highlight"><pre><span></span><code><span class="kn">from</span> <span class="nn">freqtrade.freqai.prediction_models.ReinforcementLearner</span> <span class="kn">import</span> <span class="n">ReinforcementLearner</span>
<span class="kn">from</span> <span class="nn">freqtrade.freqai.RL.Base5ActionRLEnv</span> <span class="kn">import</span> <span class="n">Actions</span><span class="p">,</span> <span class="n">Base5ActionRLEnv</span><span class="p">,</span> <span class="n">Positions</span>
<span class="k">class</span> <span class="nc">MyCoolRLModel</span><span class="p">(</span><span class="n">ReinforcementLearner</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> User created RL prediction model.</span>
<span class="sd"> Save this file to `freqtrade/user_data/freqaimodels`</span>
<span class="sd"> then use it with:</span>
<span class="sd"> freqtrade trade --freqaimodel MyCoolRLModel --config config.json --strategy SomeCoolStrat</span>
<span class="sd"> Here the users can override any of the functions</span>
<span class="sd"> available in the `IFreqaiModel` inheritance tree. Most importantly for RL, this</span>
<span class="sd"> is where the user overrides `MyRLEnv` (see below), to define custom</span>
<span class="sd"> `calculate_reward()` function, or to override any other parts of the environment.</span>
<span class="sd"> This class also allows users to override any other part of the IFreqaiModel tree.</span>
<span class="sd"> For example, the user can override `def fit()` or `def train()` or `def predict()`</span>
<span class="sd"> to take fine-tuned control over these processes.</span>
<span class="sd"> Another common override may be `def data_cleaning_predict()` where the user can</span>
<span class="sd"> take fine-tuned control over the data handling pipeline.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">class</span> <span class="nc">MyRLEnv</span><span class="p">(</span><span class="n">Base5ActionRLEnv</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> User made custom environment. This class inherits from BaseEnvironment and gym.Env.</span>
<span class="sd"> Users can override any functions from those parent classes. Here is an example</span>
<span class="sd"> of a user customized `calculate_reward()` function.</span>
<span class="sd"> Warning!</span>
<span class="sd"> This function is a showcase of functionality designed to show as many possible</span>
<span class="sd"> environment control features as possible. It is also designed to run quickly</span>
<span class="sd"> on small computers. This is a benchmark, it is *not* for live production.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="nf">calculate_reward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">action</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">float</span><span class="p">:</span>
<span class="c1"># first, penalize if the action is not valid</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_is_valid</span><span class="p">(</span><span class="n">action</span><span class="p">):</span>
<span class="k">return</span> <span class="o">-</span><span class="mi">2</span>
<span class="n">pnl</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">get_unrealized_profit</span><span class="p">()</span>
<span class="n">factor</span> <span class="o">=</span> <span class="mi">100</span>
<span class="n">pair</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">pair</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s1">&#39;:&#39;</span><span class="p">,</span> <span class="s1">&#39;&#39;</span><span class="p">)</span>
<span class="c1"># you can use feature values from dataframe</span>
<span class="c1"># Assumes the shifted RSI indicator has been generated in the strategy.</span>
<span class="n">rsi_now</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">raw_features</span><span class="p">[</span><span class="sa">f</span><span class="s2">&quot;%-rsi-period_10_shift-1_</span><span class="si">{</span><span class="n">pair</span><span class="si">}</span><span class="s2">_&quot;</span>
<span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">config</span><span class="p">[</span><span class="s1">&#39;timeframe&#39;</span><span class="p">]</span><span class="si">}</span><span class="s2">&quot;</span><span class="p">]</span><span class="o">.</span><span class="n">iloc</span><span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">_current_tick</span><span class="p">]</span>
<span class="c1"># reward agent for entering trades</span>
<span class="k">if</span> <span class="p">(</span><span class="n">action</span> <span class="ow">in</span> <span class="p">(</span><span class="n">Actions</span><span class="o">.</span><span class="n">Long_enter</span><span class="o">.</span><span class="n">value</span><span class="p">,</span> <span class="n">Actions</span><span class="o">.</span><span class="n">Short_enter</span><span class="o">.</span><span class="n">value</span><span class="p">)</span>
<span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_position</span> <span class="o">==</span> <span class="n">Positions</span><span class="o">.</span><span class="n">Neutral</span><span class="p">):</span>
<span class="k">if</span> <span class="n">rsi_now</span> <span class="o">&lt;</span> <span class="mi">40</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">=</span> <span class="mi">40</span> <span class="o">/</span> <span class="n">rsi_now</span>
<span class="k">else</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">=</span> <span class="mi">1</span>
<span class="k">return</span> <span class="mi">25</span> <span class="o">*</span> <span class="n">factor</span>
<span class="c1"># discourage agent from not entering trades</span>
<span class="k">if</span> <span class="n">action</span> <span class="o">==</span> <span class="n">Actions</span><span class="o">.</span><span class="n">Neutral</span><span class="o">.</span><span class="n">value</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_position</span> <span class="o">==</span> <span class="n">Positions</span><span class="o">.</span><span class="n">Neutral</span><span class="p">:</span>
<span class="k">return</span> <span class="o">-</span><span class="mi">1</span>
<span class="n">max_trade_duration</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rl_config</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">&#39;max_trade_duration_candles&#39;</span><span class="p">,</span> <span class="mi">300</span><span class="p">)</span>
<span class="n">trade_duration</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_current_tick</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">_last_trade_tick</span>
<span class="k">if</span> <span class="n">trade_duration</span> <span class="o">&lt;=</span> <span class="n">max_trade_duration</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">*=</span> <span class="mf">1.5</span>
<span class="k">elif</span> <span class="n">trade_duration</span> <span class="o">&gt;</span> <span class="n">max_trade_duration</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">*=</span> <span class="mf">0.5</span>
<span class="c1"># discourage sitting in position</span>
<span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">_position</span> <span class="ow">in</span> <span class="p">(</span><span class="n">Positions</span><span class="o">.</span><span class="n">Short</span><span class="p">,</span> <span class="n">Positions</span><span class="o">.</span><span class="n">Long</span><span class="p">)</span> <span class="ow">and</span> \
<span class="n">action</span> <span class="o">==</span> <span class="n">Actions</span><span class="o">.</span><span class="n">Neutral</span><span class="o">.</span><span class="n">value</span><span class="p">:</span>
<span class="k">return</span> <span class="o">-</span><span class="mi">1</span> <span class="o">*</span> <span class="n">trade_duration</span> <span class="o">/</span> <span class="n">max_trade_duration</span>
<span class="c1"># close long</span>
<span class="k">if</span> <span class="n">action</span> <span class="o">==</span> <span class="n">Actions</span><span class="o">.</span><span class="n">Long_exit</span><span class="o">.</span><span class="n">value</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_position</span> <span class="o">==</span> <span class="n">Positions</span><span class="o">.</span><span class="n">Long</span><span class="p">:</span>
<span class="k">if</span> <span class="n">pnl</span> <span class="o">&gt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">profit_aim</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rr</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rl_config</span><span class="p">[</span><span class="s1">&#39;model_reward_parameters&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">&#39;win_reward_factor&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="k">return</span> <span class="nb">float</span><span class="p">(</span><span class="n">pnl</span> <span class="o">*</span> <span class="n">factor</span><span class="p">)</span>
<span class="c1"># close short</span>
<span class="k">if</span> <span class="n">action</span> <span class="o">==</span> <span class="n">Actions</span><span class="o">.</span><span class="n">Short_exit</span><span class="o">.</span><span class="n">value</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">_position</span> <span class="o">==</span> <span class="n">Positions</span><span class="o">.</span><span class="n">Short</span><span class="p">:</span>
<span class="k">if</span> <span class="n">pnl</span> <span class="o">&gt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">profit_aim</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">rr</span><span class="p">:</span>
<span class="n">factor</span> <span class="o">*=</span> <span class="bp">self</span><span class="o">.</span><span class="n">rl_config</span><span class="p">[</span><span class="s1">&#39;model_reward_parameters&#39;</span><span class="p">]</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="s1">&#39;win_reward_factor&#39;</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
<span class="k">return</span> <span class="nb">float</span><span class="p">(</span><span class="n">pnl</span> <span class="o">*</span> <span class="n">factor</span><span class="p">)</span>
<span class="k">return</span> <span class="mf">0.</span>
</code></pre></div>
<h2 id="using-tensorboard">Using Tensorboard<a class="headerlink" href="#using-tensorboard" title="Permanent link">&para;</a></h2>
<p>Reinforcement Learning models benefit from tracking training metrics. FreqAI has integrated Tensorboard to allow users to track training and evaluation performance across all coins and across all retrainings. Tensorboard is activated via the following command:</p>
<div class="highlight"><pre><span></span><code>tensorboard<span class="w"> </span>--logdir<span class="w"> </span>user_data/models/unique-id
</code></pre></div>
<p>where <code>unique-id</code> is the <code>identifier</code> set in the <code>freqai</code> configuration file. This command must be run in a separate shell to view the output in the browser at 127.0.0.1:6006 (6006 is the default port used by Tensorboard).</p>
<p><img alt="tensorboard" src="../assets/tensorboard.jpg" /></p>
<h2 id="custom-logging">Custom logging<a class="headerlink" href="#custom-logging" title="Permanent link">&para;</a></h2>
<p>FreqAI also provides a built-in episodic summary logger called <code>self.tensorboard_log</code> for adding custom information to the Tensorboard log. By default, this function is already called once per step inside the environment to record the agent actions. All values accumulated for all steps in a single episode are reported at the conclusion of each episode, followed by a full reset of all metrics to 0 in preparation for the subsequent episode.</p>
<p><code>self.tensorboard_log</code> can also be used anywhere inside the environment, for example, it can be added to the <code>calculate_reward</code> function to collect more detailed information about how often various parts of the reward were called:</p>
<div class="highlight"><pre><span></span><code> <span class="k">class</span> <span class="nc">MyRLEnv</span><span class="p">(</span><span class="n">Base5ActionRLEnv</span><span class="p">):</span>
<span class="w"> </span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd"> User made custom environment. This class inherits from BaseEnvironment and gym.Env.</span>
<span class="sd"> Users can override any functions from those parent classes. Here is an example</span>
<span class="sd"> of a user customized `calculate_reward()` function.</span>
<span class="sd"> &quot;&quot;&quot;</span>
<span class="k">def</span> <span class="nf">calculate_reward</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">action</span><span class="p">:</span> <span class="nb">int</span><span class="p">)</span> <span class="o">-&gt;</span> <span class="nb">float</span><span class="p">:</span>
<span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_is_valid</span><span class="p">(</span><span class="n">action</span><span class="p">):</span>
<span class="bp">self</span><span class="o">.</span><span class="n">tensorboard_log</span><span class="p">(</span><span class="s2">&quot;invalid&quot;</span><span class="p">)</span>
<span class="k">return</span> <span class="o">-</span><span class="mi">2</span>
</code></pre></div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The <code>self.tensorboard_log()</code> function is designed for tracking incremented objects only i.e. events, actions inside the training environment. If the event of interest is a float, the float can be passed as the second argument e.g. <code>self.tensorboard_log("float_metric1", 0.23)</code>. In this case the metric values are not incremented.</p>
</div>
<h2 id="choosing-a-base-environment">Choosing a base environment<a class="headerlink" href="#choosing-a-base-environment" title="Permanent link">&para;</a></h2>
<p>FreqAI provides three base environments, <code>Base3ActionRLEnv</code>, <code>Base4ActionRLEnv</code> and <code>Base5ActionRLEnv</code>. As the names imply, the environments are customized for agents that can select from 3, 4 or 5 actions. The <code>Base3ActionRLEnv</code> is the simplest, the agent can select from hold, long, or short. This environment can also be used for long-only bots (it automatically follows the <code>can_short</code> flag from the strategy), where long is the enter condition and short is the exit condition. Meanwhile, in the <code>Base4ActionRLEnv</code>, the agent can enter long, enter short, hold neutral, or exit position. Finally, in the <code>Base5ActionRLEnv</code>, the agent has the same actions as Base4, but instead of a single exit action, it separates exit long and exit short. The main changes stemming from the environment selection include:</p>
<ul>
<li>the actions available in the <code>calculate_reward</code></li>
<li>the actions consumed by the user strategy</li>
</ul>
<p>All of the FreqAI provided environments inherit from an action/position agnostic environment object called the <code>BaseEnvironment</code>, which contains all shared logic. The architecture is designed to be easily customized. The simplest customization is the <code>calculate_reward()</code> (see details <a href="#creating-a-custom-reward-function">here</a>). However, the customizations can be further extended into any of the functions inside the environment. You can do this by simply overriding those functions inside your <code>MyRLEnv</code> in the prediction model file. Or for more advanced customizations, it is encouraged to create an entirely new environment inherited from <code>BaseEnvironment</code>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only the <code>Base3ActionRLEnv</code> can do long-only training/trading (set the user strategy attribute <code>can_short = False</code>).</p>
</div>
</article>
</div>
<script>var target=document.getElementById(location.hash.slice(1));target&&target.name&&(target.checked=target.name.startsWith("__tabbed_"))</script>
</div>
<button type="button" class="md-top md-icon" data-md-component="top" hidden>
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M13 20h-2V8l-5.5 5.5-1.42-1.42L12 4.16l7.92 7.92-1.42 1.42L13 8v12Z"/></svg>
Back to top
</button>
</main>
<footer class="md-footer">
<nav class="md-footer__inner md-grid" aria-label="Footer" >
<a href="../freqai-running/" class="md-footer__link md-footer__link--prev" aria-label="Previous: Running FreqAI">
<div class="md-footer__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12Z"/></svg>
</div>
<div class="md-footer__title">
<span class="md-footer__direction">
Previous
</span>
<div class="md-ellipsis">
Running FreqAI
</div>
</div>
</a>
<a href="../freqai-developers/" class="md-footer__link md-footer__link--next" aria-label="Next: Developer guide">
<div class="md-footer__title">
<span class="md-footer__direction">
Next
</span>
<div class="md-ellipsis">
Developer guide
</div>
</div>
<div class="md-footer__button md-icon">
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4Z"/></svg>
</div>
</a>
</nav>
<div class="md-footer-meta md-typeset">
<div class="md-footer-meta__inner md-grid">
<div class="md-copyright">
Made with
<a href="https://squidfunk.github.io/mkdocs-material/" target="_blank" rel="noopener">
Material for MkDocs
</a>
</div>
</div>
</div>
</footer>
<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src="https://buttons.github.io/buttons.js"></script>
<script src="https://code.jquery.com/jquery-3.4.1.min.js"
integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>
</div>
<div class="md-dialog" data-md-component="dialog">
<div class="md-dialog__inner md-typeset"></div>
</div>
<script id="__config" type="application/json">{"base": "..", "features": ["content.code.annotate", "search.share", "content.code.copy", "navigation.top", "navigation.footer"], "search": "../assets/javascripts/workers/search.b8dbb3d2.min.js", "translations": {"clipboard.copied": "Copied to clipboard", "clipboard.copy": "Copy to clipboard", "search.result.more.one": "1 more on this page", "search.result.more.other": "# more on this page", "search.result.none": "No matching documents", "search.result.one": "1 matching document", "search.result.other": "# matching documents", "search.result.placeholder": "Type to start searching", "search.result.term.missing": "Missing", "select.version": "Select version"}, "version": {"provider": "mike"}}</script>
<script src="../assets/javascripts/bundle.bd41221c.min.js"></script>
<script src="../javascripts/config.js"></script>
<script src="https://polyfill.io/v3/polyfill.min.js?features=es6"></script>
<script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js"></script>
</body>
</html>