#!/usr/bin/env python
# coding: utf-8

# # Quickstart or _"How to get 100% return per year"_

# First, do some initialization: load the IPython extensions, import the libraries and silence logged warnings so they don't clutter the output.

# In[1]:

get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")

import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import matplotlib.pyplot as plt

import universal as up
from universal import tools, algos
from universal.algos import *

sns.set_context("notebook")
plt.rcParams["figure.figsize"] = (16, 8)


# In[2]:

# ignore logged warnings
import logging
logging.getLogger().setLevel(logging.ERROR)


# Let's try to replicate the results of B. Li and S. Hoi from their article [On-Line Portfolio Selection with Moving Average Reversion](http://arxiv.org/abs/1206.4626). They claim superior performance on several datasets using their OLMAR algorithm. These datasets are available in the `data/` directory in `.csv` format; they contain raw prices with artificial tickers. We can start with NYSE stocks from the period 1/1/1985 - 30/6/2010.

# In[3]:

# load data using the tools module
data = tools.dataset('nyse_o')

# plot the first three stocks as an example
data.iloc[:, :3].plot()


# Now we need an implementation of the OLMAR algorithm. Fortunately, it is already implemented in the module `algos`, so all we have to do is load it and set its parameters. The authors recommend a lookback window $w = 5$ and threshold $\epsilon = 10$ (these are the default parameters anyway). Just call the `run` method on our data to get results for analysis.

# In[4]:

# set algo parameters
algo = algos.OLMAR(window=5, eps=10)

# run
result = algo.run(data)


# Ok, let's see some results. First print some basic summary metrics and plot the portfolio equity together with UCRP (uniform constant rebalanced portfolio).

# In[5]:

print(result.summary())
result.plot(weights=False, assets=False, ucrp=True, logy=True);


# That seems really impressive; in fact, it looks too good to be true. Let's see how individual stocks contribute to the portfolio equity, and disable the legend to keep the graph clean.

# In[6]:

result.plot_decomposition(legend=False, logy=True)


# As you can see, almost all the wealth comes from a single stock (don't forget the plot has a logarithmic scale!). In fact, if we used just 5 of these stocks, we would get almost the same equity as if we used all of them. To stress test the strategy, we can remove the most profitable stock and rerun the algorithm.

# In[7]:

# find the name of the most profitable asset
most_profitable = result.equity_decomposed.iloc[-1].idxmax()

# rerun the algorithm on data without it
result_without = algo.run(data.drop(columns=[most_profitable]))

# and print the results
print(result_without.summary())
result_without.plot(weights=False, assets=False, ucrp=True, logy=True);


# We lost about 7 orders of magnitude of wealth, but the results are more realistic now. Let's move on and try adding fees of 0.1% per transaction (we pay \$1 for every \$1000 of stocks bought or sold).

# In[8]:

result_without.fee = 0.001
print(result_without.summary())
result_without.plot(weights=False, assets=False, ucrp=True, logy=True)
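# The fee accounting happens inside the library, but a useful mental model is a cost proportional to turnover: every rebalance pays `fee` times the total amount of stock bought or sold. Below is a minimal self-contained sketch of that model; the function and its name are mine for illustration (it assumes the initial allocation is free for simplicity), not part of `universal`, whose internal accounting may differ in details.

# In[ ]:

def proportional_fee_cost(weights, fee=0.001):
    """Sketch of a proportional-fee model: cost = fee * turnover per period."""
    # turnover = total absolute change of portfolio weights at each rebalance
    turnover = weights.diff().abs().sum(axis=1).fillna(0.0)
    # fraction of equity paid in fees each period
    return fee * turnover

# tiny synthetic example: two assets, three rebalances
w = pd.DataFrame({'A': [0.5, 1.0, 0.0], 'B': [0.5, 0.0, 1.0]})
print(proportional_fee_cost(w))  # 0.000, 0.001, 0.002 of equity per period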
# Results still hold, although our Sharpe ratio decreased from 3.14 to 1.56 and the annualized return from 466% to 109%. Now some of you trained in quantitative finance might start asking: "_Isn't there some [survivorship bias](http://en.wikipedia.org/wiki/Survivorship_bias)?_". Yes, there is. In fact, a huge one, considering that we have almost 25 years of data and a mean-reversion type of strategy.

# # Testing Yahoo data

# Let's see whether the algo works on recent data, too. First download closing prices of several (randomly chosen) stocks from Yahoo.

# In[9]:

from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()

# load data from Yahoo
yahoo_data_raw = pdr.get_data_yahoo(['MSFT', 'IBM', 'AAPL', 'GOOG'], start=dt.datetime(2005, 1, 1))
yahoo_data = yahoo_data_raw['Adj Close']

# plot normalized prices of these stocks
(yahoo_data / yahoo_data.iloc[0, :]).plot()


# Instead of using fixed parameters, we will test several `window` parameters with the function `run_combination`. It is the same as `run`, except that you call it as a classmethod and pass lists for the parameters you want to combine. `run_combination` returns a list of results which can be used similarly to a single `result`.

# In[10]:

list_result = algos.OLMAR.run_combination(yahoo_data, window=[3, 5, 10, 15], eps=10)
print(list_result.summary())
list_result.plot()


# Since we don't know the best parameters in advance, we will invest an equal amount of money in each of them at the beginning and let them run. This is called a _buy and hold_ strategy. The portfolio equities in `list_result` can be regarded as stock prices and used as input for a new algo (_buy and hold_ in this case). This way you can chain algorithms however you like, for example OLMAR on OLMAR, etc. (see the sketch after the next cell).
#
# To compare it with the individual assets or a uniform constant rebalanced portfolio, use the parameters `assets` and `ucrp`.

# In[11]:

# run buy and hold on OLMAR results and show its equity together with the original assets
algos.BAH().run(list_result).plot(assets=True, weights=False, ucrp=True);
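# As a quick illustration of the chaining idea, here is a minimal sketch that runs OLMAR on top of the OLMAR equity curves in `list_result`, exactly as `BAH` did above. The parameters of the outer OLMAR are arbitrary, chosen only for illustration.

# In[ ]:

# OLMAR on OLMAR: the equity curves in `list_result` act as "prices"
# for a second-level OLMAR (parameters arbitrary, for illustration only)
olmar_on_olmar = algos.OLMAR(window=5, eps=10).run(list_result)
print(olmar_on_olmar.summary())
olmar_on_olmar.plot(weights=False, ucrp=True);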
# Ok, that was enough for a start. There are plenty of other algorithms in the module `algos`, collected from research papers about online portfolios, including the famous [Universal portfolio](http://en.wikipedia.org/wiki/Universal_portfolio_algorithm) by Thomas Cover.

# # How to write your own algorithm

# The entire package is actually pretty simple. Algorithms are subclasses of the base `Algo` class, and methods for reporting, plotting and analysing are built on top of this class. I will illustrate it with this mean-reversion strategy:
#
# 1. use the logarithm of prices
# 2. calculate the difference $\delta_i$ between the current price of the $i$-th stock and its moving average over the last $n$ days
# 3. if $\delta_i > 0$, assign zero portfolio weight $w_i = 0$ to the $i$-th stock
# 4. if $\delta_i < 0$, assign weight $w_i = -\delta_i$ to the $i$-th stock
# 5. normalize all weights so that $\sum_i w_i = 1$
#
# The idea is that badly performing stocks will revert to their mean and have higher returns than those currently above their mean. Here is the complete code; the comments should be self-explanatory.

# In[12]:

from universal.algo import Algo
import numpy as np

class MeanReversion(Algo):
    # use logarithm of prices
    PRICE_TYPE = 'log'

    def __init__(self, n):
        # length of moving average
        self.n = n
        # step function will be called after min_history days
        super().__init__(min_history=n)

    def init_weights(self, cols):
        # start with zero weights
        return pd.Series(np.zeros(len(cols)), cols)

    def step(self, x, last_b, history):
        # calculate moving average
        ma = history.iloc[-self.n:].mean()

        # weight stocks below their moving average proportionally
        # to their distance from it
        delta = x - ma
        w = np.maximum(-delta, 0.)

        # if every stock is above its moving average, keep previous weights
        # (avoids division by zero)
        if w.sum() == 0.:
            return last_b

        # normalize so that the weights sum to 1
        return w / w.sum()


# That's all. Now let's try it on the NYSE data.

# In[13]:

mr = MeanReversion(n=20)
result = mr.run(data)

print(result.summary())
result.plot(assets=False, logy=True, weights=False, ucrp=True);


# Not bad considering how simple the strategy is. The next step could be performance optimization. To profile your strategy, you can use the function `profile` in `universal.tools`, which profiles the code using the fantastic [line_profiler](http://pythonhosted.org/line_profiler/). After identifying the most critical parts of the code, you have two options. Either optimize your `step` function (using tools such as [weave](http://docs.scipy.org/doc/scipy/reference/tutorial/weave.html), [numba](http://numba.pydata.org/), [theano](http://deeplearning.net/software/theano/) or [cython](http://cython.org/)), or override the `weights` method if your code can be vectorized easily (beware of look-ahead bias!). A sketch of such a vectorized override follows.
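# Here is a minimal sketch of a vectorized `weights` override for the mean-reversion strategy above. It assumes `weights` receives the whole (log-)price history as a DataFrame and should return a DataFrame of weights; check the `Algo` base class for the exact signature before relying on it. Note that `rolling()` only looks backwards, which is what keeps the computation free of look-ahead bias.

# In[ ]:

class VectorizedMeanReversion(MeanReversion):
    """Sketch only: compute all weights at once instead of day by day."""

    def weights(self, S):
        # distance of each log price from its n-day moving average;
        # rolling() uses only past values, so there is no look-ahead bias
        delta = S - S.rolling(self.n).mean()

        # weight stocks below their moving average, zero otherwise
        w = (-delta).clip(lower=0.)

        # normalize each row to sum to 1; fall back to zero weights where
        # no stock is below its moving average or history is too short
        return w.div(w.sum(axis=1), axis=0).fillna(0.)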