In this post I will calculate Value at Risk (VaR) for a single stock using Python. I will do the calculations with both simple returns and log returns, and later on I will calculate the Value at Risk for a portfolio of stocks.
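Before diving into the data, here is a minimal sketch of the parametric (variance-covariance) VaR idea used throughout this post: the VaR at a given confidence level is just the corresponding left-tail quantile of the assumed normal distribution of returns. The mean and standard deviation below are made-up numbers, purely for illustration.
In [ ]:
# Minimal sketch of parametric 1-day VaR (illustrative, made-up parameters)
from scipy.stats import norm

mu, sigma = 0.001, 0.02                        # hypothetical daily mean and standard deviation of returns
confidence = 0.95
var_95 = norm.ppf(1 - confidence, mu, sigma)   # left-tail quantile of the assumed normal
print(f"1-day 95% VaR: {var_95:.4%}")          # a negative return, i.e. the potential loss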
In [1]:
# Value at Risk (VaR) calculation for 1 stock
# Importing libraries
# Data manipulation
import numpy as np
import pandas as pd
# Visualization
import matplotlib.pyplot as plt
import seaborn
# Statistical calculation
from scipy.stats import norm
# Data reading
import pandas_datareader.data as web
import datetime
# Tabular data output
from tabulate import tabulate
VaR Calculation with the Variance-Covariance method for 1 stock, normal and log returns
In [2]:
# I will calculate VaR for Amazon
# I am going to use quandl for retrieving the data as data source
symbol='AMZN'
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime(2019, 1, 1)
df = web.DataReader(symbol, 'quandl', start, end)
In [3]:
df.head(5)
Out[3]:
In [4]:
# I will calculate daily returns using the adjusted closing price
df = df[['AdjClose']].copy()
df['returns'] = df.AdjClose.pct_change()
df['log_returns'] = np.log(df.AdjClose / df.AdjClose.shift(1))
df.head(5)
Out[4]:
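As a quick sanity check (an extra step, not part of the original calculation), the two return columns are consistent with each other, since a log return is just log(1 + simple return):
In [ ]:
# Sketch: a log return equals log(1 + simple return), so both columns should agree
check = np.log(1 + df['returns'])
print(np.allclose(check.dropna(), df['log_returns'].dropna()))   # expected: True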
In [5]:
# As we have only 1 stock in the portfolio, we can calculate VaR straight away.
# If we had had several stocks in the portfolio, we would have needed to know the weights
# and perform further calculations using the covariance, as we will see later.
# For now we are doing it the easy way, with only 1 stock.
# We calculate the mean and the standard deviation of the daily returns
mean_normal = np.mean(df['returns'])
std_dev_normal = np.std(df['returns'])
mean_lognormal = np.mean(df['log_returns'])
std_dev_lognormal = np.std(df['log_returns'])
print(mean_normal)
print(std_dev_normal)
print(mean_lognormal)
print(std_dev_lognormal)
In [6]:
# Let's plot the distribution of daily simple returns
df['returns'].hist(bins=40, density=True, histtype='stepfilled', alpha=0.5)
x = np.linspace(mean_normal - 3*std_dev_normal, mean_normal + 3*std_dev_normal, 100)
# in green we can see the perfect normal distribution
# in the histogram we can see the actual distribution
# Clearly the returns do not follow a normal distribution; for instance, the tails are very fat
# Later on we will repeat the calculation with the log returns
plt.plot(x, norm.pdf(x, mean_normal, std_dev_normal), "g")
plt.savefig('Value at Risk')
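One way to put a number on those fat tails (an extra check, not in the original workflow) is to look at the sample skewness and excess kurtosis of the returns; for a normal distribution both would be close to zero.
In [ ]:
# Sketch: quantifying the fat tails with sample skewness and excess kurtosis
from scipy.stats import skew, kurtosis

returns_clean = df['returns'].dropna()
print('skewness:', skew(returns_clean))
print('excess kurtosis:', kurtosis(returns_clean))   # well above 0 indicates fatter tails than a normal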
In [7]:
# Let's plot the distribution of daily log returns
df['log_returns'].hist(bins=40, density=True, histtype='stepfilled', alpha=0.5)
x = np.linspace(mean_lognormal - 3*std_dev_lognormal, mean_lognormal + 3*std_dev_lognormal, 100)
# in green we can see the perfect normal distribution
# in the histogram we can see the actual distribution
plt.plot(x, norm.pdf(x, mean_lognormal, std_dev_lognormal), "g")
plt.show()
In [8]:
# Calculating the 1-day Value at Risk for AMZN is straightforward.
# We can calculate it for different confidence levels, which express how confident
# we are that the loss will not exceed the Value at Risk.
# We are going to do this calculation with simple returns first,
# then we will repeat the same calculation using the log returns,
# and finally we will calculate the VaR for an entire portfolio using the
# Variance-Covariance approach with log returns.
VaR_90 = norm.ppf(1-0.9, mean_normal, std_dev_normal)
VaR_95 = norm.ppf(1-0.95, mean_normal, std_dev_normal)
VaR_99 = norm.ppf(1-0.99, mean_normal, std_dev_normal)
print(tabulate([['90%', VaR_90], ['95%', VaR_95], ['99%', VaR_99]],
               headers=['Confidence Level', 'Value at Risk']))
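These figures are expressed as returns. To turn them into money terms, multiply by the position size; here is a sketch assuming a hypothetical USD 1,000,000 position in AMZN (the position size is made up for illustration).
In [ ]:
# Sketch: converting the return-based VaR into a monetary figure (hypothetical position size)
position_value = 1_000_000
print(tabulate([['90%', VaR_90 * position_value],
                ['95%', VaR_95 * position_value],
                ['99%', VaR_99 * position_value]],
               headers=['Confidence Level', '1-day VaR (USD)']))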
In [9]:
# Calculating the 1-day Value at Risk for AMZN with log returns
VaR_90 = norm.ppf(1-0.9, mean_lognormal, std_dev_lognormal)
VaR_95 = norm.ppf(1-0.95, mean_lognormal, std_dev_lognormal)
VaR_99 = norm.ppf(1-0.99, mean_lognormal, std_dev_lognormal)
print(tabulate([['90%', VaR_90], ['95%', VaR_95], ['99%', VaR_99]],
               headers=['Confidence Level', 'Value at Risk']))
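If a horizon longer than one day were needed, a common simplification is the square-root-of-time rule, which scales the 1-day VaR by the square root of the number of trading days (it ignores the drift, so treat it as an approximation). A sketch for a hypothetical 10-day horizon:
In [ ]:
# Sketch: scaling the 1-day VaR to a 10-day horizon with the square-root-of-time rule
horizon = 10   # hypothetical holding period in trading days
print(tabulate([['90%', VaR_90 * np.sqrt(horizon)],
                ['95%', VaR_95 * np.sqrt(horizon)],
                ['99%', VaR_99 * np.sqrt(horizon)]],
               headers=['Confidence Level', '10-day VaR']))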
VaR Calculation with Historical data (quantiles) for 1 stock, normal returns
In [10]:
# We are going to calculate the Value at Risk from the historical frequency of the returns.
# To do this we sort the returns and select the quantiles that correspond to the confidence levels.
df = df.dropna()
df.sort_values('returns', inplace= True, ascending = True)
plt.hist(df.returns, bins=40)
plt.xlabel('Returns')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
In [11]:
# We will select the 10%, 5% and 1% quantiles of the normal returns
VaR_90 = df['returns'].quantile(0.1)
VaR_95 = df['returns'].quantile(0.05)
VaR_99 = df['returns'].quantile(0.01)
print(tabulate([['90%', VaR_90], ['95%', VaR_95], ['99%', VaR_99]], headers=['Confidence Level', 'Value at Risk']))
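As a quick cross-check (an extra step, not in the original notebook), the same quantiles can be obtained directly with NumPy:
In [ ]:
# Sketch: cross-checking the historical VaR quantiles with NumPy
print(np.percentile(df['returns'], [10, 5, 1]))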
VaR Calculation with Historical data (quantiles) for 1 stock, log returns
In [12]:
# We are going to calculate the Value at Risk from the historical frequency of the log returns.
# To do this we sort the returns and select the quantiles that correspond to the confidence levels.
df = df.dropna()
df.sort_values('log_returns', inplace= True, ascending = True)
plt.hist(df.log_returns, bins=40)
plt.xlabel('Log returns')
plt.ylabel('Frequency')
plt.grid(True)
plt.show()
In [13]:
# We will select the 10%, 5% and 1% quantiles of the log returns
VaR_90 = df['log_returns'].quantile(0.1)
VaR_95 = df['log_returns'].quantile(0.05)
VaR_99 = df['log_returns'].quantile(0.01)
print(tabulate([['90%', VaR_90], ['95%', VaR_95], ['99%', VaR_99]], headers=['Confidence Level', 'Value at Risk']))
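Since these figures are log returns, they can be converted back into simple returns with exp(r) - 1 if a directly comparable percentage loss is wanted; a short sketch:
In [ ]:
# Sketch: converting the log-return VaR figures back to simple returns
print(tabulate([['90%', np.exp(VaR_90) - 1],
                ['95%', np.exp(VaR_95) - 1],
                ['99%', np.exp(VaR_99) - 1]],
               headers=['Confidence Level', 'VaR (simple return)']))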
In [14]:
# As we can see, the normal (simple) returns underestimate the losses
VaR Calculation for a portfolio with log returns
In [15]:
# Let's calculate the Value at Risk for a portfolio made of 50% AMZN and 50% TSLA stock
tickers = ['AMZN', 'TSLA']
start = datetime.datetime(2016, 1, 1)
end = datetime.datetime(2019, 1, 1)
portfolio = pd.DataFrame()
for t in tickers:
    portfolio[t] = web.DataReader(t, 'quandl', start, end)['AdjClose']
In [16]:
portfolio.head(5)
Out[16]:
In [17]:
# Let's calculate the log returns
portfolio_returns = np.log(portfolio / portfolio.shift(1))
portfolio_returns.head(5)
Out[17]:
In [18]:
# Let's calculate the daily and annualized mean return and standard deviation for AMZN
print(portfolio_returns['AMZN'].mean(),
      portfolio_returns['AMZN'].mean() * 250,
      portfolio_returns['AMZN'].std(),
      portfolio_returns['AMZN'].std() * 250 ** 0.5)
In [19]:
# Let's calculate the daily and annualized mean return and standard deviation for TSLA
print(portfolio_returns['TSLA'].mean(),
      portfolio_returns['TSLA'].mean() * 250,
      portfolio_returns['TSLA'].std(),
      portfolio_returns['TSLA'].std() * 250 ** 0.5)
In [20]:
# Let's calculate the daily covariance matrix of the log returns
cov_matrix = portfolio_returns.cov()
cov_matrix
Out[20]:
In [21]:
# Let's calculate the annual covariance matrix (250 trading days)
cov_matrix = portfolio_returns.cov()*250
cov_matrix
Out[21]:
In [22]:
#let's calculate how the stocks are correlated
corr_matrix = portfolio_returns.corr()
corr_matrix
Out[22]:
In [23]:
#let's calculate the portfolio risk
# we assume that our portfolio is made of 50% AMZN and 50% TSLA
weights = np.array([0.5, 0.5])
In [24]:
#First we calculate the portfolio variance
pfolio_var = np.dot(weights.T, np.dot(portfolio_returns.cov() * 250, weights))
pfolio_var
Out[24]:
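For two assets the matrix product above expands to w1²·var1 + w2²·var2 + 2·w1·w2·cov(1,2). As a cross-check (an extra step, not in the original notebook), the expanded formula should give the same number as the matrix calculation:
In [ ]:
# Sketch: expanding the two-asset portfolio variance formula as a cross-check
annual_cov = portfolio_returns.cov() * 250
manual_var = (weights[0]**2 * annual_cov.loc['AMZN', 'AMZN']
              + weights[1]**2 * annual_cov.loc['TSLA', 'TSLA']
              + 2 * weights[0] * weights[1] * annual_cov.loc['AMZN', 'TSLA'])
print(manual_var, pfolio_var)   # the two numbers should match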
In [25]:
# As the variance is the square of the standard deviation,
# we calculate the standard deviation, i.e. the volatility or risk
pfolio_vol = (np.dot(weights.T, np.dot(portfolio_returns.cov() * 250, weights))) ** 0.5
pfolio_vol
Out[25]:
In [26]:
# So the annual volatility (risk) of our portfolio is
print(str(round(pfolio_vol, 5) * 100) + ' %')
In [27]:
# So the 1-year VaR of our portfolio at 95% confidence (1.65 standard deviations) is
print(str(round(pfolio_vol*1.65, 5) * 100) + ' %')
In [28]:
# So the 1-year VaR of our portfolio at 99% confidence (2.33 standard deviations) is
print(str(round(pfolio_vol*2.33, 5) * 100) + ' %')
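To express these annual VaR figures in money terms, multiply by the portfolio value; a sketch assuming a hypothetical USD 1,000,000 portfolio:
In [ ]:
# Sketch: monetary 1-year VaR for a hypothetical USD 1,000,000 portfolio
portfolio_value = 1_000_000
print(tabulate([['95%', pfolio_vol * 1.65 * portfolio_value],
                ['99%', pfolio_vol * 2.33 * portfolio_value]],
               headers=['Confidence Level', '1-year VaR (USD)']))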