In a portfolio with 2 assets, total portfolio risk is calculated by $\sigma_p^2 = w_1^2 \sigma_1^2 + w_2^2 \sigma_2^2 + 2 w_1 w_2 \sigma_{12}$
In a portfolio with N assets, total portfolio risk is calculated by $\sigma_p^2 = \sum_{i=1}^{N} w_i^2 \sigma_i^2 + \sum_{i=1}^{N} \sum_{j \neq i}^{N} w_i w_j \sigma_{ij} = w^T \Sigma w$
To calculate the risk contribution of one asset, take the contribution of asset i's variance, $w_i^2 \sigma_i^2$, and add the part of the overlapping components of the covariance where asset i is involved: $\sum_{j \neq i}^{N} w_i w_j \sigma_{ij}$
To get the proportion of asset i's risk contribution to the total portfolio risk, divide its risk contribution by the total portfolio risk, $\sigma_p^2$
def risk_contribution(w, cov):
    """
    Return each constituent's share of total portfolio risk.

    Given portfolio weights `w` and a covariance matrix `cov`, computes
    w_i * (cov @ w)_i / sigma_p^2 for every asset i, i.e. each asset's
    component of portfolio variance expressed as a proportion of the total.
    """
    # sigma_p^2: total portfolio variance
    total_var = portfolio_vol(w, cov) ** 2
    # (cov @ w)_i is asset i's marginal contribution; scaling by w_i gives
    # its variance component, and dividing by total variance makes it a share
    return (w * (cov @ w)) / total_var
If you calculate the risk contribution of each asset in your portfolio, you can find the effective number of correlated bets (ENCB) with 1 divided by the sum of the squared risk contributions.
$\text{ENCB} = (\sum_{i=1}^N p_i^2)^{-1}$, where $p_i$ is the proportion of total portfolio risk contributed by asset $i$.
From this formula, we can see that ENCB is maximized when all the risk contributions are equal. A risk parity portfolio is a portfolio where all risk contributions are equal.
There's no analytical expression to calculate the weights for the risk parity portfolio. To calculate them, you need to use a numerical optimization algorithm:
def target_risk_contributions(target_risk, cov):
    """
    Find the long-only, fully-invested portfolio whose risk contributions
    are as close as possible (in squared error) to `target_risk`, given
    the covariance matrix `cov`. Returns the weight vector.
    """
    n = cov.shape[0]

    def squared_gap(weights, target_risk, cov):
        # Sum of squared differences between achieved and target risk contributions
        contribs = risk_contribution(weights, cov)
        return ((contribs - target_risk) ** 2).sum()

    # Weights must sum to 1 (fully invested)
    full_investment = {'type': 'eq',
                       'fun': lambda weights: np.sum(weights) - 1
                       }
    result = minimize(squared_gap,
                      np.repeat(1 / n, n),        # start from equal weights
                      args=(target_risk, cov),
                      method='SLSQP',
                      options={'disp': False},
                      constraints=(full_investment,),
                      bounds=((0.0, 1.0),) * n)   # each weight long-only, unlevered
    return result.x
def equal_risk_contributions(cov):
    """
    Return the weights of the equal-risk-contribution (risk parity)
    portfolio for covariance matrix `cov`: every asset contributes the
    same share of total portfolio risk.
    """
    # ERC is the special case of risk targeting where every target is 1/n
    n = cov.shape[0]
    equal_share = np.full(n, 1 / n)
    return target_risk_contributions(target_risk=equal_share, cov=cov)
def weight_erc(r, cov_estimator=sample_cov, **kwargs):
    """
    Weighting scheme for backtests: estimate a covariance matrix from the
    returns `r` using `cov_estimator` (extra kwargs are forwarded to it)
    and return the equal-risk-contribution weights.
    """
    return equal_risk_contributions(cov_estimator(r, **kwargs))
Use the 30 industry returns dataset from 1990 to 2015. Use cap-weights from 1990-01 as the initial weights.
# Load 30-industry monthly returns and market caps via the course toolkit
import edhec_risk_kit as erk
import numpy as np
import pandas as pd
ind_rets = erk.get_ind_returns(n_inds=30)['1990':'2018']
ind_mcaps = erk.get_ind_market_caps(n_inds=30, weights=True)['1990':'2018']
# Cap-weighted portfolio weights as of 1990-01
# (NOTE(review): '1990-1' vs '1990-01' — pandas parses both the same, but be consistent)
cw_weights = erk.weight_cw(r=ind_rets['1990-01'], cap_weights=ind_mcaps['1990-1'])
# Sample covariance over the full return history
cov = ind_rets.cov()
# Risk contributions of the cap-weighted portfolio
cw_rc = erk.risk_contribution(w=cw_weights, cov=cov)
# Industry with the largest risk contribution (displayed as notebook output)
cw_rc.idxmax()
# Rank contributions from largest to smallest
cw_rc.sort_values(inplace=True, ascending=False)
cw_rc.head()
cw_rc.tail()
# Same analysis for the equal-weighted portfolio
ew_weights = erk.weight_ew(r=ind_rets['1990-01'], cap_weights=ind_mcaps['1990-1'])
ew_rc = erk.risk_contribution(w=ew_weights, cov=cov)
ew_rc.sort_values(inplace=True, ascending=False)
ew_rc.head()
ew_rc.tail()
# Compare risk concentration: how much risk the single riskiest asset carries
print('Max risk contribution of a single asset in the cap-weighted portfolio:', cw_rc.max())
print('Max risk contribution of a single asset in the equal-weighted portfolio:', ew_rc.max())
# ERC weights estimated from 1990-2014 returns (sample window differs from `cov` above)
weights_erc = pd.Series(erk.weight_erc(r=ind_rets['1990':'2014']), index=ind_rets.columns)
weights_erc.sort_values(ascending=False, inplace=True)
# highest allocation in ERC portfolio
weights_erc.head()
# lowest allocation in ERC portfolio
weights_erc.tail()