He estado intentando utilizar la aproximación de Heaviside para una opción de cesta simple, de modo que pueda calcular las Gammas con AAD (Diferenciación Automática Adjunta). Esta rutina suaviza la función de pago frente a una función máximo, por lo que el pago es continuo en lugar de discontinuo: es una versión suave de la función escalón. Creo que he llamado correctamente a la función de aproximación de Heaviside, pero podría haber cometido un pequeño error. Puede ver la línea en el código: payoff = HeavisideApprox(payoff, np.sum(F0*weights*vols*np.sqrt(T)* 0.05))* payoff
Mis preguntas son 1) ¿He aplicado correctamente Heaviside? 2) ¿Es aceptable la diferencia de Gammas entre AAD y FD? 3) He tenido que multiplicar los Gammas de AAD por los pesos para que coincidan con FD, y no estoy seguro de si otras griegas necesitan que se haga eso también (de 2º orden quizás). ¡Cualquier sugerencia es apreciada! Código sigue:
import autograd.numpy as np
from autograd import elementwise_grad as egrad # egrad handles multiple sets of inputs
def Dirac(x):
    """Point spike: 1.0 where x == 0 exactly, 0.0 elsewhere.

    NOTE(review): despite the name this is not a smoothed Dirac delta —
    np.sign(x) is zero only at exactly x == 0, so the spike fires on a
    measure-zero set. It is used by HeavisideApprox purely to patch the
    values at the band boundaries; confirm that is the intended kernel.
    """
    half_step = (np.sign(x) + 1) / 2  # 0, 0.5 or 1 depending on sign(x)
    return np.sin(half_step * np.pi)
def HeavisideApprox(x, epsilon):
    """Smooth approximation of the Heaviside step function.

    Returns ~0 for x < -epsilon, ~1 for x > epsilon, and a half-sine
    ramp in between, so a payoff indicator built from it stays
    differentiable for AAD.

    NOTE(review): relies on the sibling Dirac() helper to fix up the
    exact boundary points x == +/-epsilon.
    """
    # Indicator of the transition band (-epsilon, epsilon): 1 inside,
    # 0 outside; the Dirac terms adjust the two boundary points.
    in_band = 0.5 * (np.sign(x + epsilon) - np.sign(x - epsilon))
    in_band = in_band - 1 / 2 * Dirac(x + epsilon) + 1 / 2 * Dirac(x - epsilon)
    # Half-sine ramp rising from 0 to 1 across the band.
    ramp = 0.5 + 0.5 * np.sin(x / 2 / epsilon * np.pi)
    # Indicator of x > epsilon, boundary point again handled by Dirac.
    above = (1 + np.sign(x - epsilon)) / 2 - 0.5 * Dirac(x - epsilon)
    return in_band * ramp + above
def MC_basket(F0, vols, K, T, IR, corr, weights, trials, steps, maximum=False):
    """Monte-Carlo price of a basket call on forwards.

    Plain European basket option when steps == 1, (forward) Asian when
    steps > 1 (the payoff averages the simulated forwards across steps).

    Parameters
    ----------
    F0 : np.ndarray       forward prices, shape (n_assets,)
    vols : np.ndarray     lognormal vols, shape (n_assets,)
    K : float             strike on the weighted basket
    T : float             expiry in years
    IR : float            continuously-compounded discount rate
    corr : np.ndarray     correlation matrix, shape (n_assets, n_assets)
    weights : np.ndarray  basket weights, shape (n_assets,)
    trials : int          number of Monte-Carlo paths
    steps : int           number of averaging dates (1 -> European)
    maximum : bool        True -> hard max() payoff (kinked; AAD Gamma
                          degenerates), False -> smoothed payoff via
                          HeavisideApprox.

    Returns
    -------
    Discounted expected payoff (scalar).
    """
    np.random.seed(123)  # fixed seed so FD bumps reuse the same draws
    # PSD repair + square root of the correlation matrix.
    # NOTE(review): eigh reads only one triangle, so an asymmetric corr
    # input is silently symmetrized.
    eigvals, eigvects = np.linalg.eigh(corr)
    eigvals[eigvals < 0.00001] = 0.00001  # clip tiny/negative eigenvalues
    V = np.real(eigvects.dot(np.sqrt(np.diag(eigvals))))
    randnums = np.random.randn(steps, F0.shape[0], trials)  # 3-D for Asian
    randnumscorr = V.dot(randnums).T  # -> (trials, steps, n_assets)
    # Terminal forwards; NOTE(review): every step uses the full horizon T
    # rather than the step's own time fraction — confirm that is intended
    # when steps > 1.
    paths = F0 *np.exp((-0.5*vols*vols)*T + randnumscorr*vols*np.sqrt(T))
    paths = np.mean(paths, axis=1)  # average across steps (Asian feature)
    if maximum:
        # Hard max() payoff: continuous but kinked, so AAD second
        # derivatives are zero almost everywhere.
        payoff = np.mean(np.maximum(np.sum(paths*weights, axis=1)-K,0)) * np.exp(-IR*T)
        return payoff
    # Smoothed payoff: smooth step indicator times intrinsic value;
    # epsilon scales with (an estimate of) the basket's stdev.
    payoff = np.sum(paths*weights, axis=1)-K
    payoff = HeavisideApprox(payoff, np.sum(F0*weights*vols*np.sqrt(T)* 0.05))* payoff
    return np.mean(payoff)* np.exp(-IR*T)
def Greeks(F0, vols, K, T, IR, corr, weights, trials, steps=None):
    """First- and second-order sensitivities of MC_basket via autograd.

    Returns (delta, gamma, vega, theta, rho) with theta per calendar day
    (sign flipped) and vega/rho per 1% move.
    """
    # Single reverse pass for all first-order Greeks:
    # dV/dF0, dV/dvols, dV/dT, dV/dIR.
    first_order = egrad(MC_basket, (0, 1, 3, 4))
    # Second application of egrad gives row sums of the Hessian in F0,
    # i.e. sum_j d2V/dF_i dF_j — NOT the diagonal d2V/dF_i^2.
    second_order = egrad(egrad(MC_basket, 0))
    args = (F0, vols, K, T, IR, corr, weights, trials, steps)
    delta, vega, theta, rho = first_order(*args)
    # NOTE(review): multiplying by weights recovers the diagonal Gamma
    # only because the payoff depends on F0 through sum(w*F) and the
    # weights sum to 1 — this trick does not generalize to other payoffs.
    gamma = weights * second_order(*args)
    # Standardize units for conventional reporting.
    return delta, gamma, vega / 100, theta / -365, rho / 100
# --- example run -------------------------------------------------------
# NOTE(review): the original correlation matrix was asymmetric
# (corr[1,2]=0.94 vs corr[2,1]=0.9). np.linalg.eigh reads only the lower
# triangle, so 0.9 is the value that actually took effect; the matrix is
# symmetrized accordingly (same numerical results, no hidden surprise).
corr = np.array([[1, 0.9, 0.8], [0.9, 1, 0.9], [0.8, 0.9, 1]])
F0 = np.array([12.0, 10.0, 8.0])
weights = np.array([0.6, 0.3, 0.1])  # must sum to 1 (Greeks() relies on it)
vols = np.array([0.1, 0.2, 0.3])
K = 12
T = 0.5
IR = 0.03
steps = 1       # 1 -> plain European basket
trials = 100000
MCb = MC_basket(F0, vols, K, T, IR, corr, weights, trials, steps)
delta, gamma, vega, theta, rho = Greeks(F0, vols, K, T, IR, corr, weights, trials, steps)
print("\n[HEAVISIDE]\n\nMC basket price:", round(MCb,3), "\ndeltas AAD:", np.round(delta,3), "\ngammas AAD:", np.round(gamma,3))#, "\nvegas:", np.round(vega,3), "\ntheta:", np.round(theta,3), "\nrho:", np.round(rho,3))
bump = 0.005  # relative (percentage) bump size
# Finite-difference delta/gamma with the smoothed (Heaviside) payoff,
# central differences on percentage bumps of each forward in turn,
# for comparison against the AAD Greeks.
deltaFD = np.zeros(len(F0))
gammaFD = np.zeros(len(F0))
for i in range(len(F0)):
    shift = np.zeros(len(F0))
    shift[i] = bump
    price_up = MC_basket(F0 * (1 + shift), vols, K, T, IR, corr, weights, trials, steps)
    price_down = MC_basket(F0 * (1 - shift), vols, K, T, IR, corr, weights, trials, steps)
    deltaFD[i] = (price_up - price_down) / (2 * bump * F0[i])
    gammaFD[i] = (price_up - 2 * MCb + price_down) / (bump ** 2 * F0[i] ** 2)
print("\ndeltas FD:",deltaFD.round(3))
print("gammas FD:", gammaFD.round(3))
# Repeat the FD delta/gamma check with the hard max() payoff, to see how
# far the smoothed results drift from the un-smoothed ones.
MCb = MC_basket(F0, vols, K, T, IR, corr, weights, trials, steps, maximum=True)
deltaFD = np.zeros(len(F0))
gammaFD = np.zeros(len(F0))
for i in range(len(F0)):
    shift = np.zeros(len(F0))
    shift[i] = bump
    price_up = MC_basket(F0 * (1 + shift), vols, K, T, IR, corr, weights, trials, steps, maximum=True)
    price_down = MC_basket(F0 * (1 - shift), vols, K, T, IR, corr, weights, trials, steps, maximum=True)
    deltaFD[i] = (price_up - price_down) / (2 * bump * F0[i])
    gammaFD[i] = (price_up - 2 * MCb + price_down) / (bump ** 2 * F0[i] ** 2)
print("\n[MAX]\n\nMC basket price:",MCb.round(3))
print("deltas FD:",deltaFD.round(3))
print("gammas FD:", gammaFD.round(3))
Salidas:
[HEAVISIDE]
MC basket price: 0.109
deltas AAD: [0.11 0.061 0.022]
gammas AAD: [0.09 0.024 0.003]
deltas FD: [0.11 0.061 0.022]
gammas FD: [0.087 0.025 0.003]
[MAX]
MC basket price: 0.109
deltas FD: [0.11 0.061 0.022]
gammas FD: [0.087 0.025 0.003]