Nao Mimoto - Dept. of Statistics : The University of Akron
TS Class Web Page – R resource page
Conditional Mean: \(0\)
Conditional Variance : \(\sigma_t^2\)
(unconditional) Mean: \(0\)
(unconditional) Variance : \(\sigma^2 = \frac{\omega}{1-\alpha-\beta}\) (constant)
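Where does the constant unconditional variance come from? Take expectations of the GARCH(1,1) variance equation defined below and assume weak stationarity (\(\alpha + \beta < 1\)); since \(e_t\) is independent of \(\sigma_t\) with \(E(e_t^2)=1\), we have \(E(Y_t^2) = E(\sigma_t^2)\), so
\[\begin{align*} \sigma^2 \;=\; E(Y_t^2) \;=\; E(\sigma_t^2) &= \omega + \alpha E(Y_{t-1}^2) + \beta E(\sigma_{t-1}^2) \\\\ &= \omega + (\alpha + \beta)\,\sigma^2 \hspace{5mm} \Longrightarrow \hspace{5mm} \sigma^2 = \frac{\omega}{1-\alpha-\beta}. \end{align*}\]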
Daily Price of SP500 ETF (SPY) downloaded from Yahoo!
library(quantmod)
source('https://nmimoto.github.io/R/TS-00.txt')
getSymbols("SPY") #- SP500 download from Yahoo!
## [1] "SPY"
## [1] FALSE
## [1] TRUE
# Fit GARCH model
library(fGarch)
Y <- diff( log(SPY) )[-1] # remove the first diff for NA
Fit01 <- garchFit(~ garch(1,1), data=Y, cond.dist="norm", include.mean = FALSE, trace = FALSE)
Fit01
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~garch(1, 1), data = Y, cond.dist = "norm",
## include.mean = FALSE, trace = FALSE)
##
## Mean and Variance Equation:
## data ~ garch(1, 1)
## <environment: 0x7f82dc61aa70>
## [data = Y]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## omega alpha1 beta1
## 3.7887e-06 1.7598e-01 7.8776e-01
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## omega 3.789e-06 5.539e-07 6.840 7.91e-12 ***
## alpha1 1.760e-01 1.766e-02 9.966 < 2e-16 ***
## beta1 7.878e-01 1.814e-02 43.420 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 8736.457 normalized: 3.380982
##
## Description:
## Mon Apr 13 18:20:03 2020 by user:
## Fit01@fit$par                   # estimated parameters
## Fit01@residuals                 # NOT the GARCH residuals! This is the same as Y.
## Fit01@sigma.t                   # estimated sig_t
## Fit01@residuals/Fit01@sigma.t   # the (standardized) GARCH residuals
Fit01@fit$ics # AIC and BIC are here
## AIC BIC SIC HQIC
## -6.759641 -6.752841 -6.759644 -6.757177
sigma.upper = xts(1.96*Fit01@sigma.t, order.by=index(Y))
sigma.lower = xts(-1.96*Fit01@sigma.t, order.by=index(Y))
plot( cbind(Y, sigma.upper, sigma.lower),
col=c("black", "red", "red"), lwd=c(2,1,1) )
plot( as.numeric( Y["2018::"]), type="h", lwd=2)
lines( as.numeric(sigma.upper["2018::"]), col="red")
lines( as.numeric(sigma.lower["2018::"]), col="red")
## B-L test H0: the series is uncorrelated
## M-L test H0: the square of the series is uncorrelated
## J-B test H0: the series came from a Normal distribution
## SD : Standard Deviation of the series
## BL15 BL20 BL25 ML15 ML20 JB SD
## [1,] 0.4 0.271 0.063 0.463 0.74 0 0.998
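The table above comes from a diagnostic helper sourced from TS-00.txt. Roughly the same checks can be reproduced with standard functions. A minimal sketch, assuming the tseries package for the Jarque-Bera test (the helper's exact p-values may differ slightly, e.g. in the lags or degrees of freedom used):
library(tseries)
res <- Fit01@residuals / Fit01@sigma.t          # standardized GARCH residuals
Box.test(res,   lag = 15, type = "Ljung-Box")   # B-L: are the residuals uncorrelated?
Box.test(res^2, lag = 15, type = "Ljung-Box")   # M-L: are the squared residuals uncorrelated?
jarque.bera.test(res)                           # J-B: do the residuals look Normal?
sd(res)                                         # SD of the standardized residuals (should be near 1)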
GARCH(1,1)
\[\begin{align*} Y_t &= \sigma_t e_t \hspace{10mm} e_t \sim_{iid} N(0,1)\\\\ \sigma_t^2 &= \omega + \alpha Y_{t-1}^2 + \beta \sigma^2_{t-1} \end{align*}\]
[Conditional distribution] \(=\) [Distribution of \(e_t\)]
Choosing the correct distribution for \(e_t\) is very important in GARCH parameter estimation.
x <- seq(-10,10, .1)
#- Standardized t-distribution
plot(x, dstd(x, mean = 0, sd = 1, nu = 5), type="l", ylim=c(0,.5))
lines(x, dnorm(x, mean = 0, sd = 1), col="red" )
#- Generalized Error Distribution
plot(x, dged(x, mean = 0, sd = 1, nu = 4), type="l", ylim=c(0,.5))
lines(x, dnorm(x, mean = 0, sd=1) , col="red" )
#- Skewed-Standardized t-distribution
plot( x, dsstd(x, mean = 0, sd = 1, nu = 5, xi = 1.5), type="l", ylim=c(0,.5))
lines(x, dnorm(x, mean = 0, sd = 1), col="red" )
#- Skewed-Generalized Error Distribution
plot(x, dsged(x, mean = 0, sd = 1, nu = 5, xi = 1.5) , type="l", ylim=c(0,.5))
lines(x, dnorm(x, mean = 0, sd = 1), col="red" )
True parameters: \((\omega, \alpha, \beta) = (.024, .1, .8)\)
Case 1. True conditional distribution: Normal. Estimated using: Normal.
Case 2. True conditional distribution: std(5). Estimated using: Normal.
Case 3. True conditional distribution: sged(skew = .9, shape = 1.45). Estimated using: Normal.
# Simulation showing how the error distribution affects parameter estimation in GARCH(1,1) ---
# (Takes about 3min)
n <- 1000
itt <- 1000
theta <- c(.024, .1, .8)
set.seed(62192) #36621)
Param11 <- matrix(rep(0,3*itt), itt, 3)
for (i in 1:itt) {
spec <- garchSpec(model = list(omega=theta[1], alpha=theta[2], beta=theta[3]),
cond.dist="norm")
x <- garchSim(spec, n = n, extended=FALSE)
est2 <- garchFit(~ garch(1,1), data=x, cond.dist="norm", include.mean = FALSE, trace = FALSE)
Param11[i,] <- coef(est2)
}
Param12 <- matrix(rep(0,3*itt), itt, 3)
for (i in 1:itt) {
spec <- garchSpec(model = list(shape=5, omega=theta[1], alpha=theta[2], beta=theta[3]),
cond.dist="std")
x <- garchSim(spec, n = n, extended=FALSE)
est2 <- garchFit(~ garch(1,1), data=x, cond.dist="norm", include.mean = FALSE, trace = FALSE)
Param12[i,] <- coef(est2)
}
Param13 <- matrix(rep(0,3*itt), itt, 3)
for (i in 1:itt) {
spec <- garchSpec(model = list(skew=.9, shape=1.45, omega=theta[1], alpha=theta[2], beta=theta[3]),
cond.dist="sged")
x <- garchSim(spec, n = n, extended=FALSE)
est2 <- garchFit(~ garch(1,1), data=x, cond.dist="norm", include.mean = FALSE, trace = FALSE)
Param13[i,] <- coef(est2)
}
## Warning in sqrt(diag(fit$cvar)): NaNs produced
layout(matrix(1:9, 3, 3, byrow=T))
hist(Param11[,1], 10, xlim=c(0,.5)); hist(Param11[,2], 10, xlim=c(0,.5));
hist(Param11[,3], xlim=c(0.4,1), breaks=10)
hist(Param12[,1], 10, xlim=c(0,.5)); hist(Param12[,2], 10, xlim=c(0,.5));
hist(Param12[,3], xlim=c(0.4,1), breaks=10)
hist(Param13[,1], 10, xlim=c(0,.5)); hist(Param13[,2], 10, xlim=c(0,.5));
hist(Param13[,3], xlim=c(0.4,1), breaks=10)
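A compact numerical companion to the histograms (not in the original code): the average estimate in each case next to the true parameter values.
# Average parameter estimates in each case vs. the true values
avg <- rbind(true = theta,
             norm = colMeans(Param11),
             std5 = colMeans(Param12),
             sged = colMeans(Param13))
colnames(avg) <- c("omega", "alpha", "beta")
avg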
P11 <- (Param11 - t(matrix(theta, 3, itt) ))^2
P12 <- (Param12 - t(matrix(theta, 3, itt) ))^2
P13 <- (Param13 - t(matrix(theta, 3, itt) ))^2
# Raw MSE
rbind(apply(P11, 2, mean),
apply(P12, 2, mean),
apply(P13, 2, mean) )
## [,1] [,2] [,3]
## [1,] 0.0001829513 0.0008560599 0.005964670
## [2,] 0.0003238745 0.0021516543 0.011084095
## [3,] 0.0002726883 0.0011342782 0.008536437
# Relative MSE
rbind(apply(P11, 2, mean)/apply(P11, 2, mean), # MSE1 / MSE1
apply(P12, 2, mean)/apply(P11, 2, mean), # MSE2 / MSE1
apply(P13, 2, mean)/apply(P11, 2, mean) ) # MSE3 / MSE1
## [,1] [,2] [,3]
## [1,] 1.000000 1.000000 1.000000
## [2,] 1.770277 2.513439 1.858291
## [3,] 1.490497 1.324999 1.431167
GARCH(1,1) is strictly stationary if \[ E( \log(\beta + \alpha e_t^2)) < 0. \]
It is weakly (covariance) stationary if \[ \alpha + \beta < 1, \] which also implies the condition above (by Jensen's inequality).
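For the SPY fit obtained earlier, this condition can be checked directly from the estimated coefficients (a quick sanity check, not part of the original code):
# Check the weak-stationarity condition alpha + beta < 1 for Fit01
co <- coef(Fit01)
co["alpha1"] + co["beta1"]    # approx 0.176 + 0.788 = 0.964 < 1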
GARCH(1,1) \[\begin{align*} Y_t &= \sigma_t e_t \hspace{10mm} e_t \sim_{iid} N(0,1)\\\\ \sigma_t^2 &= \omega + \alpha Y_{t-1}^2 + \beta \sigma^2_{t-1} \end{align*}\]
Using observations \(\{Y_1, \ldots, Y_n\}\), the residuals are \[ \hat e_t = Y_t/ \hat \sigma_t. \]
But how can we get \(\hat \sigma_t\)?
Starting with the definition of GARCH(1,1),
\[\begin{align*} \sigma_t^2 &= \omega + \alpha Y_{t-1}^2 + \beta \sigma^2_{t-1} \\\\ &= \omega + \alpha Y_{t-1}^2 + \beta \Big(\omega + \alpha Y_{t-2}^2 + \beta \sigma^2_{t-2} \Big)\\ \\ &= \omega + \beta \omega + \alpha Y_{t-1}^2 + \beta \alpha Y_{t-2}^2 + \beta^2 \sigma^2_{t-2} \\ \\ &= \omega + \beta \omega + \alpha Y_{t-1}^2 + \beta \alpha Y_{t-2}^2 + \beta^2 \Big(\omega + \alpha Y_{t-3}^2 + \beta \sigma^2_{t-3} \Big) \\ \\ &= \omega + \beta \omega + \beta^2 \omega + \alpha Y_{t-1}^2 + \beta \alpha Y_{t-2}^2 + \beta^2 \alpha Y_{t-3}^2 + \beta^3 \sigma^2_{t-3} \end{align*}\]
Continuing for \(k\) steps, we get
\[\begin{align*} \sigma_t^2 &= \omega (1 + \beta + \cdots + \beta^k) + \alpha \sum_{i=0}^k \beta^i Y_{t-1-i}^2 + \beta^{k+1} \sigma^2_{t-1-k} \\\\ &\longrightarrow \hspace{3mm} \frac{\omega}{1-\beta} + \alpha \sum_{i=0}^\infty \beta^i Y_{t-1-i}^2 \hspace{5mm} \text{as } k \to \infty, \hspace{3mm} \text{since } 0 < \beta < 1. \end{align*}\]
That means if we keep going, we can write \[ \sigma_t^2 \hspace{3mm} = \hspace{3mm} \frac{\omega}{1-\beta} + \alpha \sum_{i=0}^\infty \beta^i Y_{t-1-i}^2 \]
We will use the truncated and estimated version of this, \[ \hat \sigma_t^2 \hspace{3mm} = \hspace{3mm} \frac{\hat \omega}{1- \hat \beta} \hspace{3mm} + \hspace{3mm} \hat \alpha \sum_{i=0}^{t-1} \hat \beta^i Y_{t-1-i}^2 \]
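Equivalently, \(\hat \sigma_t^2\) can be built up with the recursion itself. Here is a minimal sketch that recomputes it from the Fit01 coefficients and compares with Fit01@sigma.t; the recursion is started at the unconditional variance (an assumption), so the first few values need not match fGarch's own initialization exactly.
# Rebuild sigma_t^2 recursively from the estimated coefficients
# and compare with the values stored in Fit01@sigma.t
co <- coef(Fit01)
y  <- as.numeric(Y)
n  <- length(y)
sig2 <- numeric(n)
sig2[1] <- co["omega"] / (1 - co["alpha1"] - co["beta1"])   # start at the unconditional variance
for (t in 2:n) {
  sig2[t] <- co["omega"] + co["alpha1"]*y[t-1]^2 + co["beta1"]*sig2[t-1]
}
head( cbind(byhand = sqrt(sig2), fGarch = Fit01@sigma.t) )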
GARCH(1,1) says, \[ Y_t \hspace{3mm} = \hspace{3mm} \sigma_t e_t \]
Using observations \(\{Y_1, \ldots, Y_n\}\), the residuals are \[ \hat e_t = Y_t/ \hat \sigma_t \]
GARCH(1,1) \[\begin{align*} Y_t &= \sigma_t e_t \hspace{10mm} e_t \sim_{iid} N(0,1)\\\\ \sigma_t^2 &= \omega + \alpha Y_{t-1}^2 + \beta \sigma^2_{t-1} \end{align*}\]
We now have the 1-step forecast of tomorrow’s volatility \(\sigma_{t+1}\), \[\begin{align*} \hat\sigma_{t}^2(1) &= \hat \omega + \hat \alpha Y_{t}^2 + \hat \beta \hat \sigma^2_{t} \end{align*}\]
How well can GARCH predict \(\sigma_t\) of the future?
library(quantmod)
library(fGarch)
source('https://nmimoto.github.io/R/TS-00.txt')
theta = c(.5, .7, .2)
spec1 <- garchSpec(model = list(omega=theta[1], alpha=theta[2], beta=theta[3]), cond.dist="norm")
#- Generate GARCH
set.seed(66343)
X <- garchSim(spec1, n = 1000, extended=T) #extended=T: now Y has 3 columns: garch sigma eps
## Warning in if (extended) ans else ans[, "garch"]: the condition has length > 1
## and only the first element will be used
Y <- X[,1] #- simulated GARCH
sig.true <- X[,2] #- this is true sigma
plot(Y, main="Simulated GARCH")
#- Estimate GARCH
out1 <- garchFit(~ garch(1,1), data=Y, cond.dist="norm", include.mean = FALSE, trace = FALSE)
sig.estim <- xts(out1@sigma.t, order.by=index(Y))
#- compare true sigma vs estimated sigma
head( cbind(sig.true, sig.estim) )
## sigma sig.estim
## 2017-07-18 0.9844680 1.7773189
## 2017-07-19 0.8719402 1.0198346
## 2017-07-20 0.8528739 0.9322072
## 2017-07-21 0.9465185 1.0167695
## 2017-07-22 0.9430606 1.0033031
## 2017-07-23 1.1075998 1.1624516
plot(cbind(sig.estim["2019"], sig.true["2019"]), col=c("red", "black"),
main="True Sigma (Blk) vs Estimated (Red)")
#- Forecast sig.t for tomorrow (forecast sigma_t) by hand
w <- out1@fit$par[1]
a <- out1@fit$par[2]
b <- out1@fit$par[3]
sig.pred <- xts( sqrt( w + a*Y[1000]^2 + b*out1@sigma.t[1000]^2 ), order.by=index(Y[1000])+1)
#- Forecast sig.t (same as above)
sig.pred2 <- predict(out1, 1)
sig.pred2
## meanForecast meanError standardDeviation
## 1 0 1.061806 1.061806
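For horizons beyond one step, the standard GARCH(1,1) calculation (not shown in the original notes) replaces the unknown \(Y_{t+j}^2\) by its forecast, so the variance forecasts satisfy
\[\begin{align*} \sigma_t^2(h) &= \hat \omega + (\hat \alpha + \hat \beta)\, \sigma_t^2(h-1), \hspace{10mm} h \ge 2, \end{align*}\]
and therefore approach the unconditional variance \(\hat \omega/(1-\hat \alpha-\hat \beta)\) geometrically as \(h\) grows. This is why the standardDeviation column of a multi-step predict() call settles toward a constant.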
#- Rolling 1-step prediction of sig.t
sig.pred1 <- numeric(0)
date.stp <- numeric(0)
for (i in 1:750){
T <- Y[(1:250)+(i-1)]
n <- length(T)
out1 <- garchFit(~ garch(1,1), data=T, cond.dist="norm", include.mean = FALSE, trace = FALSE)
sig.pred1[i] <- predict(out1)[1,3] #- sig.t prediction
date.stp[i] <- index(T[n])+1
}
sig.pred <- xts(sig.pred1, order.by=as.Date(date.stp))
#- Plot and compare estimated vs rolling predicted
plot(cbind(sig.pred, sig.estim, sig.true),
col=c("blue", "red", "black"),
main="sigma.t: True (Blk) vs Estim (Red) vs Pred(Blue)")
plot(cbind(sig.pred['2019'], sig.estim['2019'], sig.true['2019']),
col=c("blue", "red", "black"),
lwd=c(1,1,1),
main="sigma.t: True (Blk) vs Estim (Red) vs Pred(Blue)")
sigma.upper.t = xts( 1.96*sig.true, order.by=index(Y))
sigma.lower.t = xts(-1.96*sig.true, order.by=index(Y))
sigma.upper.e = xts( 1.96*sig.estim, order.by=index(sig.estim))
sigma.lower.e = xts(-1.96*sig.estim, order.by=index(sig.estim))
sigma.upper.p = xts( 1.96*sig.pred, order.by=index(sig.pred))
sigma.lower.p = xts(-1.96*sig.pred, order.by=index(sig.pred))
plot( cbind(Y, sigma.upper.t, sigma.lower.t),
col=c("black", "black", "black"), lwd=c(2,1,1) )
plot( cbind(Y, sigma.upper.t, sigma.lower.t,
sigma.upper.e, sigma.lower.e,
sigma.upper.p, sigma.lower.p),
col=c("black", "black", "black", "red", "red", "blue", "blue"),
lwd=c(2,1,1,1,1) )
plot( cbind(Y["2019"],
sigma.upper.t["2019"], sigma.lower.t["2019"],
sigma.upper.e["2019"], sigma.lower.e["2019"],
sigma.upper.p["2019"], sigma.lower.p["2019"]),
col=c("black", "black", "black", "red", "red", "blue", "blue"),
lwd=c(2,1,1,1,1) )
plot( as.numeric( Y["2019::"]), type="h", lwd=2,
      xlab="Simulated GARCH Y")
lines( as.numeric(sigma.upper.e["2019::"]), col="red")
lines( as.numeric(sigma.lower.e["2019::"]), col="red")
lines( as.numeric(sigma.upper.p["2019::"]), col="blue")
lines( as.numeric(sigma.lower.p["2019::"]), col="blue")
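Since the true \(\sigma_t\) is known here (simulated data), the estimation error and the rolling-prediction error can also be compared numerically. A minimal sketch (not part of the original code), restricted to the dates where all three series are available:
# Compare estimated and rolling-predicted sigma.t with the true simulated sigma.t
cmp <- na.omit( cbind( xts(as.numeric(sig.true), order.by=index(Y)),
                       sig.estim,
                       sig.pred) )
colnames(cmp) <- c("true", "estim", "pred")
mean( (cmp[,"estim"] - cmp[,"true"])^2 )   # MSE of in-sample estimated sigma.t
mean( (cmp[,"pred"]  - cmp[,"true"])^2 )   # MSE of rolling 1-step predicted sigma.t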
See how close the estimated sigma and the rolling predicted sigma are, using real data.
library(quantmod)
source('https://nmimoto.github.io/R/TS-00.txt')
getSymbols("SPY")          #- SP500 download from Yahoo!
SPY <- Ad(SPY)['2010::']
is.ts(SPY)                 # not a ts object
is.xts(SPY)                # it is an xts object
plot( log(SPY) )
plot( diff( log(SPY) ) )
# Fit GARCH model
library(fGarch)
Y <- diff( log(SPY) )[-1] # remove the first diff for NA
Fit01 <- garchFit(~ garch(1,1), data=Y, cond.dist="norm", include.mean = FALSE, trace = FALSE)
Fit01
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~garch(1, 1), data = Y, cond.dist = "norm",
## include.mean = FALSE, trace = FALSE)
##
## Mean and Variance Equation:
## data ~ garch(1, 1)
## <environment: 0x7f82e3fb4238>
## [data = Y]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## omega alpha1 beta1
## 3.7887e-06 1.7598e-01 7.8776e-01
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## omega 3.789e-06 5.539e-07 6.840 7.91e-12 ***
## alpha1 1.760e-01 1.766e-02 9.966 < 2e-16 ***
## beta1 7.878e-01 1.814e-02 43.420 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 8736.457 normalized: 3.380982
##
## Description:
## Mon Apr 13 18:22:52 2020 by user:
sig.estim <- xts(Fit01@sigma.t, order.by=index(Y))
## Fit01@fit$par                   # estimated parameters
## Fit01@residuals                 # NOT the GARCH residuals! This is the same as Y.
## Fit01@sigma.t                   # estimated sig_t
## Fit01@residuals/Fit01@sigma.t   # the (standardized) GARCH residuals
Fit01@fit$ics # AIC and BIC are here
## AIC BIC SIC HQIC
## -6.759641 -6.752841 -6.759644 -6.757177
## meanForecast meanError standardDeviation
## 1 0 1.937290 1.937290
## 2 0 1.934433 1.934433
## 3 0 1.932181 1.932181
## 4 0 1.930406 1.930406
## 5 0 1.929008 1.929008
## 6 0 1.927907 1.927907
## 7 0 1.927040 1.927040
## 8 0 1.926358 1.926358
## 9 0 1.925820 1.925820
## 10 0 1.925397 1.925397
length(Y)   # number of daily returns
## [1] 2584
#- Rolling 1-step prediction of sig.t
Y <- Y # Original data
window.size <- 250 # Window size for estimation
sig.pred1 <- numeric(0)
date.stp <- numeric(0)
for (i in 1:(length(Y)-window.size)){
T <- Y[(1:window.size)+(i-1)]
n <- length(T)
out1 <- garchFit(~ garch(1,1), data=T, cond.dist="norm", include.mean = FALSE, trace = FALSE)
sig.pred1[i] <- predict(out1)[1,3] #- sig.t prediction
date.stp[i] <- index(T[n])+1
}
## Warning in sqrt(diag(fit$cvar)): NaNs produced
## (the same warning is repeated for many of the rolling windows)
sig.pred <- xts(sig.pred1, order.by=as.Date(date.stp))
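One detail worth noting about the loop above (an observation, not something from the original notes): index(T[n])+1 stamps each forecast with the next calendar day, which lands on a weekend after every Friday, so the prediction dates do not always coincide with actual trading days. A hedged alternative to the assignment above is to stamp each prediction with the date of the observation it targets:
#- Optional: re-stamp the rolling predictions onto the actual trading days.
#  Window i (of length window.size) predicts observation window.size + i of Y.
sig.pred <- xts(sig.pred1, order.by = index(Y)[(window.size+1):length(Y)])
With that change, cbind(sig.estim, sig.pred) lines up by trading day without gaps.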
#- Plot and compare estimated vs rolling predicted
plot(cbind(sig.estim['2016::2019'],
sig.pred[ '2016::2019']),
col=c("red", "blue"), lwd=c(1,1,1),
main="sigma.t: Estim (Red) vs Pred(Blue)")
plot(cbind(sig.estim['2019::'],
sig.pred[ '2019::']),
col=c("red", "blue"), lwd=c(1,1,1),
main="sigma.t: Estim (Red) vs Pred(Blue)")
sigma.upper.e = xts( 1.96*sig.estim, order.by=index(sig.estim))
sigma.lower.e = xts(-1.96*sig.estim, order.by=index(sig.estim))
sigma.upper.p = xts( 1.96*sig.pred, order.by=index(sig.pred))
sigma.lower.p = xts(-1.96*sig.pred, order.by=index(sig.pred))
plot( cbind(Y[ "2017::"],
sigma.upper.e["2017::"], sigma.lower.e["2017::"],
sigma.upper.p["2017::"], sigma.lower.p["2017::"]),
main="Y with 95perc daily PI",
col=c("black", "red", "red", "blue", "blue"), lwd=c(2,1,1,1,1) )
plot( cbind(Y[ "2019::"],
sigma.upper.e["2019::"], sigma.lower.e["2019::"],
sigma.upper.p["2019::"], sigma.lower.p["2019::"]),
main="Y with 95perc daily PI",
col=c("black", "red", "red", "blue", "blue"), lwd=c(2,1,1,1,1) )
plot( as.numeric( Y["2019::"]), type="h", lwd=2, ylim=c(-.12, .12),
xlab="diff( log(Y) )")
lines( as.numeric(sigma.upper.e["2019::"]), col="red")
lines( as.numeric(sigma.lower.e["2019::"]), col="red")
lines( as.numeric(sigma.upper.p["2019::"]), col="blue")
lines( as.numeric(sigma.lower.p["2019::"]), col="blue")
Cryer: CREF stock values
Shumway: NYSE, US GNP
Cowpertwait: S&P 500, Southern Hemisphere temperature