
Commit bca496f

first commit

14 files changed, +1199 -0 lines

acf_pacf_plot.R

+93 lines

# Code adapted from:
# "Creating an Autocorrelation Plot in ggplot2"
# Peter DeWitt
# file: files.meetup.com/1696476/ACFinGGPLOT2Presentation.pdf

qacf <- function(x,
                 fun = c("acf", "pacf"),
                 transformation = "level",
                 conf.level = 0.95,
                 max.lag = 20,
                 min.lag = 0,
                 title = "",
                 legend = 1)
{
    if ((fun != "acf") & (fun != "pacf"))
    {
        stop("fun needs to be either 'acf' or 'pacf'")
    }

    if ((transformation != "level") & (transformation != "square") & (transformation != "abs"))
    {
        stop("Wrong transformation. Allowed: 'level', 'square', 'abs'.")
    }

    # Apply transformation to data
    if (transformation == "square")
    {
        x <- x^2
    }

    if (transformation == "abs")
    {
        x <- abs(x)
    }

    typefun <- match.fun(fun)

    # Line for confidence interval
    ciline <- qnorm((1 - conf.level)/2)/sqrt(length(x))

    # ACF or PACF
    bacf <- typefun(x, plot = FALSE, lag.max = max.lag)
    cfdata <- with(bacf, data.frame(lag, acf))

    if (min.lag > 0)
    {
        cfdata <- cfdata[-seq(1, min.lag), ]
    }

    significant <- (abs(cfdata[, 2]) > abs(ciline))^2
    cfdata <- cbind(cfdata, significant)
    ylabel <- "ACF"
    if (fun == "pacf")
    {
        ylabel <- "Partial ACF"
    }

    # "Stupid" way of making sure legend shows all factors
    # Cols of cfdata: lag, acf, significant
    lb <- length(cfdata$lag)
    cfdata[lb+1,] <- c(cfdata[lb,1]+1, 0, 0)
    cfdata[lb+2,] <- c(cfdata[lb,1]+2, 0, 1)

    q <- qplot(x=lag, y=acf,
               data = cfdata,
               geom = "bar",
               stat = "identity",
               position = "identity",
               ylab = ylabel,
               xlab = "Lags",
               main = title,
               fill = factor(significant))
    q <- q + geom_hline(yintercept = -ciline, color = "blue", size = 0.5)
    q <- q + geom_hline(yintercept = ciline, color = "blue", size = 0.5)
    q <- q + geom_hline(yintercept = 0, color = "red", size = 0.3)
    q <- q + scale_fill_hue(name = paste("Significant at the\n", conf.level, "level"),
                            breaks = 0:1, labels = c("False", "True"))
    q <- q + theme_bw() + xlim(cfdata[1,"lag"] - 0.5, cfdata[lb,"lag"]+0.5)

    if (legend == 0){
        q <- q + theme(legend.position = "none")
    }
    return(q)
}

####
# dats <- rnorm(100)
# dplot <- qacf(x=dats, fun="pacf", transformation="level")
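
A minimal usage sketch for qacf, assuming ggplot2 is attached (the function calls qplot and geom_hline); the simulated series is only a placeholder. Note that with recent ggplot2 releases the deprecated stat/position arguments passed to qplot inside qacf may need reworking, e.g. around geom_col().

library(ggplot2)
source("./acf_pacf_plot.R")

set.seed(42)
x <- rnorm(250)                    # placeholder data standing in for a loss series
p <- qacf(x, fun = "acf", transformation = "square", max.lag = 30)
print(p)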

all_tests.R

+58 lines

# Load the test functions
source("./var_tests.R")
source("./bootstrap_test.R")
source("./v_tests.R")

# Used to extract the correct probability for each column index in the loop below
prob.idxs <- seq(from=1, to=2*nq*nmodels, by=(2*nmodels)) + 2*nmodels

# Matrices to store test results
pvals.var <- mat.or.vec(nr=3, nc=ncol(data.exres))
pvals.boot <- mat.or.vec(nr=1, nc=ncol(data.exres))
vals.vtest <- mat.or.vec(nr=3, nc=ncol(data.exres))

# Loop over each estimation series
for (j in 1:ncol(data.exres)){

    # Index in qs corresponding to current j
    for (k in prob.idxs){
        if (j < k){
            prob.idx <- which(k == prob.idxs)
            break
        }
    }

    ##########################################################################
    ### Define data to be used
    # Probability corresponding to current data
    current.prob <- qs[prob.idx]

    # Data used for bootstrap test
    current.exres <- data.exres[,j]
    current.exres <- current.exres[as.logical(data.break[,j])]
    current.exres <- current.exres[!is.na(current.exres)]

    current.diff <- data.diff[,j]
    current.diff <- current.diff[!is.na(current.diff)]

    # If there are any NA in the current data, they had better line up with NA in the breaks...
    current.break <- data.break[,j]
    current.break <- as.logical(current.break[!is.na(current.break)])
    ##########################################################################

    ##########################################################################
    ### Run tests, store results
    # Run tests on VaR-breaks
    pvals.var[, j] <- mcsim.test(hit.sequence=current.break, hit.probability=(1-current.prob))

    # Run bootstrap test on exceedance residuals
    pvals.boot[j] <- bootstrap.test(current.exres)

    # Run V-tests
    vals.vtest[, j] <- v.tests(diff.series=current.diff, breaks=current.break, q=current.prob)
    ##########################################################################

    print(j)
}
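
The prob.idxs arithmetic above is easiest to see with small, assumed values of nq (the number of probability levels in qs) and nmodels; the snippet below only illustrates how the columns of data.exres map onto entries of qs.

nq <- 2; nmodels <- 3                                        # assumed values for illustration only
prob.idxs <- seq(from = 1, to = 2*nq*nmodels, by = (2*nmodels)) + 2*nmodels
prob.idxs                                                    # 7 13
# In the loop above, columns j = 1..6 first satisfy j < 7, so prob.idx = 1 and they use qs[1];
# columns j = 7..12 first satisfy j < 13, so prob.idx = 2 and they use qs[2].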

example_data.csv

+7 lines

date,price
1/2/1980,283.8
1/3/1980,284.9
1/4/1980,284.6
1/7/1980,280.3
1/8/1980,278.8
1/9/1980,276.8

garch_specs.R

+64 lines

# Which solver to use
slvr <- "hybrid"
slvr.ctrl <- list(n.restarts=3, parallel=TRUE, pkg="multicore", cores=4)

# All models have an AR(1) process to model the mean

######################################### GARCH(1,1) ###################################
### GARCH(1,1) with normal innovations
garch.spec.norm <- ugarchspec(variance.model = list(model="sGARCH", garchOrder=c(1,1)),
                              mean.model = list(armaOrder=c(1,0)),
                              distribution.model="norm")

### GARCH(1,1) with t innovations, 4 degrees of freedom
garch.spec.std <- ugarchspec(variance.model = list(model="sGARCH", garchOrder=c(1,1)),
                             mean.model = list(armaOrder=c(1,0)),
                             distribution.model="std",
                             fixed.pars = list(shape=df))

### GARCH(1,1) with skew-t innovations
garch.spec.sstd <- ugarchspec(variance.model = list(model="sGARCH", garchOrder=c(1,1)),
                              mean.model = list(armaOrder=c(1,0)),
                              distribution.model="sstd",
                              fixed.pars = list(shape=df))
#########################################################################################

######################################### GJR-GARCH(1,1) ###############################
### GJR-GARCH(1,1) with normal innovations
gjr.spec.norm <- ugarchspec(variance.model = list(model="gjrGARCH", garchOrder=c(1,1)),
                            mean.model = list(armaOrder=c(1,0)),
                            distribution.model="norm")

### GJR-GARCH(1,1) with t innovations
gjr.spec.std <- ugarchspec(variance.model = list(model="gjrGARCH", garchOrder=c(1,1)),
                           mean.model = list(armaOrder=c(1,0)),
                           distribution.model="std",
                           fixed.pars = list(shape=df))
#########################################################################################

######################################### FAMILY-GARCH(1,1) ############################
### family-GARCH(1,1) with normal innovations
fam.spec.norm <- ugarchspec(variance.model = list(model="fGARCH", garchOrder=c(1,1), submodel="ALLGARCH"),
                            mean.model = list(armaOrder=c(1,0)),
                            distribution.model="norm")

fam.spec.std <- ugarchspec(variance.model = list(model="fGARCH", garchOrder=c(1,1), submodel="ALLGARCH"),
                           mean.model = list(armaOrder=c(1,0)),
                           distribution.model="std",
                           fixed.pars = list(shape=df))
#########################################################################################

######################################### component-GARCH(1,1) #########################
cs.spec.norm <- ugarchspec(variance.model = list(model="csGARCH", garchOrder=c(1,1)),
                           mean.model = list(armaOrder=c(1,0)),
                           distribution.model="norm")

cs.spec.std <- ugarchspec(variance.model = list(model="csGARCH", garchOrder=c(1,1)),
                          mean.model = list(armaOrder=c(1,0)),
                          distribution.model="std",
                          fixed.pars = list(shape=df))
#########################################################################################
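
A minimal fitting sketch for one of these specifications, assuming rugarch is attached and that df (the fixed shape parameter used above) has been set as it normally would be in variable_definitions.R; the value 4 is taken from the "4 degrees of freedom" comment, and the random data is purely a placeholder.

library(rugarch)

df <- 4                              # assumed value, per the t-innovations comment above
source("./garch_specs.R")

x <- rnorm(1500)                     # placeholder loss series for illustration
fit <- ugarchfit(spec = garch.spec.std, data = x, solver = slvr)
head(sigma(fit))                     # fitted conditional standard deviations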

get_thresholds.R

+74 lines

setwd("~/your/path/here")

# Load packages to be used
library(timeSeries)
library(fGarch)
library(rugarch)
library(ismev)
library(boot)
library(doParallel)
registerDoParallel(cores = 4)
# library(doMC)
# registerDoMC(cores=4)
library(gdata)

# Load variables and helper functions
source("./variable_definitions.R")
source("./garch_specs.R")
source("./threshold_choice.R")
source("./pot_threshold.R")

# Note! Only run one data set at a time, i.e. length(dsets) == 1

for (dset in dsets){
    dset.name <- paste("./", dset, ".csv", sep="")
    dataset <- read.csv(dset.name, header = TRUE)

    dataset <- timeSeries::as.timeSeries(dataset, FinCenter = "GMT")

    # If testing on a subset of the data
    # dataset <- dataset[1:1050,]

    # Convert price series to loss series
    dataset <- -returns(dataset, method = "continuous")

    length.dataset <- length(dataset[, 1])

    # Normalize entire dataset to have variance one
    dataset.sd <- apply(dataset, 2, sd)
    dataset <- (dataset / dataset.sd)

    loop.seq <- seq(from=1, to=(length.dataset - n), by=500)

    thresholds <- mat.or.vec(nr=length(loop.seq), nc=nmodels)

    t.counter <- 1

    ptime <- system.time({
        for (i in loop.seq)
        {
            data.window <- dataset[i:(n-1+i), 1]

            # Produce threshold for each model
            for (j in 1:nmodels){
                thresholds[t.counter, j] <- pot.threshold(model=models[[j]][1], distribution=models[[j]][2])
                print(paste("(", as.character(t.counter), as.character(j), "): ",
                            as.character(thresholds[t.counter, j])))
            }
            cat(paste(Sys.time()),"\n"); flush(stdout())

            t.counter <- t.counter + 1
        }
    })[3]

    # Remove all variables except thresholds, dset, ptime
    keep(thresholds, dset, ptime, sure=TRUE)

    # Save results
    save.image(file=paste("./", dset, "_thresholds",".RData", sep=""))
    rm(dset)
}
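
get_thresholds.R relies on objects from variable_definitions.R that are not part of this commit (dsets, n, nmodels, models), as well as pot.threshold() from pot_threshold.R. From the way they are used above, a hypothetical stand-in could look like the following; every name and value here is an assumption for illustration only.

# Hypothetical stand-ins for objects normally created in variable_definitions.R
dsets   <- c("example_data")                         # CSV base names; the note above says one data set at a time
n       <- 1000                                      # length of each estimation window (assumed)
models  <- list(c("sGARCH", "norm"),                 # pot.threshold() reads models[[j]][1] as the model
                c("gjrGARCH", "std"))                # and models[[j]][2] as the innovation distribution
nmodels <- length(models)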
