This vignette visualizes (log-)likelihood functions of Archimedean copulas, some of which are numerically challenging to compute. Because of this computational challenge, we also check that several of the available computational methods agree, testing for numerical near-equality with all.equal(L1, L2).
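As a reminder (with made-up values for L1 and L2, purely for illustration), all.equal() regards two numbers as equal when their relative difference stays below the given tolerance:
## illustrative only: all.equal() tests *near*-equality up to a tolerance
L1 <- -3558.5
L2 <- L1 * (1 + 1e-9)                        # relative difference of 1e-9
all.equal(L1, L2, tolerance = 1e-7)          # TRUE: within tolerance
isTRUE(all.equal(L1, L2, tolerance = 1e-12)) # FALSE: difference is reported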
We start by defining the following auxiliary functions.
##' @title [m]inus Log-Likelihood for Archimedean Copulas ("fast version")
##' @param theta parameter (length 1 for our current families)
##' @param acop Archimedean copula (of class "acopula")
##' @param u data matrix n x d
##' @param n.MC if > 0 MC is applied with sample size equal to n.MC; otherwise,
##' the exact formula is used
##' @param ... potential further arguments, passed to <acop> @dacopula()
##' @return negative log-likelihood
##' @author Martin Maechler (Marius originally)
mLogL <- function(theta, acop, u, n.MC=0, ...) { # -(log-likelihood)
    -sum(acop@dacopula(u, theta, n.MC=n.MC, log=TRUE, ...))
}
##' @title Plotting the Negative Log-Likelihood for Archimedean Copulas
##' @param cop an outer_nacopula (currently with no children)
##' @param u n x d data matrix
##' @param xlim x-range for curve() plotting
##' @param main title for curve()
##' @param XtrArgs a list of further arguments for mLogL()
##' @param ... further arguments for curve()
##' @return invisible()
##' @author Martin Maechler
curveLogL <- function(cop, u, xlim, main, XtrArgs=list(), ...) {
    unam <- deparse(substitute(u))
    stopifnot(is(cop, "outer_nacopula"), is.list(XtrArgs),
              (d <- ncol(u)) >= 2, d == dim(cop),
              length(cop@childCops) == 0) # not yet *nested* A.copulas
    acop <- cop@copula
    th. <- acop@theta          # the true theta
    acop <- setTheta(acop, NA) # so it's clear, the true theta is not used below
    if(missing(main)) {
        tau. <- cop@copula@tau(th.)
        main <- substitute("Neg. Log Lik."~ -italic(l)(theta, UU) ~ TXT ~~
                               FUN(theta['*'] == Th) %=>% tau['*'] == Tau,
                           list(UU = unam,
                                TXT = sprintf("; n=%d, d=%d; A.cop", nrow(u), d),
                                FUN = acop@name,
                                Th  = format(th.,  digits=3),
                                Tau = format(tau., digits=3)))
    }
    r <- curve(do.call(Vectorize(mLogL, "theta"), c(list(x, acop, u), XtrArgs)),
               xlim=xlim, main=main,
               xlab = expression(theta),
               ylab = substitute(- log(L(theta, u, ~~ COP)), list(COP=acop@name)),
               ...)
    if(is.finite(th.))
        axis(1, at = th., labels=expression(theta["*"]),
             lwd=2, col="dark gray", tck = -1/30)
    else warning("non-finite cop@copula@theta = ", th.)
    axis(1, at = initOpt(acop@name),
         labels = FALSE, lwd = 2, col = 2, tck = 1/20)
    invisible(r)
}
Ensure that we are told about it if the numerical algorithms choose methods using Rmpfr (an R package interfacing to the multiple-precision arithmetic library MPFR):
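The corresponding setup code is not shown here; a minimal sketch of what is assumed (including the doExtras flag that guards the slower computations below, and the op that the final options(op) restores):
## Assumptions (not shown in the original text):
op <- options(warn = 1) # print warnings immediately, e.g., when Rmpfr is used
doExtras <- FALSE       # assumed flag; set TRUE to run the slow "extra" chunks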
n <- 200
d <- 100
tau <- 0.2
theta <- copJoe@iTau(tau)
cop <- onacopulaL("Joe", list(theta,1:d))
theta
## [1] 1.443824
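As an illustrative consistency check (not part of the original code), applying the copula family's tau() to this theta should approximately recover the target tau:
## illustrative: tau() (approximately) inverts iTau()
copJoe@tau(theta) # expected to be close to tau = 0.2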
Here, the three different methods work “the same”:
## [1] 1.432898
th4 <- 1 + (1:4)/4
mL.tr <- c(-3558.5, -3734.4, -3299.5, -2505.)
mLt1 <- sapply(th4, function(th) mLogL(th, cop@copula, U1, method="log.poly")) # default
mLt2 <- sapply(th4, function(th) mLogL(th, cop@copula, U1, method="log1p"))
mLt3 <- sapply(th4, function(th) mLogL(th, cop@copula, U1, method="poly"))
stopifnot(all.equal(mLt1, mL.tr, tolerance=5e-5),
          all.equal(mLt2, mL.tr, tolerance=5e-5),
          all.equal(mLt3, mL.tr, tolerance=5e-5))
system.time(r1l <- curveLogL(cop, U1, c(1, 2.5), X=list(method="log.poly")))
## user system elapsed
## 0.288 0.004 0.290
mtext("all three polyJ() methods on top of each other")
system.time({
    r1J <- curveLogL(cop, U1, c(1, 2.5), X=list(method="poly"),
                     add=TRUE, col=adjustcolor("red", .4))
    r1m <- curveLogL(cop, U1, c(1, 2.5), X=list(method="log1p"),
                     add=TRUE, col=adjustcolor("blue", .5))
})
## user system elapsed
## 0.554 0.000 0.554
U2 <- rnacopula(n,cop)
summary(dCopula(U2, cop)) # => density for the *correct* parameter looks okay
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000e+00 4.900e+01 6.430e+02 2.777e+175 1.932e+04 5.553e+177
## hmm: max = 5.5e177
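The huge maximum hints at the extreme range these density values span. An illustrative alternative (not part of the original code) is to inspect them on the log scale, which dCopula() supports directly and on which even the extreme values remain moderate:
## illustrative: the same densities on the log scale are moderate, finite numbers
summary(dCopula(U2, cop, log = TRUE))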
if(doExtras)
    system.time(r2 <- curveLogL(cop, U2, c(1, 2.5)))
stopifnot(all.equal(enacopula(U2, cop, "mle"), 1.43992755, tolerance=1e-5),
          all.equal(mLogL(1.8, cop@copula, U2), -4070.1953, tolerance=1e-5)) # (was -Inf)
## [1] 1.449569
## user system elapsed
## 0.271 0.000 0.271
## [1] 1.451916
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000e+00 7.500e+01 9.080e+02 1.981e+259 1.434e+04 3.961e+261
## [1] -1789.59
## [1] -3882.819
## [1] -3517.366
if(doExtras) # each curve takes almost 2 sec
    system.time({
        curveLogL(cop, U4, c(1, 1.01))
        curveLogL(cop, U4, c(1, 1.0001))
        curveLogL(cop, U4, c(1, 1.000001))
    })
## --> limit goes *VERY* steeply up to 0
## --> theta 1.164 is about the boundary:
stopifnot(identical(setTheta(cop, 1.164), onacopula(cop@copula, C(1.164, 1:100))),
          all.equal(600.59577,
                    cop@copula@dacopula(U4[118,, drop=FALSE],
                                        theta=1.164, log = TRUE),
                    tolerance=1e-5)) # was "Inf"
## [1] 1.772108
## [1] 1.78459
## user system elapsed
## 0.37 0.00 0.37
## [1] 2.219066
## [1] 2.217593
n <- 200
d <- 50 # smaller 'd' -- so as to not need 'Rmpfr' here
tau <- 0.2
(theta <- copGumbel@iTau(tau))
## [1] 1.25
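For Gumbel, Kendall's tau has the closed form tau = 1 - 1/theta, so this value can be checked directly (an illustrative verification, not part of the original code):
## closed form for Gumbel: tau(theta) = 1 - 1/theta, i.e., theta = 1/(1 - tau)
1/(1 - tau) # = 1.25, matching copGumbel@iTau(0.2)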
set.seed(1)
U1 <- rnacopula(n,cop)
if(doExtras) {
    U2 <- rnacopula(n,cop)
    U3 <- rnacopula(n,cop)
}
enacopula(U1, cop, "mle") # 1.227659 (was 1.241927)
## [1] 1.227659
## user system elapsed
## 0.411 0.000 0.410
## [1] 2.5
set.seed(17)
U4 <- rnacopula(n,cG.5)
U5 <- rnacopula(n,cG.5)
U6 <- rnacopula(n,cG.5)
if(doExtras) { ## "Rmpfr" is used {2012-06-21}: -- therefore about 18 seconds!
    tol <- if(interactive()) 1e-12 else 1e-8
    print(system.time(
        ee. <- c(enacopula(U4, cG.5, "mle", tol=tol),
                 enacopula(U5, cG.5, "mle", tol=tol),
                 enacopula(U6, cG.5, "mle", tol=tol))))
    dput(ee.) # in case the following fails
    ## tol=1e-12 Linux nb-mm3 3.2.0-25-generic x86_64 (2012-06-23):
    ## c(2.47567251789004, 2.48424484287686, 2.50410767129408)
    ## c(2.475672518, 2.484244763, 2.504107671),
    stopifnot(all.equal(ee., c(2.475672518, 2.484244763, 2.504107671),
                        tolerance = max(1e-7, 16*tol)))
}
## --> Plots with "many" likelihood evaluations
th. <- seq(1, 3, by= 1/4)
if(doExtras) # "default2012" (polyG default) partly uses Rmpfr here:
system.time(r4 <- sapply(th., mLogL, acop=cG.5@copula, u=U4))## 25.6 sec
## whereas this (polyG method) is very fast {and still ok}:
system.time(r4.p <- sapply(th., mLogL, acop=cG.5@copula, u=U4, method="pois"))
## user system elapsed
## 0.084 0.000 0.083
r4. <- c(0, -18375.33, -21948.033, -24294.995, -25775.502,
         -26562.609, -26772.767, -26490.809, -25781.224)
stopifnot(!doExtras ||
          all.equal(r4, r4., tolerance = 8e-8),
          all.equal(r4.p, r4., tolerance = 8e-8))
## --> use fast method here as well:
system.time(r5.p <- sapply(th., mLogL, acop=cG.5@copula, u=U5, method="pois"))
## user system elapsed
## 0.083 0.000 0.083
## user system elapsed
## 0.082 0.000 0.082
if(doExtras) {
    if(FALSE) # for speed analysis, etc
        debug(copula:::polyG)
    mLogL(1.65, cG.5@copula, U4) # -23472.96
}
dd <- dCopula(U4, setTheta(cG.5, 1.64), log = TRUE,
              method = if(doExtras) "default" else "pois")
summary(dd)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 41.59 53.30 81.09 116.91 137.54 707.13
## [1] 18.19154
set.seed(11) # these seeds give no problems: 101, 41, 21
U. <- rnacopula(n,cop)
cop@copula <- setTheta(cop@copula, NA) # forget the true theta
system.time(f.ML <- emle(U., cop)); f.ML # --> fine: theta = 18.033, Log-lik = 314.01
## user system elapsed
## 0.012 0.000 0.013
##
## Call:
## bbmle::mle2(minuslogl = nLL, start = start, optimizer = "optimize",
## lower = interval[1], upper = interval[2])
##
## Coefficients:
## theta
## 18.0333
##
## Log-likelihood: 314.01
if(doExtras)
system.time(f.mlMC <- emle(U., cop, n.MC = 1e4)) # with MC
stopifnot(all.equal(unname(coef(f.ML)), 18.03331, tolerance= 1e-6),
          all.equal(f.ML@min, -314.0143, tolerance=1e-6),
          !doExtras || ## Simulated MLE (= SMLE) is "extra" random, hmm...
          all.equal(unname(coef(f.mlMC)), 17.8, tolerance= 0.01)
          ## 64-bit ubuntu: 17.817523
          ## ? 64-bit Mac:  17.741
          )
cop@copula <- setTheta(cop@copula, theta)
r. <- curveLogL(cop, U., c(1, 200)) # => now looks fine
## x y
## 87 172.14 2105.690
## 88 174.13 2143.642
## 89 176.12 2181.637
## 90 178.11 2219.675
## 91 180.10 2257.754
## 92 182.09 2295.874
## 93 184.08 2334.034
## 94 186.07 2372.232
## 95 188.06 2410.468
## 96 190.05 2448.742
## 97 192.04 2487.051
## 98 194.03 2525.396
## 99 196.02 2563.776
## 100 198.01 2602.189
## 101 200.00 2640.636
stopifnot(is.finite(r.$y),
          ## and is convex (everywhere):
          diff(r.$y, differences = 2) > 0)
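The convexity check above relies on the fact that a convex function evaluated on an equispaced grid has nonnegative second differences; a tiny self-contained illustration (independent of the copula code):
## second differences of a convex function on an equispaced grid are >= 0;
## for f(x) = x^2 on the grid 1:10 they are all equal to 2
diff((1:10)^2, differences = 2)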
options(op) # revert to previous state
## R version 4.4.2 (2024-10-31)
## Platform: x86_64-pc-linux-gnu
## Running under: Ubuntu 24.04.1 LTS
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/openblas-pthread/libblas.so.3
## LAPACK: /usr/lib/x86_64-linux-gnu/openblas-pthread/libopenblasp-r0.3.26.so; LAPACK version 3.12.0
##
## attached base packages:
## [1] parallel grid stats4 tools stats graphics grDevices
## [8] utils datasets methods base
##
## other attached packages:
## [1] rugarch_1.5-3 gsl_2.1-8 rmarkdown_2.29 mev_1.17 lattice_0.22-6
## [6] bbmle_1.0.25.1 copula_1.1-5
##
## loaded via a namespace (and not attached):
## [1] gmp_0.7-5 ks_1.14.3
## [3] sass_0.4.9 KernSmooth_2.23-24
## [5] SkewHyperbolic_0.4-2 pracma_2.4.4
## [7] digest_0.6.37 evaluate_1.0.1
## [9] nleqslv_3.3.5 mvtnorm_1.3-2
## [11] fastmap_1.2.0 jsonlite_1.8.9
## [13] Matrix_1.8-0 mclust_6.1.1
## [15] truncnorm_1.0-9 stabledist_0.7-2
## [17] spd_2.0-1 numDeriv_2022.9-1
## [19] jquerylib_0.1.4 Rdpack_2.6.2
## [21] cli_3.6.3 rlang_1.1.4
## [23] rbibutils_2.3 pspline_1.0-20
## [25] cachem_1.1.0 yaml_2.3.10
## [27] polynom_1.4-1 nloptr_2.1.1
## [29] bdsmatrix_1.3-7 mathjaxr_1.6-0
## [31] Runuran_0.40 partitions_1.10-7
## [33] buildtools_1.0.0 R6_2.5.1
## [35] zoo_1.8-13 lifecycle_1.0.4
## [37] ADGofTest_0.3 MASS_7.3-61
## [39] Rsolnp_1.16 pcaPP_2.0-5
## [41] GeneralizedHyperbolic_0.8-6 bslib_0.8.0
## [43] Rcpp_1.0.13-1 xfun_0.49
## [45] sys_3.4.3 knitr_1.49
## [47] htmltools_0.5.8.1 xts_0.14.1
## [49] maketools_1.3.1 fracdiff_1.5-3
## [51] compiler_4.4.2 alabama_2023.1.0
## [53] DistributionUtils_0.6-1