1 TMB fit

Start by fitting the model with TMB. This is quick to do, and gives us a model we can experiment with to figure out which hyperparameters are important. In particular, we obtain the mode of the Laplace approximation to the hyperparameter marginal posterior, together with the Hessian at that mode.

TMB::compile("naomi_simple.cpp")
## [1] 0
dyn.load(TMB::dynlib("naomi_simple"))

# tmb <- readRDS("depends/tmb.rds")
tmb <- readRDS("tmb.rds")
fit <- tmb$fit

2 Point concentration proportional to standard deviation

hypers <- names(fit$par)

There are 24 hyperparameters: 8 on the logit scale, 15 on the log scale, and one other (OmegaT).
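
As a check, we can tabulate the hyperparameter name prefixes:

table(str_extract(hypers, "^[^_]+"))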

We can calculate their posterior standard deviations using the delta method via TMB::sdreport as follows:

# Run sdreport in a separate process and collect the result
testrun <- parallel::mcparallel({TMB::sdreport(obj = fit$obj, par.fixed = fit$par, getJointPrecision = TRUE)})
sd_out <- parallel::mccollect(testrun, wait = TRUE, timeout = 0, intermediate = FALSE)
sd_out <- sd_out[[1]]

The marginal standard deviations for each of the hyperparameters are:

cbpalette <- c("#56B4E9","#009E73", "#E69F00", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#999999")

sd <- sqrt(diag(sd_out$cov.fixed))

data.frame(par = names(sd), sd = unname(sd)) %>%
  mutate(
    start = str_extract(par, "^[^_]+"),
    scale = fct_recode(start, "Log" = "log", "Logit" = "logit", "Other" = "OmegaT")
  ) %>%
  ggplot(aes(x = reorder(par, sd), y = sd, col = scale)) +
    geom_point(size = 2) +
    coord_flip() +
    scale_color_manual(values = cbpalette[1:3]) +
    lims(y = c(0, 2)) +
    theme_minimal() +
    labs(x = "", y = "Marginal standard deviation", col = "Scale")
Figure 2.1: Marginal standard deviations.

ggsave("marginal-sd.png", height = 3.5, w = 6.25)

Or, more generally, the covariance matrix is:

C <- sd_out$cov.fixed
(C_plot <- plot_matrix(C))
Figure 2.2: Covariance matrix.

One way to place grid points would be proportional to the standard deviations shown in the plot above. For example, a product grid with the minimum number of points (2) could be obtained by choosing k = 2 points in the dimension with the highest SD and k = 1 point in every other dimension. This would look as follows:

base_grid <- sd_levels_ghe_grid(
  dim = length(hypers),
  level = c(1, 2),
  cut_off = c(0, 1.9),
  sd = sqrt(diag(sd_out$cov.fixed))
)

base_grid
## Grid for Numerical Integration  
##  # dimensions: 24     # gridpoints: 2     used memory:832 bytes
## 
## nD-Construction principle:  'product'
##  individual quadrature rule for each dimension 
##  dim = 1 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 2 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 3 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 4 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 5 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 6 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 7 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 8 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 9 -->      type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 10 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 11 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 12 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 13 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 14 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 15 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 16 -->     type: GHe   level:  2   initial domain: -Inf Inf 
##  dim = 17 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 18 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 19 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 20 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 21 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 22 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 23 -->     type: GHe   level:  1   initial domain: -Inf Inf 
##  dim = 24 -->     type: GHe   level:  1   initial domain: -Inf Inf

The total number of points for this grid is 2. Let’s try fitting this, and see how long it takes. First prepare the data:

area_merged <- read_sf(system.file("extdata/demo_areas.geojson", package = "naomi"))
pop_agesex <- read_csv(system.file("extdata/demo_population_agesex.csv", package = "naomi"))
survey_hiv_indicators <- read_csv(system.file("extdata/demo_survey_hiv_indicators.csv", package = "naomi"))
art_number <- read_csv(system.file("extdata/demo_art_number.csv", package = "naomi"))
anc_testing <- read_csv(system.file("extdata/demo_anc_testing.csv", package = "naomi"))
pjnz <- system.file("extdata/demo_mwi2019.PJNZ", package = "naomi")
spec <- naomi::extract_pjnz_naomi(pjnz)

scope <- "MWI"
level <- 4
calendar_quarter_t1 <- "CY2016Q1"
calendar_quarter_t2 <- "CY2018Q3"
calendar_quarter_t3 <- "CY2019Q4"
prev_survey_ids  <- c("DEMO2016PHIA", "DEMO2015DHS")
artcov_survey_ids  <- "DEMO2016PHIA"
vls_survey_ids <- NULL
recent_survey_ids <- "DEMO2016PHIA"
artnum_calendar_quarter_t1 <- "CY2016Q1"
artnum_calendar_quarter_t2 <- "CY2018Q3"
anc_clients_year2 <- 2018
anc_clients_year2_num_months <- 9
anc_prevalence_year1 <- 2016
anc_prevalence_year2 <- 2018
anc_art_coverage_year1 <- 2016
anc_art_coverage_year2 <- 2018

naomi_mf <- naomi_model_frame(
  area_merged,
  pop_agesex,
  spec,
  scope = scope,
  level = level,
  calendar_quarter_t1,
  calendar_quarter_t2,
  calendar_quarter_t3
)

naomi_data <- select_naomi_data(
  naomi_mf,
  survey_hiv_indicators,
  anc_testing,
  art_number,
  prev_survey_ids,
  artcov_survey_ids,
  recent_survey_ids,
  vls_survey_ids,
  artnum_calendar_quarter_t1,
  artnum_calendar_quarter_t2,
  anc_prevalence_year1,
  anc_prevalence_year2,
  anc_art_coverage_year1,
  anc_art_coverage_year2
)

tmb_inputs <- prepare_tmb_inputs(naomi_data)
tmb_inputs_simple <- local_exclude_inputs(tmb_inputs)

Now let’s fit the model with aghq, and time how long it takes:

start <- Sys.time()
quad <- fit_aghq(tmb_inputs, k = 1, basegrid = base_grid)
## Warning: '.T2Cmat' is deprecated.
## Use '.T2CR' instead.
## See help("Deprecated") and help("Matrix-deprecated").
end <- Sys.time()

end - start
## Time difference of 54.84788 secs

Verifying the number of grid points used:

stopifnot(nrow(quad$modesandhessians) == 2)

How are these grid points spaced across the 24 dimensions?1

quad$modesandhessians[, 1:24] %>%
  pivot_longer(cols = everything()) %>%
  mutate(id = rep(1:24, times = 2)) %>%
  ggplot(aes(x = reorder(name, id), y = value)) +
    geom_jitter(shape = 1, height = 0, width = 0.2) +
    coord_flip() +
    labs(x = "", y = "Value") + 
    theme_minimal()

It is interesting to note that for the Cholesky-adapted grid with levels = c(1, 1, ..., 2, 1, ..., 1) there is variation in all of the dimensions greater than or equal to the 16th. This is because the Cholesky adaption is based on a lower triangular matrix: under the transformation \(x = m + Lz\) with \(L\) lower triangular, a change in the 16th coordinate of \(z\) propagates to coordinates 16 through 24 of \(x\). To verify this, we can look at the differences between the two grid points. For many of the dimensions the difference is too small to see on the plot above.

t(unname(diff(as.matrix(quad$modesandhessians[, 1:24]))))
##                [,1]
##  [1,]  0.000000e+00
##  [2,]  0.000000e+00
##  [3,]  0.000000e+00
##  [4,]  0.000000e+00
##  [5,]  0.000000e+00
##  [6,]  0.000000e+00
##  [7,]  0.000000e+00
##  [8,]  0.000000e+00
##  [9,]  0.000000e+00
## [10,]  0.000000e+00
## [11,]  0.000000e+00
## [12,]  0.000000e+00
## [13,]  0.000000e+00
## [14,]  0.000000e+00
## [15,]  0.000000e+00
## [16,]  3.814905e+00
## [17,]  1.606655e+00
## [18,]  1.518063e-03
## [19,] -3.494600e-03
## [20,] -7.841190e-06
## [21,]  4.996001e-05
## [22,] -2.818483e-04
## [23,] -6.447179e-03
## [24,] -1.368464e-04
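
The triangular structure can also be seen directly from the Cholesky factor. A small sketch, assuming mvQuad’s Cholesky adaption corresponds to \(x = m + Lz\) with \(L\) lower triangular:

L_chol <- t(chol(as.matrix(Matrix::forceSymmetric(solve(quad$optresults$hessian)))))
z <- rep(0, 24)
z[16] <- 1 # Perturb the level-2 dimension
which(as.numeric(L_chol %*% z) != 0) # Only coordinates 16 to 24 are moved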

Let’s also verify that adaption is happening as we expect:

base_grid_copy <- rlang::duplicate(base_grid)

#' Rescale it according to the mode and Hessian with Cholesky factor
mvQuad::rescale(base_grid, m = quad$optresults$mode, C = Matrix::forceSymmetric(solve(quad$optresults$hessian)), dec.type = 2)

#' Verify that the adapted grid has the same nodes and are used by aghq::aghq
stopifnot(all(mvQuad::getNodes(base_grid) == unname(as.matrix(quad$modesandhessians[, 1:24]))))

How would a spectral grid behave with the same choice of levels? Notice that there is a difference in all 24 dimensions when dec.type = 1 (spectral) is used:

mvQuad::rescale(base_grid_copy, m = quad$optresults$mode, C = Matrix::forceSymmetric(solve(quad$optresults$hessian)), dec.type = 1)
t(diff(mvQuad::getNodes(base_grid_copy)))
##                [,1]
##  [1,] -4.562110e-03
##  [2,] -4.788531e-03
##  [3,] -5.313556e-03
##  [4,]  3.876109e-01
##  [5,]  6.925962e-04
##  [6,] -1.955305e-03
##  [7,]  5.923101e-03
##  [8,] -1.301535e-02
##  [9,]  0.000000e+00
## [10,]  8.809706e-03
## [11,] -9.468319e-02
## [12,]  2.367271e-02
## [13,] -3.194831e-01
## [14,] -1.485009e-02
## [15,]  3.214202e-02
## [16,] -7.588094e-02
## [17,]  1.752933e-01
## [18,] -1.072913e-02
## [19,]  2.496620e-04
## [20,]  3.893256e-05
## [21,] -3.087050e-04
## [22,]  4.770154e-03
## [23,]  4.556001e-01
## [24,] -3.400338e-03

This model fit in a very reasonable amount of time. More generally, it would be of interest to know more about the relationship between the number of points in the grid and the length of time it takes to run fit_aghq. Are there any theoretical statements that can be made, e.g. scaling \(\mathcal{O}(n)\) where \(n\) is the number of grid points?
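
One way to probe this empirically would be to time fits over grids of increasing size. A sketch, not run here because each fit may take minutes, using sd_levels_ghe_grid with progressively lower cut-offs so that more dimensions receive level 2:

timings <- lapply(c(1.9, 1.5, 1.0), function(cut_off) {
  gg <- sd_levels_ghe_grid(
    dim = length(hypers),
    level = c(1, 2),
    cut_off = c(0, cut_off),
    sd = sqrt(diag(sd_out$cov.fixed))
  )
  n <- nrow(mvQuad::getNodes(gg))
  t <- system.time(fit_aghq(tmb_inputs, k = 1, basegrid = gg))["elapsed"]
  c(nodes = n, seconds = unname(t))
})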

2.1 Limitations

Some hyperparameters have higher standard deviations than others in part because of the different scales they are on. For example, a hyperparameter on the logit scale will tend to have a higher standard deviation than one on the log scale (Figure 2.1). The average standard deviation for hyperparameters starting with "log_" is 0.49, as compared with 1.603 for those starting with "logit_".
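
These averages can be computed from the standard deviations obtained earlier:

data.frame(par = names(sd), sd = unname(sd)) %>%
  mutate(start = str_extract(par, "^[^_]+")) %>%
  group_by(start) %>%
  summarise(mean_sd = mean(sd))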

It is not clear that this reflects our intuitions about which hyperparameters are important to take greater account of. Ideally, perhaps, we would like to know how much variation in each hyperparameter affects variation in the model outputs, and take that into account in the importance we place on each dimension.

3 Eigendecomposition

An alternative to using the standard deviations is principal components analysis (PCA), which we can use to create a grid on a subspace of \(\mathbb{R}^{24}\) that retains most of the variation.

m <- sd_out$par.fixed
eigenC <- eigen(C)
lambda <- eigenC$values
Lambda <- diag(lambda)
E <- eigenC$vectors

The matrix C can then be recovered as \(E \Lambda E^\top\), or in code E %*% Lambda %*% t(E):

max(C - (E %*% Lambda %*% t(E))) < 10E-12
## [1] TRUE

The relative contribution of each principal component is given by \(\lambda_i / \sum_i \lambda_i\):

plot_total_variation(eigenC, label_x = 20)
Figure 3.1: Scree plot.

ggsave("total-variation.png", h = 3, w = 6.25)

tv <- cumsum(eigenC$values) / sum(eigenC$values)

Based on Figure 3.1, with 5 dimensions included we can explain 67.5% of the total variation; with 10 dimensions included, that percentage increases to 92.7%.
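
These figures can be read off tv directly:

round(100 * tv[c(5, 10)], 1)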

What do the PC loadings (columns of the matrix E) look like?

rownames(E) <- hypers
plot_pc_loadings(eigenC)
Figure 3.2: PC loadings.

ggsave("pc-loadings.png", h = 3.5, w = 6.25)

3.1 Keeping a smaller number of dimensions

Let’s start by keeping s = 8 dimensions. We can create a dense GHQ grid with k = 3 points per dimension in those 8 dimensions.

s <- 8
d <- dim(E)[1]
gg_s <- mvQuad::createNIGrid(dim = s, type = "GHe", level = 3) 

This grid has \(3^8 = 6561\) nodes:

(n_nodes <- nrow(mvQuad::getNodes(gg_s)))
## [1] 6561

How does the reconstruction of the covariance matrix with 8 components look?

E_s <- E[, 1:s]
Lambda_s <- Lambda[1:s, 1:s]
C_s <- E_s %*% Lambda_s %*% t(E_s)

C_s_plot <- plot_matrix(C_s)
{C_plot + theme(legend.position = "none") + labs(subtitle = "Full rank")} + {C_s_plot + theme(legend.position = "none") + labs(subtitle = "Reduced rank")}
Figure 3.3: Reduced-rank reconstruction of the covariance matrix.

ggsave("reduced-rank.png", h = 3.5, w = 6.25)

3.2 Scoping the application to Naomi

Let’s see how large the PCA approach would make the grids for Naomi. A dense grid with k points in each of s retained dimensions has \(k^s\) nodes, so the size grows geometrically in s:

m <- sd_out$par.fixed
C <- sd_out$cov.fixed

gg3 <- pca_rescale(m = m, C = C, s = 3, k = 3)
nrow(mvQuad::getNodes(gg3))
## [1] 27
gg5 <- pca_rescale(m = m, C = C, s = 5, k = 3)
nrow(mvQuad::getNodes(gg5))
## [1] 243
gg7 <- pca_rescale(m = m, C = C, s = 7, k = 3)
nrow(mvQuad::getNodes(gg7))
## [1] 2187
gg9 <- pca_rescale(m = m, C = C, s = 9, k = 3)
nrow(mvQuad::getNodes(gg9))
## [1] 19683

4 Dealing with the scales issue

Eventually we want to put the parameters on the log and logit scales on an equal footing.

4.1 Using the correlation matrix

C <- sd_out$cov.fixed
d <- 1 / sqrt(diag(C))
R <- diag(d) %*% C %*% diag(d) # Correlation matrix, equivalent to cov2cor(C)
plot_matrix(R)

eigenR <- eigen(R)
plot_total_variation(eigenR, label_x = 5)

rownames(eigenR$vectors) <- hypers
plot_pc_loadings(eigenR)

4.2 Scale standardised system

C <- sd_out$cov.fixed
var <- diag(C)

df_std <- data.frame(var = var) %>%
  tibble::rownames_to_column("par") %>%
  mutate(
    scale = fct_recode(str_extract(par, "^[^_]+"), "Log" = "log", "Logit" = "logit", "Other" = "OmegaT")
  ) %>%
  group_by(scale) %>%
  mutate(mean_var = mean(var)) %>%
  ungroup() %>%
  mutate(var_std = var / mean_var)

df_std %>%  
  pivot_longer(
    cols = c("var", "var_std"),
    names_to = "method",
    values_to = "value"
  ) %>%
  mutate(
    method = fct_recode(method, "Default" = "var", "Scale standardised" = "var_std"),
    start = str_extract(par, "^[^_]+"),
    scale = fct_recode(start, "Log" = "log", "Logit" = "logit", "Other" = "OmegaT") 
  ) %>%
  ggplot(aes(x = reorder(par, value), y = value, col = scale)) +
    geom_point() +
    facet_grid(~ method) +
    coord_flip() +
    scale_color_manual(values = cbpalette[1:3]) +
    labs(x = "Hyperparameter", y = "Marginal variance", col = "Scale") +
    theme_minimal()

Rs <- diag(1 / sqrt(df_std$mean_var)) %*% C %*% diag(1 / sqrt(df_std$mean_var))
plot_matrix(Rs)

eigenRs <- eigen(Rs)
plot_total_variation(eigenRs, label_x = 5)

rownames(eigenRs$vectors) <- hypers
plot_pc_loadings(eigenRs)

4.3 Creating quadrature grids with different coordinate systems

We want to use a particular coordinate system, while still keeping the covariance matrix scaling as in adaptive quadrature.

m <- c(1, 1.5)
C <- matrix(c(2, 1, 1, 1), ncol = 2)

obj <- function(theta) {
  mvtnorm::dmvnorm(theta, mean = m, sigma = C, log = TRUE)
}

ff <- list(
  fn = obj,
  gr = function(theta) numDeriv::grad(obj, theta),
  he = function(theta) numDeriv::hessian(obj, theta)
)

grid <- expand.grid(
  theta1 = seq(-6, 6, length.out = 600),
  theta2 = seq(-6, 6, length.out = 600)
)

ground_truth <- cbind(grid, pdf = exp(obj(grid)))

plot <- ggplot(ground_truth, aes(x = theta1, y = theta2, z = pdf)) +
  geom_contour(col = multi.utils::cbpalette()[1]) +
  coord_fixed(xlim = c(-6, 6), ylim = c(-6, 6), ratio = 1) +
  labs(x = "", y = "") +
  theme_minimal()

grid_5 <- mvQuad::createNIGrid(dim = 2, type = "GHe", level = 5)
mvQuad::rescale(grid_5, m = m, C = C, dec.type = 1)

pca_grid_5 <- pca_rescale(m = m, C = C, s = 1, k = 5)

plot_points <- function(gg) {
  plot +
    geom_point(
      data = mvQuad::getNodes(gg) %>%
        as.data.frame() %>%
        mutate(weights = mvQuad::getWeights(gg)),
      aes(x = V1, y = V2, size = weights),
      alpha = 0.8,
      col = multi.utils::cbpalette()[2],
      inherit.aes = FALSE
    ) +
    scale_size_continuous(range = c(1, 2)) +
    labs(x = "", y = "", size = "Weight") +
    theme_minimal()
}

Create a grid similar to pca_grid_5 above, but using the second principal component rather than the first:

eigenC <- eigen(C)
E <- eigenC$vectors
lambda <- eigenC$values

gg_s <- mvQuad::createNIGrid(dim = 1, type = "GHe", level = 5)
nodes_out <- t(E[, 2] %*% diag(sqrt(lambda[2]), ncol = 1) %*% t(mvQuad::getNodes(gg_s)))
for(j in 1:2) nodes_out[, j] <- nodes_out[, j] + m[j] # Adaption
weights_out <- mvQuad::getWeights(gg_s) * as.numeric(mvQuad::getWeights(mvQuad::createNIGrid(dim = 1, type = "GHe", level = 1))) # Weight for the integrated-out dimension
weights_out <- det(chol(C)) * weights_out # Adaption

pc2_grid_5 <- mvQuad::createNIGrid(dim = 2, type = "GHe", level = 1)
pc2_grid_5$level <- rep(NA, times = 2)
pc2_grid_5$ndConstruction <- "custom"
pc2_grid_5$nodes <- nodes_out
pc2_grid_5$weights <- weights_out

We know that \(C = E \Lambda E^\top = \sum_i \lambda_i \mathbf{e}_i \mathbf{e}_i^\top\):

lambda[1] * E[, 1] %*% t(E[, 1]) + lambda[2] * E[, 2] %*% t(E[, 2])
##      [,1] [,2]
## [1,]    2    1
## [2,]    1    1

We also know that any vector can be written in terms of the eigendecomposition:

a <- c(1, 1) %*% E
a[1] * E[, 1] + a[2] * E[, 2]
## [1] 1 1

We also have the defining property of an eigenvalue and eigenvector, \(C \mathbf{e}_i = \lambda_i \mathbf{e}_i\):

sum(t(lambda[1] %*% E[, 1]) - C %*% E[, 1]) < 10e-12
## [1] TRUE

The unit eigenvectors successively maximise the amount of variation explained, given by the Rayleigh quotient \(\mathbf{v}^\top C \mathbf{v} / \mathbf{v}^\top \mathbf{v}\), implemented below as the function variance_explained. By definition E[, 1] maximises this!

variance_explained <- function(v) {
  v %*% C %*% v / v %*% v
}

variance_explained(c(1, 1))
##      [,1]
## [1,]  2.5
variance_explained(E[, 1])
##          [,1]
## [1,] 2.618034
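
As a check, the maximum attained by E[, 1] is exactly the largest eigenvalue:

stopifnot(abs(variance_explained(E[, 1]) - lambda[1]) < 1e-10)

Now we construct a grid along the normalised direction \(\mathbf{v} / \lVert \mathbf{v} \rVert\), in the same way as for pc2_grid_5 above: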
v <- c(1, 1)
v_norm <- v / sqrt(c(v %*% v))

gg_s <- mvQuad::createNIGrid(dim = 1, type = "GHe", level = 5)
nodes_out <- t(v_norm %*% sqrt(variance_explained(v_norm)) %*% t(mvQuad::getNodes(gg_s)))
for(j in 1:2) nodes_out[, j] <- nodes_out[, j] + m[j] # Adaption
weights_out <- mvQuad::getWeights(gg_s) * as.numeric(mvQuad::getWeights(mvQuad::createNIGrid(dim = 1, type = "GHe", level = 1)))
weights_out <- det(chol(C)) * weights_out # Adaption

v_norm_grid_5 <- mvQuad::createNIGrid(dim = 2, type = "GHe", level = 1)
v_norm_grid_5$level <- rep(NA, times = 2)
v_norm_grid_5$ndConstruction <- "custom"
v_norm_grid_5$nodes <- nodes_out
v_norm_grid_5$weights <- weights_out

{plot_points(grid_5) + plot_points(pca_grid_5)} / {plot_points(pc2_grid_5) + plot_points(v_norm_grid_5)} + plot_annotation(tag_levels = "A")

ggsave("pca-demo.png", h = 6, w = 6.25)

4.4 Application of custom coordinate system to scale standardised system

Now we apply what we’ve learnt about making grids with custom coordinate systems to make a grid with a coordinate system defined by the eigendecomposition of the scale standardised variance matrix!

#' TODO
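
As a starting point, here is a minimal sketch, assuming we take the leading principal component of Rs, build a one-dimensional GHe rule along it in the standardised space, and map back to the original scale using the group mean variances from df_std:

D <- diag(sqrt(df_std$mean_var)) # Undoes the scale standardisation
e1 <- eigenRs$vectors[, 1]
l1 <- eigenRs$values[1]
gg1 <- mvQuad::createNIGrid(dim = 1, type = "GHe", level = 5)
nodes_std <- t(e1 %*% diag(sqrt(l1), ncol = 1) %*% t(mvQuad::getNodes(gg1)))
nodes_out <- sweep(nodes_std %*% t(D), 2, sd_out$par.fixed, "+") # Centre at the mode

The weights would then be handled exactly as in the pc2_grid_5 construction above.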

5 Testing prerotation

See Jäckel (2005).

R45 <- matrix(
  c(cos(pi / 4), sin(pi / 4), -sin(pi / 4), cos(pi / 4)), ncol = 2, nrow = 2
) # Rotation by 45 degrees
#' TODO!
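
In the meantime, a minimal sketch of what this test could look like, assuming prerotation means rotating the standardised nodes by R45 before the usual mean and covariance adaption (the standard Gaussian weight is rotation invariant and det(R45) = 1, so the weights are unchanged):

gg_rot <- mvQuad::createNIGrid(dim = 2, type = "GHe", level = 5)
Z <- as.matrix(mvQuad::getNodes(gg_rot)) %*% t(R45) # Prerotate the standardised nodes
L <- t(chol(C)) # m and C as in the bivariate example above
gg_rot$nodes <- sweep(Z %*% t(L), 2, m, "+") # Adaption x = m + Lz
gg_rot$weights <- det(chol(C)) * mvQuad::getWeights(gg_rot) # Adaption
gg_rot$ndConstruction <- "custom"
plot_points(gg_rot)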

6 Testing pruning

Remove any nodes which have small enough weight (Jäckel 2005).

#' TODO!
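
A minimal sketch, assuming we drop nodes whose absolute weight falls below some fraction of the largest weight (prune_grid is a hypothetical helper, not part of mvQuad):

prune_grid <- function(gg, tol = 1e-8) {
  w <- mvQuad::getWeights(gg)
  keep <- as.vector(abs(w) > tol * max(abs(w)))
  gg$nodes <- mvQuad::getNodes(gg)[keep, , drop = FALSE]
  gg$weights <- w[keep, , drop = FALSE]
  gg$ndConstruction <- "custom"
  gg
}

#' For example, applied to the dense bivariate grid from above
nrow(mvQuad::getNodes(prune_grid(grid_5)))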

Original computing environment

sessionInfo()
## R version 4.2.0 (2022-04-22)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS 13.3.1
## 
## Matrix products: default
## LAPACK: /Library/Frameworks/R.framework/Versions/4.2/Resources/lib/libRlapack.dylib
## 
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] patchwork_1.1.2      naomi_2.8.5          sf_1.0-9             aghq_0.4.1           tmbstan_1.0.4       
##  [6] rstan_2.21.5         StanHeaders_2.21.0-7 TMB_1.9.2            Matrix_1.5-4.1       stringr_1.5.0       
## [11] purrr_1.0.1          tidyr_1.3.0          readr_2.1.3          ggplot2_3.4.0        forcats_0.5.2       
## [16] dplyr_1.0.10         multi.utils_0.1.0   
## 
## loaded via a namespace (and not attached):
##   [1] colorspace_2.0-3    deldir_1.0-6        ellipsis_0.3.2      class_7.3-20        rprojroot_2.0.3    
##   [6] eppasm_0.7.1        rstudioapi_0.14     proxy_0.4-27        farver_2.1.1        bit64_4.0.5        
##  [11] mvtnorm_1.1-3       fansi_1.0.4         codetools_0.2-18    cachem_1.0.6        knitr_1.41         
##  [16] jsonlite_1.8.4      data.tree_1.0.0     compiler_4.2.0      assertthat_0.2.1    fastmap_1.1.0      
##  [21] cli_3.6.1           s2_1.1.1            htmltools_0.5.3     prettyunits_1.1.1   tools_4.2.0        
##  [26] gtable_0.3.1        glue_1.6.2          reshape2_1.4.4      wk_0.7.0            V8_4.2.2           
##  [31] fastmatch_1.1-3     Rcpp_1.0.10         jquerylib_0.1.4     vctrs_0.6.1         spdep_1.2-7        
##  [36] mvQuad_1.0-6        xfun_0.37           ps_1.7.3            brio_1.1.3          lifecycle_1.0.3    
##  [41] statmod_1.4.36      orderly_1.4.3       ids_1.0.1           zoo_1.8-11          scales_1.2.1       
##  [46] vroom_1.6.0         ragg_1.2.2          hms_1.1.2           parallel_4.2.0      inline_0.3.19      
##  [51] yaml_2.3.7          curl_5.0.0          gridExtra_2.3       loo_2.5.1           sass_0.4.4         
##  [56] stringi_1.7.8       highr_0.9           e1071_1.7-12        boot_1.3-28         pkgbuild_1.3.1     
##  [61] zip_2.2.2           spData_2.2.1        rlang_1.1.0         pkgconfig_2.0.3     systemfonts_1.0.4  
##  [66] matrixStats_0.62.0  evaluate_0.20       lattice_0.20-45     labeling_0.4.2      bit_4.0.5          
##  [71] processx_3.8.0      tidyselect_1.2.0    traduire_0.0.6      plyr_1.8.8          magrittr_2.0.3     
##  [76] bookdown_0.26       R6_2.5.1            generics_0.1.3      DBI_1.1.3           pillar_1.9.0       
##  [81] withr_2.5.0         units_0.8-0         sp_1.5-1            tibble_3.2.1        crayon_1.5.2       
##  [86] first90_1.5.3       uuid_1.1-0          KernSmooth_2.23-20  utf8_1.2.3          tzdb_0.3.0         
##  [91] rmarkdown_2.18      isoband_0.2.6       grid_4.2.0          data.table_1.14.6   callr_3.7.3        
##  [96] digest_0.6.31       classInt_0.4-8      numDeriv_2016.8-1.1 textshaping_0.3.6   openssl_2.0.5      
## [101] RcppParallel_5.1.5  stats4_4.2.0        munsell_0.5.0       viridisLite_0.4.1   bslib_0.4.1        
## [106] askpass_1.1

Bibliography

Jäckel, Peter. 2005. “A Note on Multivariate Gauss-Hermite Quadrature.” London: ABN-Amro.

  1. Note that this section was added in later in order to build intuition, and doesn’t flow very naturally!