Go back to the Contents page.
Press Show to reveal the code chunks.
# Set seed for reproducibility
set.seed(1982)
# Set global options for all code chunks
knitr::opts_chunk$set(
# Disable messages printed by R code chunks
message = FALSE,
# Disable warnings printed by R code chunks
warning = FALSE,
# Show R code within code chunks in output
echo = TRUE,
# Include both R code and its results in output
include = TRUE,
# Evaluate R code chunks
eval = TRUE,
# Enable caching of R code chunks for faster rendering
cache = FALSE,
# Align figures in the center of the output
fig.align = "center",
# Enable retina display for high-resolution figures
fig.retina = 2,
# Show errors in the output instead of stopping rendering
error = TRUE,
# Do not collapse code and output into a single block
collapse = FALSE
)
# Start the figure counter
fig_count <- 0
# Define the captioner function
captioner <- function(caption) {
fig_count <<- fig_count + 1
paste0("Figure ", fig_count, ": ", caption)
}
# remotes::install_github("davidbolin/rspde", ref = "devel")
# remotes::install_github("davidbolin/metricgraph", ref = "devel")
library(rSPDE)
library(MetricGraph)
library(grateful)
library(ggplot2)
library(reshape2)
library(plotly)
We analyze and numerically approximate solutions to fractional diffusion equations on metric graphs of the form \[\begin{equation} \label{eq:maineq} \tag{1} \left\{ \begin{aligned} \partial_t u(s,t) + (\kappa^2 - \Delta_\Gamma)^{\alpha/2} u(s,t) &= f(s,t), && \quad (s,t) \in \Gamma \times (0, T), \\ u(s,0) &= u_0(s), && \quad s \in \Gamma, \end{aligned} \right. \end{equation}\] with \(u(\cdot,t)\) satisfying the Kirchhoff vertex conditions \[\begin{equation} \label{eq:Kcond} \tag{2} \mathcal{K} = \left\{\phi\in C(\Gamma)\;\middle|\; \forall v\in \mathcal{V}:\; \sum_{e\in\mathcal{E}_v}\partial_e \phi(v)=0 \right\}. \end{equation}\] Here \(\Gamma = (\mathcal{V},\mathcal{E})\) is a metric graph, \(\kappa>0\), \(\alpha\in(0,2]\) determines the smoothness of \(u(\cdot,t)\), \(\Delta_{\Gamma}\) is the so-called Kirchhoff–Laplacian, and \(f:\Gamma\times (0,T)\to\mathbb{R}\) and \(u_0: \Gamma \to \mathbb{R}\) are fixed functions, called right-hand side and initial condition, respectively.
Let \(\alpha\in(0,2]\) and \(U_h^\tau\) denote the sequence of approximations of the solution to the weak form of problem \(\eqref{eq:maineq}\) at each time step on a mesh indexed by \(h\). Let \(U^0_h = P_hu_0\). For \(k=0,\dots, N-1\), \(U_h^{k+1}\in V_h\) solves the following scheme \[\begin{align} \label{system:fully_discrete_scheme} \tag{3} \langle\delta U_h^{k+1},\phi\rangle + \mathfrak{a}(U_h^{k+1},\phi) = \langle f^{k+1},\phi\rangle ,\quad\forall\phi\in V_h, \end{align}\] where \(f^{k+1} = \displaystyle\dfrac{1}{\tau}\int_{t_k}^{t_{k+1}}f(t)dt\). At each time step \(t_k\), the finite element solution \(U_h^k\in V_h\) to \(\eqref{system:fully_discrete_scheme}\) can be expressed as a linear combination of the basis functions \(\{\psi^i_h\}_{i=1}^{N_h}\) introduced in the Preliminaries page, namely, \[\begin{align} \label{num_sol} \tag{4} U_h^k(s) = \sum_{i=1}^{N_h}u_i^k\psi^i_h(s), \;s\in\Gamma. \end{align}\] Substituting \(\eqref{num_sol}\) into \(\eqref{system:fully_discrete_scheme}\) yields the following linear system \[\begin{align*} \sum_{j=1}^{N_h}u_j^{k+1}[(\psi_j,\psi_i)_{L_2(\Gamma)}+ \tau\mathfrak{a}(\psi_j,\psi_i)] = \sum_{j=1}^{N_h}u_j^{k}(\psi_j,\psi_i)_{L_2(\Gamma)}+\tau( f^{k+1},\psi_i)_{L_2(\Gamma)}, \end{align*}\] for \(i = 1,\dots, N_h\). In matrix notation, \[\begin{align} \label{diff_eq_discrete} (\boldsymbol{C}+\tau \boldsymbol{L}^{\alpha/2})\boldsymbol{U}^{k+1} = \boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}, \end{align}\] or by introducing the scaling parameter \(\kappa^2>0\), \[\begin{align} (\boldsymbol{C}+\tau (\kappa^2)^{\alpha/2}(\boldsymbol{L}/\kappa^2)^{\alpha/2})\boldsymbol{U}^{k+1} = \boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}, \end{align}\] where \(\boldsymbol{C}\) has entries \(\boldsymbol{C}_{ij} = (\psi_j,\psi_i)_{L_2(\Gamma)}\), \(\boldsymbol{L}^{\alpha/2}\) has entries \(\mathfrak{a}(\psi_j,\psi_i)\), \(\boldsymbol{U}^k\) has entries \(u_j^k\), and \(\boldsymbol{F}^k\) has entries \(( f^{k},\psi_i)_{L_2(\Gamma)}\). To reduce computational cost and promote sparsity, we replace the mass matrix \(\boldsymbol{C}\) with a lumped mass matrix \(\tilde{\boldsymbol{C}}\), which is diagonal with entries \(\tilde{\boldsymbol{C}}_{ii}=\sum_{j=1}^{N_h}\boldsymbol{C}_{ij}\). For convenience, we write \(\boldsymbol{C}\) instead of \(\tilde{\boldsymbol{C}}\) in the following. Applying \((\boldsymbol{L}/\kappa^2)^{-\alpha/2}\) to both sides yields \[\begin{equation} ((\boldsymbol{L}/\kappa^2)^{-\alpha/2}\boldsymbol{C}+\tau (\kappa^2)^{\alpha/2}\boldsymbol{I})\boldsymbol{U}^{k+1} = (\boldsymbol{L}/\kappa^2)^{-\alpha/2}(\boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}). \end{equation}\] Following Bolin and Kirchner (2020), we approximate \((\boldsymbol{L}/\kappa^2)^{-\alpha/2}\) by \(\boldsymbol{P}_\ell^{-\top}\boldsymbol{P}_r^\top\) to arrive at \[\begin{equation} \label{eq:scheme2} \tag{5} (\boldsymbol{P}_\ell^{-\top}\boldsymbol{P}_r^\top \boldsymbol{C}+\tau(\kappa^2)^{\alpha/2} \boldsymbol{I})\boldsymbol{U}^{k+1} = \boldsymbol{P}_\ell^{-\top}\boldsymbol{P}_r^\top(\boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}),
\end{equation}\] where \[\begin{equation} \label{eq:PLPR} \tag{6} \boldsymbol{P}_r = \prod_{i=1}^m \left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{C}^{-1}\boldsymbol{L}}{\kappa^2}\right)\quad\text{and}\quad \boldsymbol{P}_\ell = \dfrac{1}{\texttt{factor}}\boldsymbol{C}\prod_{j=1}^{m+1} \left(\boldsymbol{I}-r_{2j}\dfrac{\boldsymbol{C}^{-1}\boldsymbol{L}}{\kappa^2}\right), \end{equation}\] and \(\texttt{factor} = \dfrac{c_m}{b_{m+1}}\), and \(\{r_{1i}\}_{i=1}^m\) and \(\{r_{2j}\}_{j=1}^{m+1}\) are the roots of \(q_1(x) =\sum_{i=0}^mc_ix^{i}\) and \(q_2(x)=\sum_{j=0}^{m+1}b_jx^{j}\), respectively. The coefficients \(\{c_i\}_{i=0}^m\) and \(\{b_j\}_{j=0}^{m+1}\) are determined as the best rational approximation \(\hat{r}_m =q_1/q_2\) of the function \(\hat{f}(x) := x^{\alpha/2-1}\) over the interval \(J_h: = [\kappa^{2}\lambda_{N_h,h}^{-1}, \kappa^{2}\lambda_{1,h}^{-1}]\), where \(\lambda_{1,h}, \lambda_{N_h,h}>0\) are the smallest and the largest eigenvalue of \(L_h\), respectively.
For the sake of clarity, we note that the numerical implementation of
Bolin and Kirchner (2020) actually defines
\(\boldsymbol{P}_r\) and \(\boldsymbol{P}_\ell\) as \[\begin{equation}
\label{eq:PLPRbolin}
\tag{7}
\boldsymbol{P}_r = \prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{C}^{-1}\boldsymbol{L}}{\kappa^2}\right)\quad\text{and}\quad
\boldsymbol{P}_\ell =
\dfrac{\kappa^{2\beta}}{\texttt{factor}}\boldsymbol{C}\prod_{j=1}^{m+1}
\left(\boldsymbol{I}-r_{2j}\dfrac{\boldsymbol{C}^{-1}\boldsymbol{L}}{\kappa^2}\right),
\end{equation}\] where \(\beta = \alpha/2\) and the scaling factor \((\kappa^2)^{\alpha/2} = \kappa^{2\beta}\) is already incorporated in \(\boldsymbol{P}_\ell\), a convention we adopt in the following. With this in mind, we can rewrite \(\eqref{eq:scheme2}\) as
\[\begin{equation}
\tag{8}
(\boldsymbol{P}_r^\top \boldsymbol{C}+\tau
\boldsymbol{P}_\ell^\top)\boldsymbol{U}^{k+1} =
\boldsymbol{P}_r^\top(\boldsymbol{C}\boldsymbol{U}^k+\tau
\boldsymbol{F}^{k+1}),
\label{eq:scheme}
\end{equation}\] where
\[\begin{equation}
\boldsymbol{P}_r^\top = \prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\quad\text{and}\quad
\boldsymbol{P}_\ell^\top =
\dfrac{\kappa^{2\beta}}{\texttt{factor}}\prod_{j=1}^{m+1}
\left(\boldsymbol{I}-r_{2j}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\cdot
\boldsymbol{C}
\end{equation}\] since \(\boldsymbol{L}\) and \(\boldsymbol{C}^{-1}\) are symmetric and the factors in the products commute. Substituting these two expressions into \(\eqref{eq:scheme}\) yields \[\begin{equation}
\left(\prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)+\dfrac{\tau
\kappa^{2\beta}}{\texttt{factor}}\prod_{j=1}^{m+1}
\left(\boldsymbol{I}-r_{2j}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\right)\boldsymbol{C}\boldsymbol{U}^{k+1}
= \prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\cdot
(\boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}),
\end{equation}\] that is, \[\begin{equation}
\label{eq:final_scheme}
\tag{9}
\boldsymbol{U}^{k+1} = \boldsymbol{C}^{-1}\left(\prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)+\dfrac{\tau
\kappa^{2\beta}}{\texttt{factor}}\prod_{j=1}^{m+1}
\left(\boldsymbol{I}-r_{2j}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\right)^{-1}
\prod_{i=1}^m
\left(\boldsymbol{I}-r_{1i}\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}\right)\cdot
(\boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}).
\end{equation}\] Considering the partial fraction decomposition
\[\begin{equation}
\label{eq:partial_fraction}
\tag{10}
\dfrac{\prod_{i=1}^m (1-r_{1i}x)}{\prod_{i=1}^m (1-r_{1i}x)+\dfrac{\tau
\kappa^{2\beta}}{\texttt{factor}} \prod_{j=1}^{m+1}
(1-r_{2j}x)}=\sum_{k=1}^{m+1} a_k(x-p_k)^{-1} + r,
\end{equation}\] scheme \(\eqref{eq:final_scheme}\) can be expressed
as \[\begin{equation}
\label{eq:final_scheme2}
\tag{11}
\boldsymbol{U}^{k+1} = \boldsymbol{C}^{-1}\left(\sum_{k=1}^{m+1}
a_k\left(
\dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}-p_k\boldsymbol{I}\right)^{-1}
+ r\boldsymbol{I}\right) (\boldsymbol{C}\boldsymbol{U}^k+\tau
\boldsymbol{F}^{k+1})
\end{equation}\]
In practice, since the rational function in \(\eqref{eq:partial_fraction}\) is proper, there is no remainder \(r\). Moreover, since \(\left( \dfrac{\boldsymbol{L}\boldsymbol{C}^{-1}}{\kappa^2}-p_k\boldsymbol{I}\right)^{-1} = \boldsymbol{C}\left( \dfrac{\boldsymbol{L}}{\kappa^2}-p_k\boldsymbol{C}\right)^{-1}\), we have that \(\eqref{eq:final_scheme2}\) can be rewritten as
\[\begin{equation} \label{eq:final_scheme3} \tag{12} \boldsymbol{U}^{k+1} = \left(\sum_{k=1}^{m+1} a_k\left( \dfrac{\boldsymbol{L}}{\kappa^2}-p_k\boldsymbol{C}\right)^{-1}\right) (\boldsymbol{C}\boldsymbol{U}^k+\tau \boldsymbol{F}^{k+1}). \end{equation}\]
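As a small aside, the mass lumping used above simply replaces each row of \(\boldsymbol{C}\) by its row sum on the diagonal; a toy base-R illustration:
# Toy illustration of mass lumping: diagonal matrix of row sums
C_toy <- rbind(c(2, 1, 0), c(1, 4, 1), c(0, 1, 2))
C_lumped <- diag(rowSums(C_toy))
C_lumped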
my.get.roots()
For each rational order \(m\) (1, 2, 3, 4) and smoothness parameter \(\beta\) (\(=\alpha/2\) with \(\alpha\) between 0.5 and 2), function my.get.roots() (adapted from the rSPDE package) returns \(\texttt{factor} = \dfrac{c_m}{b_{m+1}}\) and the roots \(\{r_{1i}\}_{i=1}^m\) and \(\{r_{2j}\}_{j=1}^{m+1}\).
# Function to compute the roots and factor for the rational approximation
my.get.roots <- function(m, # rational order, m = 1, 2, 3, or 4
beta # smoothness parameter, beta = alpha/2 with alpha between 0.5 and 2
) {
m1table <- rSPDE:::m1table
m2table <- rSPDE:::m2table
m3table <- rSPDE:::m3table
m4table <- rSPDE:::m4table
mt <- get(paste0("m", m, "table"))
rb <- rep(0, m + 1)
rc <- rep(0, m)
if(m == 1) {
rc = approx(mt$beta, mt[[paste0("rc")]], beta)$y
} else {
rc = sapply(1:m, function(i) {
approx(mt$beta, mt[[paste0("rc.", i)]], beta)$y
})
}
rb = sapply(1:(m+1), function(i) {
approx(mt$beta, mt[[paste0("rb.", i)]], xout = beta)$y
})
factor = approx(mt$beta, mt$factor, xout = beta)$y
return(list(pl_roots = rb, # roots \{r_{2j}\}_{j=1}^{m+1}
pr_roots = rc, # roots \{r_{1i}\}_{i=1}^m
factor = factor # this is c_m/b_{m+1}
))
}
poly.from.roots()
Function poly.from.roots() computes the coefficients of a polynomial from its roots.
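A minimal sketch of such a helper is given below; the conventions are assumptions (coefficients of \(\prod_{i}(1 - r_i x)\), returned with the highest-degree coefficient first), chosen to match what gsignal::residue() and the zero padding in compute.partial.fraction.param() below expect.
# Minimal sketch under the assumed conventions: coefficients of prod_i (1 - roots[i] * x),
# returned with the highest-degree coefficient first
poly.from.roots <- function(roots) {
  coef <- 1 # the constant polynomial 1, coefficients in increasing degree
  for (r in roots) {
    coef <- c(coef, 0) + c(0, -r * coef) # multiply by (1 - r * x)
  }
  rev(coef) # return in decreasing order of degree
}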
compute.partial.fraction.param()
Given factor \(=\texttt{factor} = \dfrac{c_m}{b_{m+1}}\), pr_roots \(=\{r_{1i}\}_{i=1}^m\), pl_roots \(=\{r_{2j}\}_{j=1}^{m+1}\), time_step \(=\tau\), and scaling \(=\kappa^{2\beta}\), function compute.partial.fraction.param() computes the parameters for the partial fraction decomposition \(\eqref{eq:partial_fraction}\).
# Function to compute the parameters for the partial fraction decomposition
compute.partial.fraction.param <- function(factor, # c_m/b_{m+1}
pr_roots, # roots \{r_{1i}\}_{i=1}^m
pl_roots, # roots \{r_{2j}\}_{j=1}^{m+1}
time_step, # \tau
scaling # \kappa^{2\beta}
) {
pr_coef <- c(0, poly.from.roots(pr_roots))
pl_coef <- poly.from.roots(pl_roots)
factor_pr_coef <- pr_coef
pr_plus_pl_coef <- factor_pr_coef + ((scaling * time_step)/factor) * pl_coef
res <- gsignal::residue(factor_pr_coef, pr_plus_pl_coef)
return(list(r = res$r, # residues \{a_k\}_{k=1}^{m+1}
p = res$p, # poles \{p_k\}_{k=1}^{m+1}
k = res$k # remainder r
))
}
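As a quick illustration (arbitrary values: \(m = 2\), \(\beta = 0.6\), \(\kappa = 1.5\), \(\tau = 0.01\)), the two helpers can be chained as follows; the returned p and r are the poles \(p_k\) and residues \(a_k\) in \(\eqref{eq:partial_fraction}\).
# Illustrative call with arbitrary parameter values
roots <- my.get.roots(m = 2, beta = 0.6)
pf <- compute.partial.fraction.param(factor = roots$factor, pr_roots = roots$pr_roots, pl_roots = roots$pl_roots, time_step = 0.01, scaling = 1.5^(2 * 0.6))
pf$p # poles p_k
pf$r # residues a_k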
my.fractional.operators.frac()
Given the Laplacian matrix L, the smoothness parameter beta, the mass matrix C (not lumped), the scaling factor scale.factor \(=\kappa^2\), the rational order m, and the time step time_step \(=\tau\), function my.fractional.operators.frac() computes the fractional operator and returns a list containing the necessary matrices and parameters for the fractional diffusion equation.
# Function to compute the fractional operator
my.fractional.operators.frac <- function(L, # Laplacian matrix
beta, # smoothness parameter beta
C, # mass matrix (not lumped)
scale.factor, # scaling parameter = kappa^2
m = 1, # rational order, m = 1, 2, 3, or 4
time_step # time step = tau
) {
I <- Matrix::Diagonal(dim(C)[1])
L <- L / scale.factor
if(beta == 1){
L <- L * scale.factor^beta
return(list(C = C, # mass matrix
L = L, # Laplacian matrix scaled
m = m, # rational order
beta = beta, # smoothness parameter
LHS = C + time_step * L # left-hand side of the linear system
))
} else {
scaling <- scale.factor^beta
roots <- my.get.roots(m, beta)
poles_rs_k <- compute.partial.fraction.param(roots$factor, roots$pr_roots, roots$pl_roots, time_step, scaling)
partial_fraction_terms <- list()
for (i in 1:(m+1)) {
# Here is where the terms in the sum in eq 12 are computed
partial_fraction_terms[[i]] <- (L - poles_rs_k$p[i] * C)/poles_rs_k$r[i]
}
return(list(C = C, # mass matrix
L = L, # Laplacian matrix scaled
m = m, # rational order
beta = beta, # smoothness parameter
partial_fraction_terms = partial_fraction_terms # partial fraction terms
))
}
}
my.solver.frac()
Given the object returned by my.fractional.operators.frac() and a vector v, function my.solver.frac() solves the linear system \(\eqref{eq:final_scheme3}\) with right-hand side v. If beta = 1, it solves the linear system directly; otherwise, it uses the partial fraction decomposition.
# Function to solve the iteration
my.solver.frac <- function(obj, # object returned by my.fractional.operators.frac()
v # vector to be solved for
){
beta <- obj$beta
m <- obj$m
if (beta == 1){
return(solve(obj$LHS, v) # solve the linear system directly for beta = 1
)
} else {
partial_fraction_terms <- obj$partial_fraction_terms
output <- v*0
for (i in 1:(m+1)) {output <- output + solve(partial_fraction_terms[[i]], v)}
return(output # solve the linear system using the partial fraction decomposition
)
}
}
solve_fractional_evolution()
Given the fractional operator object my_op_frac, a time step time_step, a sequence of time points time_seq, an initial value val_at_0, and the right-hand side matrix RHST, function solve_fractional_evolution() computes the solution to the fractional diffusion equation at each time step using scheme \(\eqref{eq:final_scheme3}\).
solve_fractional_evolution <- function(my_op_frac, time_step, time_seq, val_at_0, RHST) {
CC <- my_op_frac$C
SOL <- matrix(NA, nrow = nrow(CC), ncol = length(time_seq))
SOL[, 1] <- val_at_0
for (k in 1:(length(time_seq) - 1)) {
rhs <- CC %*% SOL[, k] + time_step * RHST[, k + 1]
SOL[, k + 1] <- as.matrix(my.solver.frac(my_op_frac, rhs))
}
return(SOL)
}
gets.graph.tadpole()
Given a mesh size h, function gets.graph.tadpole() builds a tadpole graph and creates a mesh.
# Function to build a tadpole graph and create a mesh
gets.graph.tadpole <- function(h){
edge1 <- rbind(c(0,0),c(1,0))
theta <- seq(from=-pi,to=pi,length.out = 10000)
edge2 <- cbind(1+1/pi+cos(theta)/pi,sin(theta)/pi)
edges <- list(edge1, edge2)
graph <- metric_graph$new(edges = edges, verbose = 0)
graph$set_manual_edge_lengths(edge_lengths = c(1,2))
graph$build_mesh(h = h)
return(graph)
}
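As an illustration of how these pieces fit together (hypothetical values; this assumes the MetricGraph FEM interface, namely that graph$compute_fem() fills graph$mesh$C and graph$mesh$G, and that the operator matrix is assembled as \(\boldsymbol{L} = \kappa^2\boldsymbol{C} + \boldsymbol{G}\) to represent \(\kappa^2-\Delta_\Gamma\)):
# Illustrative setup under the assumptions stated above
graph <- gets.graph.tadpole(h = 0.05)
graph$compute_fem()
kappa <- 1
C <- graph$mesh$C # FEM mass matrix
G <- graph$mesh$G # FEM stiffness matrix
L <- kappa^2 * C + G # discretization of kappa^2 - Delta_Gamma
my_op_frac <- my.fractional.operators.frac(L = L, beta = 0.6, C = C, scale.factor = kappa^2, m = 2, time_step = 0.01)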
tadpole.eig()
Given a mode number k and a tadpole graph graph, function tadpole.eig() computes the eigenfunctions of the tadpole graph associated with mode k, evaluated on its mesh.
# Function to compute the eigenfunctions of the tadpole graph
tadpole.eig <- function(k,graph){
x1 <- c(0,graph$get_edge_lengths()[1]*graph$mesh$PtE[graph$mesh$PtE[,1]==1,2])
x2 <- c(0,graph$get_edge_lengths()[2]*graph$mesh$PtE[graph$mesh$PtE[,1]==2,2])
if(k==0){
f.e1 <- rep(1,length(x1))
f.e2 <- rep(1,length(x2))
f1 = c(f.e1[1],f.e2[1],f.e1[-1], f.e2[-1])
f = list(phi=f1/sqrt(3))
} else {
f.e1 <- -2*sin(pi*k*1/2)*cos(pi*k*x1/2)
f.e2 <- sin(pi*k*x2/2)
f1 = c(f.e1[1],f.e2[1],f.e1[-1], f.e2[-1])
if((k %% 2)==1){
f = list(phi=f1/sqrt(3))
} else {
f.e1 <- (-1)^{k/2}*cos(pi*k*x1/2)
f.e2 <- cos(pi*k*x2/2)
f2 = c(f.e1[1],f.e2[1],f.e1[-1],f.e2[-1])
f <- list(phi=f1,psi=f2/sqrt(3/2))
}
}
return(f)
}
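Continuing the sketch above (same hypothetical setup), the first eigenfunction can be evolved with zero forcing; since \(e_1\) is an eigenfunction of \(\kappa^2-\Delta_\Gamma\) with eigenvalue \(\lambda_1 = \kappa^2 + (\pi/2)^2\), the exact solution is \(e^{-\lambda_1^{\alpha/2}t}e_1\), which the numerical solution should approximately reproduce.
# Illustrative time stepping: evolve the first eigenfunction with zero forcing
time_step <- 0.01
time_seq <- seq(0, 1, by = time_step)
u0 <- tadpole.eig(1, graph)$phi
RHST <- matrix(0, nrow = length(u0), ncol = length(time_seq))
U_approx <- solve_fractional_evolution(my_op_frac, time_step, time_seq, val_at_0 = u0, RHST = RHST)
lambda_1 <- kappa^2 + (pi / 2)^2
max(abs(U_approx[, ncol(U_approx)] - exp(-lambda_1^0.6 * max(time_seq)) * u0)) # should be of the order of the discretization error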
gets.eigen.params()
Given a finite number of modes N_finite, a scaling parameter kappa, a smoothness parameter alpha, and a tadpole graph graph, function gets.eigen.params() computes EIGENVAL_ALPHA (a vector with entries \(\lambda_j^{\alpha/2}\)), EIGENVAL_MINUS_ALPHA (a vector with entries \(\lambda_j^{-\alpha/2}\)), and EIGENFUN (a matrix with columns \(e_j\) on the mesh of graph).
# Function to compute the eigenpairs of the tadpole graph
gets.eigen.params <- function(N_finite = 4, kappa = 1, alpha = 0.5, graph){
EIGENVAL <- NULL
EIGENVAL_ALPHA <- NULL
EIGENVAL_MINUS_ALPHA <- NULL
EIGENFUN <- NULL
INDEX <- NULL
for (j in 0:N_finite) {
lambda_j <- kappa^2 + (j*pi/2)^2
lambda_j_alpha_half <- lambda_j^(alpha/2)
lambda_j_minus_alpha_half <- lambda_j^(-alpha/2)
e_j <- tadpole.eig(j,graph)$phi
EIGENVAL <- c(EIGENVAL, lambda_j)
EIGENVAL_ALPHA <- c(EIGENVAL_ALPHA, lambda_j_alpha_half)
EIGENVAL_MINUS_ALPHA <- c(EIGENVAL_MINUS_ALPHA, lambda_j_minus_alpha_half)
EIGENFUN <- cbind(EIGENFUN, e_j)
INDEX <- c(INDEX, j)
if (j>0 && (j %% 2 == 0)) {
lambda_j <- kappa^2 + (j*pi/2)^2
lambda_j_alpha_half <- lambda_j^(alpha/2)
lambda_j_minus_alpha_half <- lambda_j^(-alpha/2)
e_j <- tadpole.eig(j,graph)$psi
EIGENVAL <- c(EIGENVAL, lambda_j)
EIGENVAL_ALPHA <- c(EIGENVAL_ALPHA, lambda_j_alpha_half)
EIGENVAL_MINUS_ALPHA <- c(EIGENVAL_MINUS_ALPHA, lambda_j_minus_alpha_half)
EIGENFUN <- cbind(EIGENFUN, e_j)
INDEX <- c(INDEX, j+0.1)
}
}
return(list(EIGENVAL = EIGENVAL,
EIGENVAL_ALPHA = EIGENVAL_ALPHA,
EIGENVAL_MINUS_ALPHA = EIGENVAL_MINUS_ALPHA,
EIGENFUN = EIGENFUN,
INDEX = INDEX))
}
construct_piecewise_projection()
Given a matrix projected_U_approx with approximated values at discrete time points, a sequence of time points time_seq, and an extended sequence of time points overkill_time_seq, function construct_piecewise_projection() constructs a piecewise constant projection of the approximated values over the extended time sequence.
# Function to construct a piecewise constant projection of approximated values
construct_piecewise_projection <- function(projected_U_approx, time_seq, overkill_time_seq) {
projected_U_piecewise <- matrix(NA, nrow = nrow(projected_U_approx), ncol = length(overkill_time_seq))
# Assign value at t = 0
projected_U_piecewise[, which(overkill_time_seq == 0)] <- projected_U_approx[, 1]
# Assign values for intervals (t_{k-1}, t_k]
for (k in 2:length(time_seq)) {
idxs <- which(overkill_time_seq > time_seq[k - 1] & overkill_time_seq <= time_seq[k])
projected_U_piecewise[, idxs] <- projected_U_approx[, k]
}
return(projected_U_piecewise)
}
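For instance (synthetic values), a solution stored on a coarse time grid can be projected onto a finer one:
# Synthetic illustration of the piecewise constant projection in time
ts_coarse <- seq(0, 1, by = 0.1)
ts_fine <- seq(0, 1, by = 0.01)
U_coarse <- matrix(rnorm(5 * length(ts_coarse)), nrow = 5)
U_piecewise <- construct_piecewise_projection(U_coarse, ts_coarse, ts_fine)
dim(U_piecewise) # 5 x length(ts_fine)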
# Returns a function y(x) = 10^b * x^slope, i.e. the line of the given slope through (x1, y1) in log-log scale
loglog_line_equation <- function(x1, y1, slope) {
b <- log10(y1 / (x1 ^ slope))
function(x) {
(x ^ slope) * (10 ^ b)
}
}
# Returns a function y(x) = exp(lnC + slope * x), i.e. the line of the given slope through (x1, y1) in semi-log (log y) scale
exp_line_equation <- function(x1, y1, slope) {
lnC <- log(y1) - slope * x1
function(x) {
exp(lnC + slope * x)
}
}
# For each theoretical rate, anchors a reference line of that slope at every data point and averages them into one guiding line
compute_guiding_lines <- function(x_axis_vector, errors, theoretical_rates, line_equation_fun) {
guiding_lines <- matrix(NA, nrow = length(x_axis_vector), ncol = length(theoretical_rates))
for (j in seq_along(theoretical_rates)) {
guiding_lines_aux <- matrix(NA, nrow = length(x_axis_vector), ncol = length(x_axis_vector))
for (k in seq_along(x_axis_vector)) {
point_x1 <- x_axis_vector[k]
point_y1 <- errors[k, j]
slope <- theoretical_rates[j]
line <- line_equation_fun(x1 = point_x1, y1 = point_y1, slope = slope)
guiding_lines_aux[, k] <- line(x_axis_vector)
}
guiding_lines[, j] <- rowMeans(guiding_lines_aux)
}
return(guiding_lines)
}
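A small synthetic check (made-up errors decaying with known rates 1 and 2) shows how the guiding lines are obtained:
# Synthetic illustration: errors with rates 1 and 2 give guiding lines of the same slopes
h_vec <- c(0.1, 0.05, 0.025, 0.0125)
errs <- cbind(h_vec, h_vec^2)
guiding <- compute_guiding_lines(h_vec, errs, theoretical_rates = c(1, 2), line_equation_fun = loglog_line_equation)
guiding # column j lies on a line of slope j in log-log scale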
Below we present closed-form expressions for \(\displaystyle G_j(t)= \int_0^t e^{\lambda^{\alpha/2}_jr}g(r)dr\) corresponding to some choices of \(g(r)\).
\[\begin{aligned} g(r) &= Ae^{-\lambda^{\alpha/2}_j r} &\implies\quad G_j(t) &= At, \quad A\in\mathbb{R} \\[1.5ex] g(r) &= Ae^{\mu r} &\implies\quad G_j(t) &= A \frac{e^{(\lambda^{\alpha/2}_j+\mu)t} - 1}{\lambda^{\alpha/2}_j + \mu}, \quad -\lambda^{\alpha/2}_j \ne \mu \in \mathbb{R} \\[1.5ex] g(r) &= Ar^n &\implies\quad G_j(t) &= A\frac{(-1)^{n+1} n!}{(\lambda_j^{\alpha/2})^{n+1}} \left(1 - e^{\lambda_j^{\alpha/2} t} \sum_{k=0}^n \frac{(-\lambda_j^{\alpha/2} t)^k}{k!} \right), \quad n=0,1,\dots \\[1.5ex] g(r) &= A\sin(\omega r) &\implies\quad G_j(t) &= A \frac{e^{\lambda_j^{\alpha/2} t} \left( \lambda_j^{\alpha/2} \sin(\omega t) - \omega \cos(\omega t) \right) + \omega}{(\lambda_j^{\alpha/2})^2 + \omega^2}, \quad \omega \in \mathbb{R} \\[1.5ex] g(r) &= A\cos(\theta r) &\implies\quad G_j(t) &= A \frac{e^{\lambda_j^{\alpha/2} t} \left( \lambda_j^{\alpha/2} \cos(\theta t) + \theta \sin(\theta t) \right) - \lambda_j^{\alpha/2}}{(\lambda_j^{\alpha/2})^2 + \theta^2}, \quad \theta \in \mathbb{R} \end{aligned}\]
# Functions to compute the exact solution to the fractional diffusion equation
g_linear <- function(r, A, lambda_j_alpha_half) {
return(A * exp(-lambda_j_alpha_half * r))
}
G_linear <- function(t, A) {
return(A * t)
}
g_exp <- function(r, A, mu) {
return(A * exp(mu * r))
}
G_exp <- function(t, A, lambda_j_alpha_half, mu) {
exponent <- lambda_j_alpha_half + mu
return(A * (exp(exponent * t) - 1) / exponent)
}
g_poly <- function(r, A, n) {
return(A * r^n)
}
G_poly <- function(t, A, lambda_j_alpha_half, n) {
t <- as.vector(t)
k_vals <- 0:n
sum_term <- sapply(t, function(tt) {
sum(((-lambda_j_alpha_half * tt)^k_vals) / factorial(k_vals))
})
coeff <- ((-1)^(n + 1)) * factorial(n) / (lambda_j_alpha_half^(n + 1))
return(A * coeff * (1 - exp(lambda_j_alpha_half * t) * sum_term))
}
g_sin <- function(r, A, omega) {
return(A * sin(omega * r))
}
G_sin <- function(t, A, lambda_j_alpha_half, omega) {
denom <- lambda_j_alpha_half^2 + omega^2
numerator <- exp(lambda_j_alpha_half * t) * (lambda_j_alpha_half * sin(omega * t) - omega * cos(omega * t)) + omega
return(A * numerator / denom)
}
g_cos <- function(r, A, theta) {
return(A * cos(theta * r))
}
G_cos <- function(t, A, lambda_j_alpha_half, theta) {
denom <- lambda_j_alpha_half^2 + theta^2
numerator <- exp(lambda_j_alpha_half * t) * (lambda_j_alpha_half * cos(theta * t) + theta * sin(theta * t)) - lambda_j_alpha_half
return(A * numerator / denom)
}
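These integrals are what is needed to write down exact reference solutions: if the \(j\)-th coefficient of the right-hand side is \(f_j(r) = g(r)\), then the \(j\)-th coefficient of the exact solution is \(u_j(t) = e^{-\lambda_j^{\alpha/2}t}\bigl(u_j(0) + G_j(t)\bigr)\). A single-mode illustration with hypothetical values (\(j = 2\), \(\kappa = 1\), \(\alpha = 0.8\), \(f_j(r) = \sin(\pi r)\), \(u_j(0) = 1\)):
# Single-mode illustration with hypothetical values
lambda_j_alpha_half <- (1 + (2 * pi / 2)^2)^(0.8 / 2) # lambda_2^{alpha/2} with kappa = 1, alpha = 0.8
t_grid <- seq(0, 1, by = 0.01)
u_j <- exp(-lambda_j_alpha_half * t_grid) * (1 + G_sin(t_grid, A = 1, lambda_j_alpha_half = lambda_j_alpha_half, omega = pi))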
reversecolumns()
Given a matrix mat, function reversecolumns() reverses the order of its columns.
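A minimal sketch of an equivalent helper:
# Minimal sketch of the column-reversal helper
reversecolumns <- function(mat) {
  mat[, ncol(mat):1, drop = FALSE]
}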
# helper: measure change relative to the size of the previous iterate
change_comparer <- function(X_new, X_old, time_step, time_seq, weights, relative = TRUE) {
num <- sqrt(as.double(t(weights) %*% ((X_new - X_old)^2) %*% rep(time_step, length(time_seq))))
if (!relative) {
return(num)
}
den <- sqrt(as.double(t(weights) %*% (X_new^2) %*% rep(time_step, length(time_seq))))
if (den < .Machine$double.eps) {
return(ifelse(num < .Machine$double.eps, 0, num))
} else {
return(num / den)
}
}
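A quick sanity check with synthetic inputs: identical iterates give a change of zero.
# Sanity check with synthetic inputs
w_toy <- rep(1, 4) # toy mesh weights
ts_toy <- seq(0, 1, by = 0.25) # five time points
X_toy <- matrix(1, nrow = 4, ncol = length(ts_toy))
change_comparer(X_toy, X_toy, time_step = 0.25, time_seq = ts_toy, weights = w_toy) # returns 0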
# Coupled solver with multi-criteria convergence
solve_coupled_system_multi_tol <- function(
my_op_frac, # operator
time_step, # tau
time_seq, # vector of times
u_0, # initial state U^0
F_proj, # matrix of F
V_d, # matrix of target (desired) values
Psi, # Psi matrix
R, # R matrix
A, B, # lower/upper bounds (vector or matrix broadcastable to time grid)
mu, # positive scalar
weights,
tol = 1e-8, # scalar or named list: list(Z=..., U=..., P=...)
maxit = 200,
verbose = FALSE,
true_sol
) {
if (is.numeric(tol) && length(tol) == 1) {
tol_list <- list(Z = tol, U = tol, P = tol)
} else if (is.list(tol)) {
tol_list <- modifyList(list(Z = 1e-8, U = 1e-8, P = 1e-8), tol)
} else stop("tol must be scalar or list(Z=...,U=...,P=...)")
it <- 0
converged <- FALSE
rel_history <- data.frame(iter = integer(0), variable = character(0), value = numeric(0))
abs_history <- data.frame(iter = integer(0), variable = character(0), value = numeric(0))
z_prev <- F_proj*100
Z_mat <- R %*% Psi %*% z_prev
U_prev <- F_proj*100
P_prev <- F_proj*100
repeat {
it <- it + 1
U_mat <- solve_fractional_evolution(my_op_frac, time_step, time_seq, val_at_0 = u_0, RHST = F_proj + Z_mat)
V_mat <- reversecolumns(R %*% Psi %*% U_mat)
Q_mat <- solve_fractional_evolution(my_op_frac, time_step, time_seq, val_at_0 = u_0 * 0, RHST = V_mat - V_d)
P_mat <- reversecolumns(Q_mat)
z_new <- pmax(A, pmin(B, - P_mat / mu))
Z_mat <- R %*% Psi %*% z_new
# relative changes
rel_changes_Z <- change_comparer(z_new, z_prev, time_step, time_seq, weights, relative = TRUE)
rel_changes_U <- change_comparer(U_mat, U_prev, time_step, time_seq, weights, relative = TRUE)
rel_changes_P <- change_comparer(P_mat, P_prev, time_step, time_seq, weights, relative = TRUE)
abs_changes_Z <- change_comparer(z_new, true_sol$z_bar, time_step, time_seq, weights, relative = FALSE)
abs_changes_U <- change_comparer(U_mat, true_sol$u_bar, time_step, time_seq, weights, relative = FALSE)
abs_changes_P <- change_comparer(P_mat, true_sol$p_bar, time_step, time_seq, weights, relative = FALSE)
rel_history <- rbind(rel_history,
data.frame(iter = it, variable = "Z", value = rel_changes_Z),
data.frame(iter = it, variable = "U", value = rel_changes_U),
data.frame(iter = it, variable = "P", value = rel_changes_P))
abs_history <- rbind(abs_history,
data.frame(iter = it, variable = "Z", value = abs_changes_Z),
data.frame(iter = it, variable = "U", value = abs_changes_U),
data.frame(iter = it, variable = "P", value = abs_changes_P))
if (verbose) {message(sprintf("iter %3d: rel(Z)=%.3e, rel(U)=%.3e, rel(P)=%.3e", it, rel_changes_Z, rel_changes_U, rel_changes_P))}
# update stored previous iterates
z_prev <- z_new
U_prev <- U_mat
P_prev <- P_mat
# convergence check: require all rel_changes <= respective tol
cond_Z <- rel_changes_Z <= tol_list$Z
cond_U <- rel_changes_U <= tol_list$U
cond_P <- rel_changes_P <= tol_list$P
if ((cond_Z && cond_U && cond_P) || it >= maxit) {
converged <- (cond_Z && cond_U && cond_P)
break
}
}
if (verbose && !converged) {
message(sprintf(
"Stopped at maxit=%d; rel_changes: Z=%.3e (tol %.3e), U=%.3e (tol %.3e), P=%.3e (tol %.3e)",
it, rel_changes_Z, tol_list$Z, rel_changes_U, tol_list$U, rel_changes_P, tol_list$P
))
}
return(list(U = U_mat, # solution U
Z = z_new, # solution z
P = P_mat, # solution P
iterations = it,
converged = converged,
tol_list = tol_list,
rel_history = rel_history,
abs_history = abs_history))
}
plot_convergence_history <- function(history_df, tol_list = NULL, relative = TRUE) {
p <- ggplot(history_df, aes(x = iter, y = value, color = variable)) +
geom_line() +
geom_point(size = 1.5) +
scale_y_log10() +
labs(
title = ifelse(relative, "|X_{iter} - X_{iter-1}| / |X_{iter}|", "|X_{exact} - X_{iter}|"),
x = "Iteration",
y = "Error",
color = "Quantity"
) +
theme_minimal()
# Add tolerance lines if provided
if (!is.null(tol_list)) {
tol_df <- data.frame(
variable = names(tol_list),
tol = unlist(tol_list)
)
p <- p + geom_hline(
data = tol_df,
aes(yintercept = tol, color = variable),
linetype = "dashed"
)
}
return(plotly::ggplotly(p))
}
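An illustrative call with a synthetic convergence history (made-up values):
# Synthetic convergence history
hist_df <- data.frame(iter = rep(1:5, each = 3), variable = rep(c("Z", "U", "P"), times = 5), value = rep(10^-(1:5), each = 3) * c(1, 2, 3))
plot_convergence_history(hist_df, tol_list = list(Z = 1e-4, U = 1e-4, P = 1e-4))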
plotting.order()
Given a vector v and a graph object graph, function plotting.order() orders the mesh values for plotting.
global.scene.setter()
Given ranges for the x, y, and z axes, and an optional aspect ratio for the z axis, function global.scene.setter() sets the scene for 3D plots so that all plots have the same aspect ratio and camera position.
# Function to set the scene for 3D plots
global.scene.setter <- function(x_range, y_range, z_range, z_aspectratio = 4) {
return(list(xaxis = list(title = "x", range = x_range),
yaxis = list(title = "y", range = y_range),
zaxis = list(title = "z", range = z_range),
aspectratio = list(x = 2*(1+2/pi),
y = 2*(2/pi),
z = z_aspectratio*(2/pi)),
camera = list(eye = list(x = (1+2/pi)/2,
y = 4,
z = 2),
center = list(x = (1+2/pi)/2,
y = 0,
z = 0))))
}
graph.plotter.3d()
Given a graph object graph, a sequence of time points time_seq, a vector frame_val_to_display of values shown in the frame title, and one or more matrices ... representing function values defined on the mesh of graph at each time in time_seq, the graph.plotter.3d() function generates an interactive 3D visualization of these values over time.
# Function to plot in 3D
graph.plotter.3d <- function(graph, time_seq, frame_val_to_display, ...) {
U_list <- list(...)
U_names <- sapply(substitute(list(...))[-1], deparse)
# Spatial coordinates
x <- plotting.order(graph$mesh$V[, 1], graph)
y <- plotting.order(graph$mesh$V[, 2], graph)
weights <- graph$mesh$weights
# Apply plotting.order to each U
U_list <- lapply(U_list, function(U) apply(U, 2, plotting.order, graph = graph))
n_vars <- length(U_list)
# Create plot_data frame with time and position replicated
n_time <- ncol(U_list[[1]])
base_data <- data.frame(
x = rep(x, times = n_time),
y = rep(y, times = n_time),
the_graph = 0,
frame = rep(time_seq, each = length(x))
)
# Add U columns to plot_data
for (i in seq_along(U_list)) {
base_data[[paste0("u", i)]] <- as.vector(U_list[[i]])
}
plot_data <- base_data
# Generate vertical lines
vertical_lines_list <- lapply(seq_along(U_list), function(i) {
do.call(rbind, lapply(time_seq, function(t) {
idx <- which(plot_data$frame == t)
z_vals <- plot_data[[paste0("u", i)]][idx]
data.frame(
x = rep(plot_data$x[idx], each = 3),
y = rep(plot_data$y[idx], each = 3),
z = as.vector(t(cbind(0, z_vals, NA))),
frame = rep(t, each = length(idx) * 3)
)
}))
})
# Set axis ranges
z_range <- range(unlist(U_list))
x_range <- range(x)
y_range <- range(y)
# Create plot
p <- plot_ly(plot_data, frame = ~frame) %>%
add_trace(x = ~x, y = ~y, z = ~the_graph, type = "scatter3d", mode = "lines",
name = "", showlegend = FALSE,
line = list(color = "black", width = 3))
# Add traces for each variable
colors <- RColorBrewer::brewer.pal(min(n_vars, 8), "Set1")
for (i in seq_along(U_list)) {
p <- add_trace(p,
x = ~x, y = ~y, z = as.formula(paste0("~u", i)),
type = "scatter3d", mode = "lines", name = U_names[i],
line = list(color = colors[i], width = 3))
}
# Add vertical lines
for (i in seq_along(vertical_lines_list)) {
p <- add_trace(p,
data = vertical_lines_list[[i]],
x = ~x, y = ~y, z = ~z, frame = ~frame,
type = "scatter3d", mode = "lines",
line = list(color = "gray", width = 0.5),
name = "Vertical lines",
showlegend = FALSE)
}
frame_name <- deparse(substitute(frame_val_to_display))
# Layout and animation controls
p <- p %>%
layout(
scene = global.scene.setter(x_range, y_range, z_range),
updatemenus = list(list(type = "buttons", showactive = FALSE,
buttons = list(
list(label = "Play", method = "animate",
args = list(NULL, list(frame = list(duration = 2000 / length(time_seq), redraw = TRUE), fromcurrent = TRUE))),
list(label = "Pause", method = "animate",
args = list(NULL, list(mode = "immediate", frame = list(duration = 0), redraw = FALSE)))
)
)),
title = paste0(frame_name,": ", formatC(frame_val_to_display[1], format = "f", digits = 4))
) %>%
plotly_build()
for (i in seq_along(p$x$frames)) {
p$x$frames[[i]]$layout <- list(title = paste0(frame_name,": ", formatC(frame_val_to_display[i], format = "f", digits = 4)))
}
return(p)
}
error.at.each.time.plotter()
Given a graph object graph, a matrix U_true of true values, a matrix U_approx of approximated values, a sequence of time points time_seq, and a time step time_step, function error.at.each.time.plotter() computes the error at each time step and generates a plot showing the error over time.
# Function to plot the error at each time step
error.at.each.time.plotter <- function(graph, U_true, U_approx, time_seq, time_step) {
weights <- graph$mesh$weights
error_at_each_time <- t(weights) %*% (U_true - U_approx)^2
error <- sqrt(as.double(t(weights) %*% (U_true - U_approx)^2 %*% rep(time_step, ncol(U_true))))
p <- plot_ly() %>%
add_trace(
x = ~time_seq, y = ~error_at_each_time, type = 'scatter', mode = 'lines+markers',
line = list(color = 'blue', width = 2),
marker = list(color = 'blue', size = 4),
name = "",
showlegend = TRUE
) %>%
layout(
title = paste0("Error at Each Time Step (Total error = ", formatC(error, format = "f", digits = 9), ")"),
xaxis = list(title = "t"),
yaxis = list(title = "Error"),
legend = list(x = 0.1, y = 0.9)
)
return(p)
}
graph.plotter.3d.comparer()
Given a graph object graph, matrices U_true and U_approx representing true and approximated values, and a sequence of time points time_seq, function graph.plotter.3d.comparer() generates a 3D plot comparing the true and approximated values over time, with color-coded traces for each time point.
# Function to plot the 3D comparison of U_true and U_approx
graph.plotter.3d.comparer <- function(graph, U_true, U_approx, time_seq) {
x <- graph$mesh$V[, 1]; y <- graph$mesh$V[, 2]
x <- plotting.order(x, graph); y <- plotting.order(y, graph)
U_true <- apply(U_true, 2, plotting.order, graph = graph)
U_approx <- apply(U_approx, 2, plotting.order, graph = graph)
n_times <- length(time_seq)
x_range <- range(x); y_range <- range(y); z_range <- range(c(U_true, U_approx))
# Normalize time_seq
time_normalized <- (time_seq - min(time_seq)) / (max(time_seq) - min(time_seq))
blues <- colorRampPalette(c("lightblue", "blue"))(n_times)
reds <- colorRampPalette(c("mistyrose", "red"))(n_times)
# Accurate colorscales
colorscale_greens <- Map(function(t, col) list(t, col), time_normalized, blues)
colorscale_reds <- Map(function(t, col) list(t, col), time_normalized, reds)
p <- plot_ly()
# Static black graph structure
p <- p %>%
add_trace(x = x, y = y, z = rep(0, length(x)),
type = "scatter3d", mode = "lines",
line = list(color = "black", width = 4),
name = "Graph", showlegend = FALSE)
# U_true traces (blue)
for (i in seq_len(n_times)) {
z <- U_true[, i]
p <- add_trace(
p,
type = "scatter3d",
mode = "lines",
x = x, y = y, z = z,
line = list(color = blues[i], width = 4),
showlegend = FALSE,
scene = "scene"
)
}
# U_approx traces (dotted red)
for (i in seq_len(n_times)) {
z <- U_approx[, i]
p <- add_trace(
p,
type = "scatter3d",
mode = "lines",
x = x, y = y, z = z,
line = list(color = reds[i], width = 4, dash = "dot"),
showlegend = FALSE,
scene = "scene"
)
}
# Dummy colorbar for the true solution (blue) – tick labels hidden
p <- add_trace(
p,
type = "heatmap",
z = matrix(time_seq, nrow = 1),
showscale = TRUE,
colorscale = colorscale_greens,
colorbar = list(
title = list(font = list(size = 12, color = "black"), text = "Time", side = "top"),
len = 0.9,
thickness = 15,
x = 1.02,
xanchor = "left",
y = 0.5,
yanchor = "middle",
tickvals = NULL, # hide tick values
ticktext = NULL,
ticks = "" # also hides tick marks
),
x = matrix(time_seq, nrow = 1),
y = matrix(1, nrow = 1),
hoverinfo = "skip",
opacity = 0
)
# Dummy colorbar for the approximated solution (red)
p <- add_trace(
p,
type = "heatmap",
z = matrix(time_seq, nrow = 1),
showscale = TRUE,
colorscale = colorscale_reds,
colorbar = list(
title = list(font = list(size = 12, color = "black"), text = ".", side = "top"),
len = 0.9,
thickness = 15,
x = 1.05,
xanchor = "left",
y = 0.5,
yanchor = "middle"
),
x = matrix(time_seq, nrow = 1),
y = matrix(1, nrow = 1),
hoverinfo = "skip",
opacity = 0
)
p <- p %>%
add_trace(x = x, y = y, z = rep(0, length(x)),
type = "scatter3d", mode = "lines",
line = list(color = "black", width = 4),
name = "Graph", showlegend = FALSE)
p <- layout(p,
scene = global.scene.setter(x_range, y_range, z_range),
xaxis = list(visible = FALSE),
yaxis = list(visible = FALSE),
annotations = list(
list(
text = "Exact",
x = 1.045,
y = 0.5,
xref = "paper",
yref = "paper",
showarrow = FALSE,
font = list(size = 12, color = "black"),
textangle = -90
),
list(
text = "Approx",
x = 1.075,
y = 0.5,
xref = "paper",
yref = "paper",
showarrow = FALSE,
font = list(size = 12, color = "black"),
textangle = -90
)
)
)
return(p)
}
graph.plotter.3d.single()
Given a graph object graph, a matrix U_true representing true values, and a sequence of time points time_seq, function graph.plotter.3d.single() generates a 3D plot of the true values over time, with color-coded traces for each time point.
# Function to plot a single solution in 3D with time-colored traces
graph.plotter.3d.single <- function(graph, U_true, time_seq) {
x <- graph$mesh$V[, 1]; y <- graph$mesh$V[, 2]
x <- plotting.order(x, graph); y <- plotting.order(y, graph)
U_true <- apply(U_true, 2, plotting.order, graph = graph)
n_times <- length(time_seq)
x_range <- range(x); y_range <- range(y); z_range <- range(U_true)
z_range[1] <- z_range[1] - 10^-6
viridis_colors <- viridisLite::viridis(100)
# Normalize time_seq
time_normalized <- (time_seq - min(time_seq)) / (max(time_seq) - min(time_seq))
#greens <- colorRampPalette(c("palegreen", "darkgreen"))(n_times)
greens <- colorRampPalette(c(viridis_colors[1], viridis_colors[50], viridis_colors[100]))(n_times)
# Accurate colorscales
colorscale_greens <- Map(function(t, col) list(t, col), time_normalized, greens)
p <- plot_ly()
# Add the 3D lines with time-graded viridis colors
for (i in seq_len(n_times)) {
z <- U_true[, i]
p <- add_trace(
p,
type = "scatter3d",
mode = "lines",
x = x,
y = y,
z = z,
line = list(color = greens[i], width = 2),
showlegend = FALSE,
scene = "scene"
)
}
p <- p %>%
add_trace(x = x, y = y, z = rep(0, length(x)),
type = "scatter3d", mode = "lines",
line = list(color = "black", width = 5),
name = "Graph", showlegend = FALSE)
# Add dummy heatmap to show colorbar (not part of scene)
p <- add_trace(
p,
type = "heatmap",
z = matrix(time_seq, nrow = 1),
showscale = TRUE,
colorscale = colorscale_greens,
colorbar = list(
title = list(font = list(size = 12, color = "black"), text = "Time", side = "top"),
len = 0.9, # height (0 to 1)
thickness = 15, # width in pixels
x = 1.02, # shift it slightly right of the plot
xanchor = "left",
y = 0.5,
yanchor = "middle"),
x = matrix(time_seq, nrow = 1),
y = matrix(1, nrow = 1),
hoverinfo = "skip",
opacity = 0
)
p <- layout(p,
scene = global.scene.setter(x_range, y_range, z_range),
xaxis = list(visible = FALSE),
yaxis = list(visible = FALSE)
)
return(p)
}
error.convergence.plotter()
Given a vector x_axis_vector of mesh sizes or time steps, a vector alpha_vector of smoothness parameters, a matrix errors with one column of errors per value of alpha, vectors theoretical_rates and observed_rates of convergence rates, a guiding-line constructor line_equation_fun, a figure title fig_title, an x-axis label x_axis_label, and a flag apply_sqrt, function error.convergence.plotter() plots the errors together with dashed guiding lines for the theoretical rates.
# Function to plot the error convergence
error.convergence.plotter <- function(x_axis_vector,
alpha_vector,
errors,
theoretical_rates,
observed_rates,
line_equation_fun,
fig_title,
x_axis_label,
apply_sqrt = FALSE) {
x_vec <- if (apply_sqrt) sqrt(x_axis_vector) else x_axis_vector
guiding_lines <- compute_guiding_lines(x_axis_vector = x_vec,
errors = errors,
theoretical_rates = theoretical_rates,
line_equation_fun = line_equation_fun)
default_colors <- scales::hue_pal()(length(alpha_vector))
plot_lines <- lapply(1:ncol(guiding_lines), function(i) {
geom_line(
data = data.frame(x = x_vec, y = guiding_lines[, i]),
aes(x = x, y = y),
color = default_colors[i],
linetype = "dashed",
show.legend = FALSE
)
})
df <- as.data.frame(cbind(x_vec, errors))
colnames(df) <- c("x_axis_vector", alpha_vector)
df_melted <- melt(df, id.vars = "x_axis_vector", variable.name = "column", value.name = "value")
custom_labels <- paste0(formatC(alpha_vector, format = "f", digits = 2),
" | ",
formatC(theoretical_rates, format = "f", digits = 4),
" | ",
formatC(observed_rates, format = "f", digits = 4))
df_melted$column <- factor(df_melted$column, levels = alpha_vector, labels = custom_labels)
p <- ggplot() +
geom_line(data = df_melted, aes(x = x_axis_vector, y = value, color = column)) +
geom_point(data = df_melted, aes(x = x_axis_vector, y = value, color = column)) +
plot_lines +
labs(
title = fig_title,
x = x_axis_label,
y = expression(Error),
color = " α | theo | obs"
) +
(if (apply_sqrt) {
scale_x_continuous(breaks = x_vec, labels = round(x_axis_vector, 4))
} else {
scale_x_log10(breaks = x_axis_vector, labels = round(x_axis_vector, 4))
}) +
(if (apply_sqrt) {
scale_y_continuous(trans = "log", labels = scales::scientific_format())
} else {
scale_y_log10(labels = scales::scientific_format())
}) +
theme_minimal() +
theme(text = element_text(family = "Palatino"),
legend.position = "bottom",
legend.direction = "vertical",
plot.margin = margin(0, 0, 0, 0),
plot.title = element_text(hjust = 0.5, size = 18, face = "bold"))
return(p)
}
graph.plotter.3d.static <- function(graph, ...) {
x <- plotting.order(graph$mesh$V[, 1], graph)
y <- plotting.order(graph$mesh$V[, 2], graph)
z_list <- list(...)
z_list <- lapply(z_list, function(z) plotting.order(z, graph))
U_names <- sapply(substitute(list(...))[-1], deparse)
# Axis ranges
z_range <- range(unlist(z_list))
x_range <- range(x)
y_range <- range(y)
p <- plot_ly()
colors <- RColorBrewer::brewer.pal(max(length(z_list), 3), "Set1")
for (i in seq_along(z_list)) {
z <- z_list[[i]]
# Main 3D curve
p <- add_trace(
p,
x = x, y = y, z = z,
type = "scatter3d", mode = "lines",
line = list(color = colors[i], width = 3),
name = U_names[i], showlegend = TRUE
)
# Efficient vertical lines: one trace with breaks (NA)
x_vert <- rep(x, each = 3)
y_vert <- rep(y, each = 3)
z_vert <- unlist(lapply(z, function(zj) c(0, zj, NA)))
p <- add_trace(
p,
x = x_vert, y = y_vert, z = z_vert,
type = "scatter3d", mode = "lines",
line = list(color = "gray", width = 0.5),
showlegend = FALSE
)
}
p <- p %>% add_trace(x = x, y = y, z = x*0, type = "scatter3d", mode = "lines",
line = list(color = "black", width = 3),
name = "thegraph", showlegend = FALSE) %>%
layout(scene = global.scene.setter(x_range, y_range, z_range))
return(p)
}
We used R version 4.5.0 (R Core Team 2025) and the following R packages: gsignal v. 0.3.7 (Van Boxtel, G.J.M., et al. 2021), here v. 1.0.1 (Müller 2020), htmltools v. 0.5.8.1 (Cheng et al. 2024), knitr v. 1.50 (Xie 2014, 2015, 2025), Matrix v. 1.7.3 (Bates, Maechler, and Jagan 2025), MetricGraph v. 1.5.0.9000 (Bolin, Simas, and Wallin 2023a, 2023b, 2024, 2025; Bolin et al. 2024), patchwork v. 1.3.1 (Pedersen 2025), plotly v. 4.10.4 (Sievert 2020), RColorBrewer v. 1.1.3 (Neuwirth 2022), renv v. 1.0.7 (Ushey and Wickham 2024), reshape2 v. 1.4.4 (Wickham 2007), rmarkdown v. 2.29 (Xie, Allaire, and Grolemund 2018; Xie, Dervieux, and Riederer 2020; Allaire et al. 2024), rSPDE v. 2.5.1.9000 (Bolin and Kirchner 2020; Bolin and Simas 2023; Bolin, Simas, and Xiong 2024), scales v. 1.4.0 (Wickham, Pedersen, and Seidel 2025), slackr v. 3.4.0 (Kaye et al. 2025), tidyverse v. 2.0.0 (Wickham et al. 2019), viridisLite v. 0.4.2 (Garnier et al. 2023), xaringanExtra v. 0.8.0 (Aden-Buie and Warkentin 2024).