Hi, experts
I wrote the code exactly as in this article ("An intuitive Bayesian spatial model for disease mapping that accounts for scaling"), but it crashed. What is the problem? Here is my code:
# Fit a BYM2 spatial model (Riebler et al. 2016) to regional case counts.
# Forward slashes work in R paths on Windows; avoid mixing "\\" and "/".
r.map <- "I:/CAR/result/Siberian_ADM1.adj"   # adjacency graph file
inc <- read.csv("I:/CAR/data/final.csv")     # per-region incidence data
y <- inc$num_positi                          # observed positive counts
n <- nrow(inc)
inc$region <- 1:n                            # region index, must match graph order
#head(inc)
# ID population num_positi incidence E region
# 1 1539885 25 2 1002 1
# 2 4004984 29 1 2605 2
# 3 3718657 27 1 2419 3
# 4 2611446 161 6 1699 4
# 5 894251 566 63 582 5
# 6 1319145 43 3 858 6
# BYM2: scaled ICAR + iid components with PC priors on the mixing
# parameter phi and the marginal precision.
f_car <- y ~ 1 + f(region, model = "bym2",
                   graph = r.map,
                   scale.model = TRUE,   # scale the ICAR so the precision prior is interpretable
                   constr = TRUE,        # sum-to-zero constraint on the spatial effect
                   hyper = list(
                     phi = list(
                       prior = "pc",
                       param = c(0.5, 2/3),       # P(phi < 0.5) = 2/3
                       initial = -3),
                     prec = list(
                       prior = "pc.prec",
                       param = c(0.2/0.31, 0.01), # P(sigma > 0.2/0.31) = 0.01
                       initial = 5)))
# Poisson likelihood with expected counts E as the offset (column E of inc).
result <- inla(f_car, family = "poisson", data = inc, E = E,
               control.predictor = list(compute = TRUE),
               verbose = TRUE)
###console###
Read ntt 24 1 with max.threads 32
Found num.threads = 24:1 max_threads = 24
1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Report bugs to <he...@r-inla.org>
Set reordering to id=[0] and name=[default]
Process file[E:\temp\RtmpYTb9Is\file759425dd37d9/Model.ini] threads[24] max.threads[32] blas_threads[1] nested[24:1]
inla_build...
number of sections=[9]
parse section=[0] name=[INLA.libR] type=[LIBR]
inla_parse_libR...
section[INLA.libR]
R_HOME=[d:/softwares/R/R-4.1.1]
parse section=[7] name=[INLA.Expert] type=[EXPERT]
inla_parse_expert...
section[INLA.Expert]
disable.gaussian.check=[0]
cpo.manual=[0]
jp.file=[(null)]
jp.model=[(null)]
parse section=[1] name=[INLA.Model] type=[PROBLEM]
inla_parse_problem...
name=[INLA.Model]
R-INLA version=[21.02.23]
R-INLA build date=[Mon Feb 22 11:58:09 PM +03 2021]
Build tag=[Version_21.02.23]
openmp.strategy=[default]
pardiso-library installed and working? = [no]
smtp = [taucs]
strategy = [default]
store results in directory=[E:\temp\RtmpYTb9Is\file759425dd37d9/results.files]
output:
cpo=[0]
po=[0]
dic=[0]
kld=[1]
mlik=[1]
q=[0]
graph=[0]
gdensity=[0]
hyperparameters=[1]
summary=[1]
return.marginals=[1]
nquantiles=[3] [ 0.025 0.5 0.975 ]
ncdf=[0] [ ]
parse section=[3] name=[Predictor] type=[PREDICTOR]
inla_parse_predictor ...
section=[Predictor]
dir=[predictor]
PRIOR->name=[loggamma]
hyperid=[53001|Predictor]
PRIOR->from_theta=[function (x) <<NEWLINE>>exp(x)]
PRIOR->to_theta = [function (x) <<NEWLINE>>log(x)]
PRIOR->PARAMETERS=[1, 1e-005]
initialise log_precision[12]
fixed=[1]
user.scale=[1]
vb.correct=[0]
n=[17]
m=[0]
ndata=[17]
compute=[1]
read offsets from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088]
read n=[34] entries from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088]
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 0/17 (idx,y) = (0, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 1/17 (idx,y) = (1, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 2/17 (idx,y) = (2, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 3/17 (idx,y) = (3, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 4/17 (idx,y) = (4, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 5/17 (idx,y) = (5, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 6/17 (idx,y) = (6, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 7/17 (idx,y) = (7, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 8/17 (idx,y) = (8, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594199b1088] 9/17 (idx,y) = (9, 0)
Aext=[(null)]
AextPrecision=[1e+008]
output:
summary=[1]
return.marginals=[1]
nquantiles=[3] [ 0.025 0.5 0.975 ]
ncdf=[0] [ ]
parse section=[2] name=[INLA.Data1] type=[DATA]
inla_parse_data [section 1]...
tag=[INLA.Data1]
family=[POISSON]
likelihood=[POISSON]
file->name=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75944b261baa]
file->name=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75942feb58b6]
file->name=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759458006ed0]
read n=[51] entries from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75944b261baa]
mdata.nattributes = 0
0/17 (idx,a,y,d) = (0, 1002, 25, 1)
1/17 (idx,a,y,d) = (1, 2605, 29, 1)
2/17 (idx,a,y,d) = (2, 2419, 27, 1)
3/17 (idx,a,y,d) = (3, 1699, 161, 1)
4/17 (idx,a,y,d) = (4, 582, 566, 1)
5/17 (idx,a,y,d) = (5, 858, 43, 1)
6/17 (idx,a,y,d) = (6, 349, 1177, 1)
7/17 (idx,a,y,d) = (7, 1575, 1112, 1)
8/17 (idx,a,y,d) = (8, 905, 2231, 1)
9/17 (idx,a,y,d) = (9, 1044, 7040, 1)
likelihood.variant=[0]
Link model [LOG]
Link order [-1]
Link variant [-1]
Link a [1]
Link ntheta [0]
mix.use[0]
parse section=[5] name=[region] type=[FFIELD]
inla_parse_ffield...
section=[region]
dir=[random.effect00000001]
model=[bym2]
PRIOR0->name=[pcprec]
hyperid=[11001|region]
PRIOR0->from_theta=[function (x) <<NEWLINE>>exp(x)]
PRIOR0->to_theta = [function (x) <<NEWLINE>>log(x)]
PRIOR0->PARAMETERS0=[0.645161 0.01]
PRIOR1->name=[table: E]
hyperid=[11002|region]
PRIOR1->from_theta=[function (x) <<NEWLINE>>exp(x)/(1 + exp(x))]
PRIOR1->to_theta = [function (x) <<NEWLINE>>log(x/(1 - x))]
PRIOR1->table=[table: E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759442346d]
vb.correct=[-1]
correct=[-1]
constr=[0]
diagonal=[1.01511e-005]
id.names=<not present>
compute=[1]
nrep=[1]
ngroup=[1]
read covariates from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5]
read n=[34] entries from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5]
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 0/17 (idx,y) = (0, 0)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 1/17 (idx,y) = (1, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 2/17 (idx,y) = (2, 2)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 3/17 (idx,y) = (3, 3)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 4/17 (idx,y) = (4, 4)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 5/17 (idx,y) = (5, 5)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 6/17 (idx,y) = (6, 6)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 7/17 (idx,y) = (7, 7)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 8/17 (idx,y) = (8, 8)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba5] 9/17 (idx,y) = (9, 9)
read graph from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759422e36d7]
file for locations=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759440a77d80]
nlocations=[17]
locations[0]=[1]
locations[1]=[2]
locations[2]=[3]
locations[3]=[4]
locations[4]=[5]
locations[5]=[6]
locations[6]=[7]
locations[7]=[8]
locations[8]=[9]
locations[9]=[10]
initialise log_precision [5]
fixed=[0]
initialise phi_intern [-3]
fixed=[0]
adjust.for.con.comp[1]
scale.model[1]
connected component[0] size[17] scale[0.530471]
scale.model: prec_scale[0.530471]
read extra constraint from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file7594b9633bb]
Constraint[0]
A[17] = 1.000000
A[18] = 1.000000
A[19] = 1.000000
A[20] = 1.000000
A[21] = 1.000000
A[22] = 1.000000
A[23] = 1.000000
A[24] = 1.000000
A[25] = 1.000000
A[26] = 1.000000
A[27] = 1.000000
e[0] = 0.000000
rank-deficiency is *defined* [1]
output:
summary=[1]
return.marginals=[1]
nquantiles=[3] [ 0.025 0.5 0.975 ]
ncdf=[0] [ ]
section=[4] name=[(Intercept)] type=[LINEAR]
inla_parse_linear...
section[(Intercept)]
dir=[fixed.effect00000001]
file for covariates=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f]
read n=[34] entries from file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f]
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 0/17 (idx,y) = (0, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 1/17 (idx,y) = (1, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 2/17 (idx,y) = (2, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 3/17 (idx,y) = (3, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 4/17 (idx,y) = (4, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 5/17 (idx,y) = (5, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 6/17 (idx,y) = (6, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 7/17 (idx,y) = (7, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 8/17 (idx,y) = (8, 1)
file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798f] 9/17 (idx,y) = (9, 1)
prior mean=[0]
prior precision=[0]
compute=[1]
output:
summary=[1]
return.marginals=[1]
nquantiles=[3] [ 0.025 0.5 0.975 ]
ncdf=[0] [ ]
parse section=[8] name=[INLA.pardiso] type=[PARDISO]
inla_parse_pardiso...
section[INLA.pardiso]
verbose[0]
debug[0]
parallel.reordering[1]
nrhs[-1]
Index table: number of entries[3], total length[52]
tag start-index length
Predictor 0 17
region 17 34
(Intercept) 51 1
parse section=[6] name=[INLA.Parameters] type=[INLA]
inla_parse_INLA...
section[INLA.Parameters]
lincomb.derived.only = [Yes]
lincomb.derived.correlation.matrix = [No]
global_node.factor = 2.000
global_node.degree = 2147483647
reordering = -1
constr.marginal.diagonal = 1.49e-008
Contents of ai_param 0000000004610d90
Optimiser: DEFAULT METHOD
Option for GSL-BFGS2: tol = 0.1
Option for GSL-BFGS2: step_size = 1
Option for GSL-BFGS2: epsx = 0.005
Option for GSL-BFGS2: epsf = 0.01
Option for GSL-BFGS2: epsg = 0.005
Restart: 0
Optimise: try to be smart: Yes
Optimise: use directions: Yes
Mode known: No
Gaussian approximation:
tolerance_func = 0.0005
tolerance_step = 0.0005
optpar_fp = 0
optpar_nr_step_factor = -0.1
Gaussian data: No
Strategy: Use a mean-skew corrected Gaussian by fitting a Skew-Normal
Fast mode: On
Use linear approximation to log(|Q +c|)? Yes
Method: Compute the derivative exact
Parameters for improved approximations
Number of points evaluate: 9
Step length to compute derivatives numerically: 0.000100002
Stencil to compute derivatives numerically: 5
Cutoff value to construct local neigborhood: 0.0001
Log calculations: On
Log calculated marginal for the hyperparameters: On
Integration strategy: Automatic (GRID for dim(theta)=1 and 2 and otherwise CCD)
f0 (CCD only): 1.100
dz (GRID only): 0.750
Adjust weights (GRID only): On
Difference in log-density limit (GRID only): 6.000
Skip configurations with (presumed) small density (GRID only): On
Gradient is computed using Central difference with step-length 0.005000
Hessian is computed using Central difference with step-length 0.070711
Hessian matrix is forced to be a diagonal matrix? [No]
Compute effective number of parameters? [Yes]
Perform a Monte Carlo error-test? [No]
Interpolator [Auto]
CPO required diff in log-density [3]
Stupid search mode:
Status [On]
Max iter [1000]
Factor [1.05]
Numerical integration of hyperparameters:
Maximum number of function evaluations [100000]
Relative error ....................... [1e-005]
Absolute error ....................... [1e-006]
To stabilise the numerical optimisation:
Minimum value of the -Hessian [-inf]
Strategy for the linear term [Keep]
CPO manual calculation[No]
VB-correction is [Disabled]
Laplace-correction is Disabled.
inla_build: check for unused entries in[E:\temp\RtmpYTb9Is\file759425dd37d9/Model.ini]
inla_INLA...
Strategy = [DEFAULT]
Sparse-matrix library.... [taucs]
OpenMP strategy.......... [small]
num.threads.............. [24:1]
blas.num.threads......... [1]
Density-strategy......... [High]
Size of graph............ [52]
Number of constraints.... [1]
Found optimal reordering=[amdc] nnz(L)=[169] and use_global_nodes(user)=[no]
List of hyperparameters:
theta[0] = [Log precision for region]
theta[1] = [Logit phi for region]
Optimise using DEFAULT METHOD
Smart optimise part I: estimate gradient using forward differences
maxld= -2302.856 fn= 1 theta= 5.005 -3.000 [21.7, 0.00]
maxld= -2293.959 fn= 2 theta= 5.000 -2.995 [21.7, 0.00]
maxld= -1119.974 fn= 4 theta= 4.008 -2.875 [17.0, 0.00]
maxld= -1119.375 fn= 6 theta= 4.008 -2.870 [17.1, 0.00]
maxld= -230.253 fn= 8 theta= -4.921 -1.747 [4.5, 0.00]
maxld= -230.007 fn= 9 theta= -4.916 -1.747 [4.5, 0.00]
New directions for numerical gradient
dir01 dir02
-0.992 0.125
0.125 0.992
Iter=1 |grad|=49.1 |x-x.old|=7.07 |f-f.old|=2.06e+003
maxld= -221.969 fn= 12 theta= -4.754 -0.762 [4.5, 0.00]
maxld= -221.939 fn= 14 theta= -4.753 -0.757 [4.5, 0.00]
maxld= -170.652 fn= 16 theta= -3.246 8.111 [4.5, 0.00]
maxld= -170.637 fn= 18 theta= -3.245 8.116 [4.5, 0.00]
file: smtp-taucs.c 1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Function: GMRFLib_build_sparse_matrix_TAUCS(), Line: 759, Thread: 0
Variable evaluates to NAN or INF. idx=(17,17). I will try to fix it...
file: smtp-taucs.c 1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Function: GMRFLib_build_sparse_matrix_TAUCS(), Line: 759, Thread: 0
Variable evaluates to NAN or INF. idx=(17,17). I will try to fix it...
file: smtp-taucs.c 1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Function: GMRFLib_build_sparse_matrix_TAUCS(), Line: 759, Thread: 0
Variable evaluates to NAN or INF. idx=(17,17). I will try to fix it...
file: smtp-taucs.c 1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Function: GMRFLib_build_sparse_matrix_TAUCS(), Line: 759, Thread: 0
Variable evaluates to NAN or INF. idx=(17,17). I will try to fix it...
GMRFLib version 3.0-0-snapshot, has recived error no [12]
Reason : The Newton-Raphson optimizer did not converge
Message : Condition `lambda < 1.0 / lambda_lim' is not TRUE
Function : GMRFLib_init_GMRF_approximation_store__intern
File : approx-inference.c
Line : 2953
GitID : file: approx-inference.c 1f6a39183ef43d8ef33f10ff3f04fd13f8432758 - Mon Feb 22 21:27:50 2021 +0300
Error in inla.inlaprogram.has.crashed() :
The inla-program exited with an error. Unless you interupted it yourself, please rerun with verbose=TRUE and check the output carefully.
If this does not help, please contact the developers at <he...@r-inla.org>.
--######## libraries #########
library(INLA)
library(sf)     # read_sf() comes from sf, which was not loaded before
library(sp)
library(spdep)
#### Read map
rus.map <- read_sf("siberia_0526.shp")
### Generate the adjacency graph from the polygons
rus.map.nb <- poly2nb(rus.map)
nb2INLA("rus.map.nb.adj", rus.map.nb)   # paste() around a single string was a no-op
rus.adj <- inla.read.graph("rus.map.nb.adj")
### Read data
inc <- read.csv("data.csv")
y <- inc$num_positi          # observed positive counts
n <- nrow(inc)
inc$region <- 1:n            # region index, must match the graph order
# NOTE: the original `inc$factor <- c()` assigned NULL (removes a
# non-existent column) and was dropped as a no-op.
#########
# BYM2 model with PC priors (see Riebler et al. 2016).
f_car <- y ~ 1 + f(region, model = "bym2",
                   graph = rus.adj,
                   scale.model = TRUE,   # scale the ICAR component
                   constr = TRUE,        # sum-to-zero constraint
                   hyper = list(
                     phi = list(
                       prior = "pc",
                       param = c(0.5, 2/3),       # P(phi < 0.5) = 2/3
                       initial = -3),
                     prec = list(
                       prior = "pc.prec",
                       param = c(0.2/0.31, 0.01), # P(sigma > 0.2/0.31) = 0.01
                       initial = 5)))
##########
# Poisson likelihood with expected counts E (column of inc) as offset.
result <- inla(f_car, family = "poisson", data = inc, E = E,
               control.predictor = list(compute = TRUE),
               verbose = TRUE)
summary(result)
Hi,
By the way, when I add an "iid" term, the code works. But theoretically the BYM2 model should not need an extra "iid" component. Confused. The code
is as follows.
# Workaround variant: an extra iid effect alongside bym2.
# NOTE(review): bym2 already contains an iid component internally, so this
# extra term should not be needed in theory — it only stabilised the fit here.
loc_id <- inc$region   # duplicate index: each f() term needs a distinct variable
f_car <- y ~ 1 + f(loc_id, model = "iid") +
  f(region, model = "bym2",
    # Pass the graph OBJECT, not the string 'rus.adj' (no such file exists).
    graph = rus.adj,
    scale.model = TRUE,
    constr = TRUE,
    hyper = list(
      phi = list(
        prior = "pc",
        param = c(0.5, 2/3),       # P(phi < 0.5) = 2/3
        initial = -3),
      prec = list(
        prior = "pc.prec",
        param = c(0.2/0.31, 0.01), # P(sigma > 0.2/0.31) = 0.01
        initial = 5)))
####
summary(result)
# Call:
# c("inla(formula = f_car, family = \"poisson\", data = inc, E = E, ", " verbose = T,
# control.predictor = list(compute = TRUE))")
# Time used:
# Pre = 9.13, Running = 1.48, Post = 0.073, Total = 10.7
# Fixed effects:
# mean sd 0.025quant 0.5quant 0.975quant mode kld
# (Intercept) -0.863 0.113 -1.106 -0.863 -0.622 -0.863 0
#
# Random effects:
# Name Model
# loc_id IID model
# inc|S|ID BYM2 model
#
# Model hyperparameters:
# mean sd 0.025quant 0.5quant 0.975quant mode
# Precision for loc_id 2.07e+04 2.24e+04 1464.027 1.40e+04 8.04e+04 4027.267
# Precision for inc|S|ID 8.27e-01 2.32e-01 0.454 7.99e-01 1.36e+00 0.748
# Phi for inc|S|ID 8.81e-01 1.33e-01 0.495 9.30e-01 9.97e-01 0.994
#
# Expected number of effective parameters(stdev): 16.80(0.055)
# Number of equivalent replicates : 1.01
#
# Marginal log-Likelihood: -128.84
# Posterior marginals for the linear predictor and
# the fitted values are computed
library(INLA)
library(sf)     # read_sf() comes from sf, which was not loaded before
library(sp)
library(spdep)
#### Read map
rus.map <- read_sf("siberia_0526.shp")
### Generate the adjacency graph from the polygons
rus.map.nb <- poly2nb(rus.map)
nb2INLA("rus.map.nb.adj", rus.map.nb)   # paste() around a single string was a no-op
rus.adj <- inla.read.graph("rus.map.nb.adj")
### Read data
inc <- read.csv("data2.csv")
inc$y <- inc$num_positi        # response kept inside the data frame
inc$region <- 1:nrow(inc)      # region index, must match the graph order
#########
# BYM2 model with PC priors (see Riebler et al. 2016).
f_car <- y ~ 1 + f(region, model = "bym2",
                   graph = rus.adj,
                   scale.model = TRUE,   # scale the ICAR component
                   constr = TRUE,        # sum-to-zero constraint
                   hyper = list(
                     phi = list(
                       prior = "pc",
                       param = c(0.5, 2/3),       # P(phi < 0.5) = 2/3
                       initial = -3),
                     prec = list(
                       prior = "pc.prec",
                       # Changed from 0.2/0.31 to 0.2*0.31: a much smaller U
                       # in P(sigma > U) = 0.01, i.e. a tighter prior on the
                       # random-effect standard deviation.
                       param = c(0.2*0.31, 0.01),
                       initial = 5)))
##########
# Poisson likelihood with expected counts E (column of inc) as offset.
result <- inla(f_car, family = "poisson", data = inc, E = E,
               control.predictor = list(compute = TRUE),
               verbose = TRUE)
summary(result)
> > From: INLA help <he...@r-inla.org>
> > Sent: Thursday, November 3, 2022 9:50:57 AM
> > To: newer <wenlo...@gmail.com>; R-inla discussion group
> > <r-inla-disc...@googlegroups.com>
> > Subject: Re: [r-inla] problems with bym2
> >
> > You might have to simplify the model with n=17 data points
> >
> > Sent from Outlook for iOS
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba
> > 5]
> >
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba
> > 5] 0/17 (idx,y) = (0, 0)
> >
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file759412536ba
> > read n=[34] entries from
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798
> > f]
> >
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798
> > f] 0/17 (idx,y) = (0, 1)
> >
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798
> > f] 1/17 (idx,y) = (1, 1)
> >
> > file=[E:/temp/RtmpYTb9Is/file759425dd37d9/data.files/file75946e99798
--
Håvard Rue
he...@r-inla.org