Unofficial supplemental materials for replicating the analyses in: B. W. Rolek, D. J. Harrison, D. W. Linden, C. S. Loftin, and P. B. Wood. 2021. Habitat associations of breeding conifer-associated birds in managed and regenerating forested stands. Forest Ecology and Management.
This RMarkdown document provides R and JAGS code for analyses of abundance in response to vegetation, forestry treatments, and years since harvest.
#############################################
# This file is not needed to replicate the analyses.
# We provide the simplest versions of the models used,
# as templates for other studies.
#############################################
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
# set up data
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
####################################
# (1) basic Poisson model
####################################
cat("
model {
##### PRIORS ###############################################
pa.beta <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta ~ dunif(0, 250)
lam.beta ~ dnorm(0, 0.01)
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- pa.beta # add covariates for availability (time-removal) here
log(dist.sigma[i,t]) <- log(pp.beta) # add covariates for perceptibility (distance) here
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda[i,t])
log(lambda[i,t]) <- lam.beta # add covariates for abundance here
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
} # End model
",file="./model-basic-Poisson.txt")
for (i in 1:19){
spp <- spp.list.foc[i]
spp.num <- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4), sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) + 1
# subset the distance and removal observations to this species
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.01, 0.99),
pp.beta= runif(1, 5, 100)
) }
params <- c("pa.beta", "p.pa.beta0", "pp.beta",
"lam.beta",
"Ntot", "D", "bayesp"
)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
# Run JAGS
out <- jags(datalfoc, inits=inits, params,
"./model-basic-Poisson.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm")
)
fn<- paste( "./", spp, "_basic-pois.RData", sep="" )
save(list= c("out"), file=fn)
}
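After a fit has been saved, the output can be screened quickly; for example (the species code below is hypothetical):
# load one saved fit and screen fit and convergence (species code hypothetical)
load("./BTNW_basic-pois.RData")             # loads 'out'
out$mean$bayesp                             # ~0.5 indicates adequate fit
max(unlist(out$Rhat), na.rm = TRUE)         # R-hat near 1 indicates convergence
out$mean$D                                  # posterior mean density by year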
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
# set up data
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
###################################
# (2) basic zero-inflated Poisson model
###################################
cat("
model {
##### PRIORS ###############################################
pa.beta <- logit(p.pa.beta)
p.pa.beta ~ dunif(0,1)
pp.beta ~ dunif(0, 250)
lam.beta ~ dnorm(0, 0.01)
psi.beta <- logit(p.psi.beta)
p.psi.beta ~ dunif(0,1)
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- pa.beta # add covariates for availability (time-removal) here
log(dist.sigma[i,t]) <- log(pp.beta) # add covariates for perceptibility (distance) here
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda.eff[i,t])
lambda.eff[i,t] <- lambda[i,t] * w.lam[i,t]
w.lam[i,t] ~ dbern(psi[i,t])
log(lambda[i,t]) <- lam.beta # add covariates for abundance
logit(psi[i,t]) <- psi.beta # add covariates for habitat suitability
# If the model fails to initialize, set initial values with psi near 1
# (e.g., p.psi.beta near 1, i.e., psi.beta near 10 on the logit scale)
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
} # End model
",file="./model-basic-ZIP.txt")
for (i in 1:19){
spp <- spp.list.foc[i]
spp.num <- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4), sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) + 1
# subset the distance and removal observations to this species
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
inits <- function(){ list(
N = Nav, p.psi.beta= 0.99, # setting psi near 1 helps the model initialize
w.lam = (Nav*0) + 1, # start all site-years as suitable so initial values are valid
p.pa.beta= runif(1, 0.1, 0.9),
pp.beta= runif(1, 5, 100)
) }
params <- c("pa.beta", "p.pa.beta", "pp.beta",
"lam.beta", "psi.beta", "p.psi.beta",
"Ntot", "D", "bayesp"
)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
# Run JAGS
out <- jags(datalfoc, inits=inits, params,
"./model-basic-ZIP.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm")
)
fn<- paste( "./", spp, "_basic-ZIP.RData", sep="" )
save(list= c("out"), file=fn)
}
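####################################
# (3) prior inclusion probabilities for Gibbs variable selection (GVS)
####################################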
library(MuMIn)
vars <- c("ba", "sf","dbh", "md", "scov", "scomp", "lcr")
tf <- c(TRUE, FALSE)
eg <- expand.grid(tf, tf, tf, tf, tf, tf, tf)
colnames(eg) <- vars
dat <- matrix(rnorm(100*length(vars)),ncol=length(vars))
dat <- as.data.frame(dat)
names(dat) <- vars
dat$y <- rnorm(nrow(dat))
global <- lm(y ~ ba + I(ba^2) + sf + I(sf^2) +
dbh + md + scov + scomp + lcr +
ba:sf + I(ba^2):sf + ba:I(sf^2),
data = dat, na.action = "na.fail")
combos <- dredge(global, eval=F,
subset =
dc(ba,I(ba^2),I(ba^2):sf) &&
dc(sf,I(sf^2),ba:I(sf^2)) &&
dc(ba:sf, ba:I(sf^2)) &&
dc(ba:sf, I(ba^2):sf)
)
## Fixed term is "(Intercept)"
n.mods <- length(combos)
dat2 <- dat[1,]*0 + 1
terms <- colnames(model.matrix(formula(combos[[n.mods]]),dat2))
n.terms <- length(terms)
combo.mat <- matrix(0,nrow=n.mods,ncol=n.terms)
colnames(combo.mat) <- terms
for (i in 1:n.mods){
combo.mat[i,match(colnames(model.matrix(formula(combos[[i]]),dat2)),
terms)] <- 1
}
# total model probs
temp <- apply(combo.mat,2,mean)
vegprobs <- c(w1=temp[1], w2=temp[2], w3=temp[9], w4=temp[4], w5=temp[6],
w6=temp[8], w7=temp[7], w8=temp[5],
w9.ba2=mean(combo.mat[which(combo.mat[,"I(ba^2):sf"]==0),"I(ba^2)"]),
w10.sf2=mean(combo.mat[which(combo.mat[,"ba:I(sf^2)"]==0),"I(sf^2)"]),
w11.ba_sf=mean(combo.mat[which(combo.mat[,"ba:I(sf^2)"]==0 & combo.mat[,"I(ba^2):sf"]==0),"ba:sf"]),
w12.ba2_sf=mean(combo.mat[,"I(ba^2):sf"]) ,
w13.ba_sf2=mean(combo.mat[,"ba:I(sf^2)"]) )
vegprobs
## w1.(Intercept) w2.ba w3.sf w4.dbh w5.md
## 1.0000000 0.8333333 0.8333333 0.5000000 0.5000000
## w6.scov w7.scomp w8.lcr w9.ba2 w10.sf2
## 0.5000000 0.5000000 0.5000000 0.4000000 0.4000000
## w11.ba_sf w12.ba2_sf w13.ba_sf2
## 0.3076923 0.1666667 0.1666667
Next we calculate the prior inclusion probabilities for the treatment and availability covariates used in Gibbs variable selection. Because these two parts of the model share the same structure (with different covariates), we calculate these prior probabilities once.
trtprobs <- matrix(c(1,0,0,0,0,0,
1,1,0,0,0,0,
1,0,1,0,0,0,
1,1,1,0,0,0,
1,0,1,1,0,0,
1,1,1,1,0,0,
1,1,1,0,1,0,
1,1,1,1,1,1,
NA, NA, NA, NA, NA, NA),
nrow=9,ncol=6, byrow=T,
dimnames=list(c("null", "trt", "tsh", "trt+tsh", "tsh+tsh*tsh",
"trt+tsh+tsh*tsh", "trt+tsh+trt*tsh", "trt+tsh+tsh*tsh+trt*tsh*tsh", "modprobs"),
c("w1.int","w2.trt", "w3.tsh", "w4.tsh*tsh", "w5.trt*tsh", "w6.trt*tsh*tsh")))
trtprobs[9,1] <- mean(trtprobs[,1], na.rm=T)
trtprobs[9,2] <- mean(trtprobs[1:6,2], na.rm=T)
trtprobs[9,3] <- mean(trtprobs[1:4,3], na.rm=T)
trtprobs[9,4] <- mean(trtprobs[1:7,4], na.rm=T)
trtprobs[9,5] <- mean(trtprobs[1:7,5], na.rm=T)
trtprobs[9,6] <- mean(trtprobs[1:8,6], na.rm=T)
round(trtprobs[9,], 3)
## w1.int w2.trt w3.tsh w4.tsh*tsh w5.trt*tsh
## 1.000 0.500 0.500 0.286 0.143
## w6.trt*tsh*tsh
## 0.125
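As an optional check (not part of the original analysis), we can simulate from the conditional Bernoulli priors that these probabilities feed into (the w ~ dbern() blocks in the JAGS models below) and tabulate the implied prior mass on each treatment model structure, which the specification is designed to make approximately equal.
# simulate the conditional Bernoulli priors for the treatment terms and
# tabulate the implied prior frequency of each model structure (w2..w6)
set.seed(1)
n.sim <- 1e5
w6 <- rbinom(n.sim, 1, 0.125)
w5 <- rbinom(n.sim, 1, (1 - w6)*0.143 + w6)
w4 <- rbinom(n.sim, 1, (1 - w6)*0.286 + w6)
w3 <- rbinom(n.sim, 1, ifelse(w4 + w5 + w6 == 0, 0.5, 1))
w2 <- rbinom(n.sim, 1, ifelse(w5 + w6 == 0, 0.5, 1))
round(table(paste0(w2, w3, w4, w5, w6)) / n.sim, 3)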
####################################
# (4) global vegetation Poisson model
####################################
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(datalfoc$CovsLam)
colnames(dd) <- c("ba2", "ba", "sf2", "sf", "dbh", "md", "scov", "scomp", "lcr")
# model matrix of stand effects (all covariates here are continuous)
mm <- model.matrix(~ 1 + ba + sf + dbh + md + scov + scomp + lcr +
ba:sf + I(ba^2) + I(sf^2) + I(ba^2):sf + ba:I(sf^2),
data=dd)
# position of the beta coefficients associated with each Bernoulli indicator variable
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
w[13] ~ dbern(0.1667)
w[12] ~ dbern(0.1667)
w12_13 <- w[12]+w[13]
p.w11 <- equals(w12_13,0)*.3077 + (1-equals(w12_13,0))
w[11] ~ dbern(p.w11)
p.w10 <- equals(w[13],0)*0.4 + w[13]
w[10] ~ dbern(p.w10)
p.w9 <- equals(w[12],0)*0.4 + w[12]
w[9] ~ dbern(p.w9)
w[8] ~ dbern(0.5)
w[7] ~ dbern(0.5)
w[6] ~ dbern(0.5)
w[5] ~ dbern(0.5)
w[4] ~ dbern(0.5)
w10_13 <- w[10] + w[11] + w[12] + w[13]
p.w3 <- equals(w10_13,0)*0.5 + (1-equals(w10_13,0))
w[3] ~ dbern(p.w3)
w9.11_13 <- w[9] + w[11] + w[12] + w[13]
p.w2 <- equals(w9.11_13,0)*0.5 + (1-equals(w9.11_13,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5] + wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
#wtemp[b1] <- w[pos[b1]] # this uses GVS
wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
lam.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
#wpa.temp[b1] <- wpa[pos.pa[b1]]
wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
#wpp.temp[b1] <- wpp[pos.pp[b1]]
wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
lam.mu <- mm[,] %*% (lam.beta[]*wtemp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t] + lam.mu[i]
N[i,t] ~ dpois(lambda[i,t])
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(0, stand.tau)
} #S
# Random effects
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau)}
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
",file="./model_V-pois-global.txt")
for (i in 1:19){ # on an HPC, split this loop into 5 jobs: 1:4, 5:8, 9:12, 13:16, 17:19
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
lam.beta = runif(n.betas, -0.5, 0.5)
) }
params <- c("pa.beta", "pp.beta",
"lam.beta", "lam.beta1", "lam.beta2",
"Ntot", "D",
"stand.sig", "lam.beta",
"bayesp", "w", "wpa", "wpp",
"yr.eps", "yr.sig", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# these should be replaced with actual posterior means & sds from a global model run when using GVS
datalfoc$post.b <- rep(0, n.betas) # out$mean$post.b
datalfoc$sd.b <- rep(10, n.betas)
datalfoc$post.b.pa <- rep(0, n.betas.pa) # out$mean$post.b
datalfoc$sd.b.pa <- rep(10, n.betas.pa)
datalfoc$post.b.pp <- rep(0, n.betas.pp) # out$mean$post.b
datalfoc$sd.b.pp <- rep(10, n.betas.pp)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 4 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./model_V-pois-global.txt",
#"V14.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm"),
codaOnly= "N")
fn<- paste( "./", spp, "_V-pois-global.RData", sep="" )
save(list= c("out", "datalfoc"), file=fn)
}
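####################################
# (5) global vegetation zero-inflated Poisson model
####################################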
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(datalfoc$CovsLam)
colnames(dd) <- c("ba2", "ba", "sf2", "sf", "dbh", "md", "scov", "scomp", "lcr")
# model matrix of stand effects; the same matrix is used for the abundance and zero-inflation parts
mmzi <- mm <- model.matrix(~ 1 + ba + sf + dbh + md + scov + scomp + lcr +
ba:sf + I(ba^2) + I(sf^2) + I(ba^2):sf + ba:I(sf^2),
data=dd)
# position of the beta coefficients associated with each Bernoulli indicator variable
pos.zi <- pos <- as.numeric(attr(mm,"assign")+1)
n.betas.zi <- n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
psi.beta[1] <- logit(p.psi.beta0)
p.psi.beta0 ~ dunif(0,1)
# priors for the w model inclusion terms
w[13] ~ dbern(0.1667)
w[12] ~ dbern(0.1667)
w12_13 <- w[12]+w[13]
p.w11 <- equals(w12_13,0)*.3077 + (1-equals(w12_13,0))
w[11] ~ dbern(p.w11)
p.w10 <- equals(w[13],0)*0.4 + w[13]
w[10] ~ dbern(p.w10)
p.w9 <- equals(w[12],0)*0.4 + w[12]
w[9] ~ dbern(p.w9)
w[8] ~ dbern(0.5)
w[7] ~ dbern(0.5)
w[6] ~ dbern(0.5)
w[5] ~ dbern(0.5)
w[4] ~ dbern(0.5)
w10_13 <- w[10] + w[11] + w[12] + w[13]
p.w3 <- equals(w10_13,0)*0.5 + (1-equals(w10_13,0))
w[3] ~ dbern(p.w3)
w9.11_13 <- w[9] + w[11] + w[12] + w[13]
p.w2 <- equals(w9.11_13,0)*0.5 + (1-equals(w9.11_13,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for the wzi model inclusion terms (zero-inflation part)
wzi[13] ~ dbern(0.1667)
wzi[12] ~ dbern(0.1667)
wzi12_13 <- wzi[12]+wzi[13]
p.wzi11 <- equals(wzi12_13,0)*.3077 + (1-equals(wzi12_13,0))
wzi[11] ~ dbern(p.wzi11)
p.wzi10 <- equals(wzi[13],0)*0.4 + wzi[13]
wzi[10] ~ dbern(p.wzi10)
p.wzi9 <- equals(wzi[12],0)*0.4 + wzi[12]
wzi[9] ~ dbern(p.wzi9)
wzi[8] ~ dbern(0.5)
wzi[7] ~ dbern(0.5)
wzi[6] ~ dbern(0.5)
wzi[5] ~ dbern(0.5)
wzi[4] ~ dbern(0.5)
wzi10_13 <- wzi[10] + wzi[11] + wzi[12] + wzi[13]
p.wzi3 <- equals(wzi10_13,0)*0.5 + (1-equals(wzi10_13,0))
wzi[3] ~ dbern(p.wzi3)
wzi9.11_13 <- wzi[9] + wzi[11] + wzi[12] + wzi[13]
p.wzi2 <- equals(wzi9.11_13,0)*0.5 + (1-equals(wzi9.11_13,0))
wzi[2] ~ dbern(p.wzi2)
wzi[1] ~ dbern(1)
wzi.temp[1] <- 1
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5] + wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
#wtemp[b1] <- w[pos[b1]] # this uses GVS
wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
lam.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 2:n.betas.zi){ # starts at 2 because intercept always requires a diff prior and w=1
#wzi.temp[b1] <- wzi[pos.zi[b1]] # this uses GVS
wzi.temp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b.zi[b1] <- post.b.zi[b1]*(1-wzi.temp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 2:n.betas.zi){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b.zi[b1,b2] <- equals(b1,b2)*((1/sd.b.zi[b1]^2)*(1-wzi.temp[b1])) + (wzi.temp[b1]*b.tau.zi)
} # b2
psi.beta[b1] ~ dnorm(mean.b.zi[b1],tau.b.zi[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
#wpa.temp[b1] <- wpa[pos.pa[b1]]
wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
#wpp.temp[b1] <- wpp[pos.pp[b1]]
wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of site-specific predictors
lam.mu <- mm[,] %*% (lam.beta[]*wtemp[])
psi.mu <- mmzi[,] %*% (psi.beta[]*wzi.temp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
psi.stand.tau <- 1/ (psi.stand.sig*psi.stand.sig)
psi.stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
psi.yr.tau <- 1/ (psi.yr.sig*psi.yr.sig)
psi.yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.zi <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda.eff[i,t])
lambda.eff[i,t] <- lambda[i,t] * w.lam[i,t]
w.lam[i,t] ~ dbern(psi[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t] + lam.mu[i]
logit(psi[i,t]) <- psi.mu[i]
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(0, stand.tau)
} #S
# Random effects
for (y in 1:9){
yr.eps[y] ~ dnorm(0, yr.tau)
} # y
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
" ,file="./model_V-zip_global.txt")
for (i in c(11,10,3,5,2)){ #use zips for spp c(11,10,3,5,2)
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
w.lam = (Nav*0) + 1, # start all site-years as suitable so initial values are valid
p.psi.beta0= runif(1, 0.9, 1), # setting psi near 1 helps the model initialize
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
lam.beta = runif(n.betas, -0.5, 0.5)
) }
params <- c("pa.beta", "pp.beta", "obs.eps", "obs.sig",
"lam.beta", "stand.sig", "yr.eps", "yr.sig",
"psi.beta",
"Ntot", "D",
"bayesp", "w", "wzi", "wpa", "wpp"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$mmzi <- mmzi
datalfoc$pos <- pos
datalfoc$pos.zi <- pos.zi
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.zi <- n.betas.zi
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
datalfoc$post.b <- rep(0, n.betas) # out$mean$post.b
datalfoc$sd.b <- rep(10, n.betas)
datalfoc$post.b.zi <- rep(0, n.betas.zi) # out$mean$post.b
datalfoc$sd.b.zi <- rep(10, n.betas.zi)
datalfoc$post.b.pa <- rep(0, n.betas.pa) # out$mean$post.b
datalfoc$sd.b.pa <- rep(10, n.betas.pa)
datalfoc$post.b.pp <- rep(0, n.betas.pp) # out$mean$post.b
datalfoc$sd.b.pp <- rep(10, n.betas.pp)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 3 ; na <- 10000
# ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./model_V-zip_global.txt",
# "V14.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm"))
fn<- paste( "./", spp, "_V-zip_global.RData", sep="" )
save(list= c("out", "datalfoc"), file=fn)
}
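####################################
# (6) vegetation Poisson model with GVS
####################################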
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
load("./global_est_veg.Rdata") # output file from global model
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(datalfoc$CovsLam)
colnames(dd) <- c("ba2", "ba", "sf2", "sf", "dbh", "md", "scov", "scomp", "lcr")
# model matrix of stand effects (all covariates here are continuous)
mm <- model.matrix(~ 1 + ba + sf + dbh + md + scov + scomp + lcr +
ba:sf + I(ba^2) + I(sf^2) + I(ba^2):sf + ba:I(sf^2),
data=dd)
# position of the beta coefficients associated with each Bernoulli indicator variable
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
w[13] ~ dbern(0.1667)
w[12] ~ dbern(0.1667)
w12_13 <- w[12]+w[13]
p.w11 <- equals(w12_13,0)*.3077 + (1-equals(w12_13,0))
w[11] ~ dbern(p.w11)
p.w10 <- equals(w[13],0)*0.4 + w[13]
w[10] ~ dbern(p.w10)
p.w9 <- equals(w[12],0)*0.4 + w[12]
w[9] ~ dbern(p.w9)
w[8] ~ dbern(0.5)
w[7] ~ dbern(0.5)
w[6] ~ dbern(0.5)
w[5] ~ dbern(0.5)
w[4] ~ dbern(0.5)
w10_13 <- w[10] + w[11] + w[12] + w[13]
p.w3 <- equals(w10_13,0)*0.5 + (1-equals(w10_13,0))
w[3] ~ dbern(p.w3)
w9.11_13 <- w[9] + w[11] + w[12] + w[13]
p.w2 <- equals(w9.11_13,0)*0.5 + (1-equals(w9.11_13,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5] + wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
wtemp[b1] <- w[pos[b1]] # this uses GVS
#wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
lam.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
wpa.temp[b1] <- wpa[pos.pa[b1]]
#wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
wpp.temp[b1] <- wpp[pos.pp[b1]]
#wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
lam.mu <- mm[,] %*% (lam.beta[]*wtemp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t] + lam.mu[i]
N[i,t] ~ dpois(lambda[i,t])
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(0, stand.tau)
} #S
# Random effects
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau)}
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
",file="./model_V-pois-GVS.txt")
for (i in setdiff(1:19, c(11,10,3,5,2))){ # skip species fit with ZIP models below
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
lam.beta = runif(n.betas, -0.5, 0.5)
) }
params <- c("pa.beta", "pp.beta",
"lam.beta", "lam.beta1", "lam.beta2",
"Ntot", "D",
"stand.sig", "lam.beta",
"bayesp", "w", "wpa", "wpp",
"yr.eps", "yr.sig", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# posterior means & SDs from the global model run, used as pseudo-priors for GVS
spp.num2 <- which( names(post.b)==spp )
datalfoc$post.b <- post.b[[spp.num2]] # out$mean$post.b
datalfoc$sd.b <-sd.b[[spp.num2]]
datalfoc$post.b.pa <- post.b.pa[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pa <- sd.b.pa[[spp.num2]]
datalfoc$post.b.pp <- post.b.pp[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pp <- sd.b.pp[[spp.num2]]
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./model_V-pois-GVS.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm"),
codaOnly= "N")
fn<- paste( "./", spp, "_V-pois-GVS.RData", sep="" )
save(list= c("out", "datalfoc"), file=fn)
}
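####################################
# (7) vegetation zero-inflated Poisson model with GVS
####################################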
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
load("./global_est_zi_veg.Rdata") # output file from global model
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(datalfoc$CovsLam)
colnames(dd) <- c("ba2", "ba", "sf2", "sf", "dbh", "md", "scov", "scomp", "lcr")
# model matrix of stand effects; the same matrix is used for the
# abundance and zero-inflation parts of the model
mmzi <- mm <- model.matrix(~ 1 + ba + sf + dbh + md + scov + scomp + lcr +
ba:sf + I(ba^2) + I(sf^2) + I(ba^2):sf + ba:I(sf^2),
data=dd)
# position of the beta coefficients associated with each Bernoulli indicator variable
pos.zi <- pos <- as.numeric(attr(mm,"assign")+1)
n.betas.zi <- n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
psi.beta[1] <- logit(p.psi.beta0)
p.psi.beta0 ~ dunif(0,1)
# priors for the w model inclusion terms
w[13] ~ dbern(0.1667)
w[12] ~ dbern(0.1667)
w12_13 <- w[12]+w[13]
p.w11 <- equals(w12_13,0)*.3077 + (1-equals(w12_13,0))
w[11] ~ dbern(p.w11)
p.w10 <- equals(w[13],0)*0.4 + w[13]
w[10] ~ dbern(p.w10)
p.w9 <- equals(w[12],0)*0.4 + w[12]
w[9] ~ dbern(p.w9)
w[8] ~ dbern(0.5)
w[7] ~ dbern(0.5)
w[6] ~ dbern(0.5)
w[5] ~ dbern(0.5)
w[4] ~ dbern(0.5)
w10_13 <- w[10] + w[11] + w[12] + w[13]
p.w3 <- equals(w10_13,0)*0.5 + (1-equals(w10_13,0))
w[3] ~ dbern(p.w3)
w9.11_13 <- w[9] + w[11] + w[12] + w[13]
p.w2 <- equals(w9.11_13,0)*0.5 + (1-equals(w9.11_13,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for the wzi model inclusion terms (zero-inflation part)
wzi[13] ~ dbern(0.1667)
wzi[12] ~ dbern(0.1667)
wzi12_13 <- wzi[12]+wzi[13]
p.wzi11 <- equals(wzi12_13,0)*.3077 + (1-equals(wzi12_13,0))
wzi[11] ~ dbern(p.wzi11)
p.wzi10 <- equals(wzi[13],0)*0.4 + wzi[13]
wzi[10] ~ dbern(p.wzi10)
p.wzi9 <- equals(wzi[12],0)*0.4 + wzi[12]
wzi[9] ~ dbern(p.wzi9)
wzi[8] ~ dbern(0.5)
wzi[7] ~ dbern(0.5)
wzi[6] ~ dbern(0.5)
wzi[5] ~ dbern(0.5)
wzi[4] ~ dbern(0.5)
wzi10_13 <- wzi[10] + wzi[11] + wzi[12] + wzi[13]
p.wzi3 <- equals(wzi10_13,0)*0.5 + (1-equals(wzi10_13,0))
wzi[3] ~ dbern(p.wzi3)
wzi9.11_13 <- wzi[9] + wzi[11] + wzi[12] + wzi[13]
p.wzi2 <- equals(wzi9.11_13,0)*0.5 + (1-equals(wzi9.11_13,0))
wzi[2] ~ dbern(p.wzi2)
wzi[1] ~ dbern(1)
wzi.temp[1] <- 1
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5] + wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
wtemp[b1] <- w[pos[b1]] # this uses GVS
#wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
lam.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, zero inflation
for(b1 in 2:n.betas.zi){ # starts at 2 because intercept always requires a diff prior and w=1
wzi.temp[b1] <- wzi[pos.zi[b1]] # this uses GVS
mean.b.zi[b1] <- post.b.zi[b1]*(1-wzi.temp[b1])
for(b2 in 2:n.betas.zi){
tau.b.zi[b1,b2] <- equals(b1,b2)*((1/sd.b.zi[b1]^2)*(1-wzi.temp[b1])) + (wzi.temp[b1]*b.tau.zi)
} # b2
psi.beta[b1] ~ dnorm(mean.b.zi[b1],tau.b.zi[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
wpa.temp[b1] <- wpa[pos.pa[b1]]
#wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
wpp.temp[b1] <- wpp[pos.pp[b1]]
#wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vectors of stand-specific predictors
lam.mu <- mm[,] %*% (lam.beta[]*wtemp[])
psi.mu <- mmzi[,] %*% (psi.beta[]*wzi.temp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.zi <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda.eff[i,t])
lambda.eff[i,t] <- lambda[i,t] * w.lam[i,t]
w.lam[i,t] ~ dbern(psi[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t] + lam.mu[i]
logit(psi[i,t]) <- psi.mu[i]
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(0, stand.tau)
} #S
# Random effects
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau)}
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
",file="./model_V-zip-GVS.txt")
for (i in c(11,10,3,5,2)){ #use zips for spp c(11,10,3,5,2)
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
w.lam = (Nav*0) + 1, # start all site-years as suitable so initial values are valid
p.psi.beta0= runif(1, 0.9, 1), # setting psi near 1 helps the model initialize
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
lam.beta = runif(n.betas, -0.5, 0.5)
) }
params <- c("pa.beta", "pp.beta",
"lam.beta", "psi.beta",
"Ntot", "D",
"stand.sig",
"bayesp", "w", "wzi", "wpa", "wpp",
"yr.eps", "yr.sig", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$mmzi <- mmzi
datalfoc$pos <- pos
datalfoc$pos.zi <- pos.zi
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.zi <- n.betas.zi
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# posterior means & SDs from the global model run, used as pseudo-priors for GVS
spp.num2 <- which( names(post.b)==spp )
datalfoc$post.b <- post.b[[spp.num2]]
datalfoc$sd.b <- sd.b[[spp.num2]]
datalfoc$post.b.zi <- post.b.zi[[spp.num2]] # assumed to be provided in global_est_zi_veg.Rdata
datalfoc$sd.b.zi <- sd.b.zi[[spp.num2]]
datalfoc$post.b.pa <- post.b.pa[[spp.num2]]
datalfoc$sd.b.pa <- sd.b.pa[[spp.num2]]
datalfoc$post.b.pp <- post.b.pp[[spp.num2]]
datalfoc$sd.b.pp <- sd.b.pp[[spp.num2]]
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 3 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./model_V-zip-GVS.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel = T, modules=c("glm"),
codaOnly= "N")
fn<- paste( "./", spp, "_V-zip-GVS.RData", sep="" )
save(list= c("out", "datalfoc"), file=fn)
}
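####################################
# (8) global treatment Poisson model
####################################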
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.Rdata")
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(treat=factor(datalfoc$treat),tsh=datalfoc$tsh,tsh2=datalfoc$tsh^2)
# model matrix of stand effects (contr.sum is critical here)
mm <- model.matrix(~treat*tsh+treat*tsh2,dd,contrasts=list(treat="contr.sum"))
# position of the beta coefficients associated with each Bernoulli indicator variable (i.e., treat has 7 terms w/ intercept)
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
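A small toy illustration (hypothetical factor, not the study data) of why contr.sum matters here: with sum-to-zero contrasts the intercept is the mean across treatment levels, so when GVS drops the treatment terms the intercept retains a sensible interpretation.
# toy example of sum-to-zero contrasts on a 3-level factor
set.seed(2)
toy <- data.frame(treat = factor(rep(c("A","B","C"), each = 2)), tsh = rnorm(6))
toy$tsh2 <- toy$tsh^2
model.matrix(~ treat*tsh + treat*tsh2, toy, contrasts = list(treat = "contr.sum"))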
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
# this ensures that each of the 8 model combos has equal probability: Pr(m)= 1/8
w[6] ~ dbern(0.125)
p.w5 <- (1-w[6])*0.143 + w[6]
w[5] ~ dbern(p.w5)
p.w4 <- (1-w[6])*0.286 + w[6]
w[4] ~ dbern(p.w4)
w456 <- w[4]+w[5]+w[6]
p.w3 <- equals(w456,0)*0.5 + (1-equals(w456,0))
w[3] ~ dbern(p.w3)
w56 <- w[5]+w[6]
p.w2 <- equals(w56,0)*0.5 + (1-equals(w56,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5]+wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
# wtemp[b1] <- w[pos[b1]] # this uses GVS
wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
s.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
# wpa.temp[b1] <- wpa[pos.pa[b1]]
wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
# wpp.temp[b1] <- wpp[pos.pp[b1]]
wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
stand.mu <- mm[,] %*% (s.beta[]*wtemp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # Distance class probabilities
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t]
N[i,t] ~ dpois(lambda[i,t])
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(stand.mu[s], stand.tau)
} #S
# Random effects
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau)}
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # dens per ha
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for the count model; ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
",file="./T-pois-global.txt")
# CAUTION: These next lines run the models and
# take a VERY long time (>1 week total;
# each species took ~4 days).
# We ran these on an HPC and split the
# loop so each job ran 4-5 species sequentially.
for (i in 1:19){ # split across 5 HPC scripts: species 1:4, 5:8, 9:12, 13:16, 17:19
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
s.beta = runif(n.betas,-.5,.5)
) }
params <- c("pa.beta", "pp.beta",
"lam.beta", "lam.beta1", "lam.beta2",
"Ntot", "D",
"stand.sig", "s.beta",
"bayesp", "w", "wpa", "wpp",
"yr.eps", "yr.sig", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# these should be replaced with actual posterior means & sds from the global model when running GVS
datalfoc$post.b <- rep(0, 21) # out$mean$post.b
datalfoc$sd.b <- rep(100, 21)
datalfoc$post.b.zi <- rep(0, 21) # out$mean$post.b
datalfoc$sd.b.zi <-rep(100, 21)
datalfoc$post.b.pa <- rep(0, 6) # out$mean$post.b
datalfoc$sd.b.pa <- rep(100, 6)
datalfoc$post.b.pp <- rep(0, 4) # out$mean$post.b
datalfoc$sd.b.pp <- rep(100, 4)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./T-pois-global.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel=TRUE, modules=c("glm"),
codaOnly="N")
fn <- paste0("./", spp, "_T-pois-global.RData")
save(list= c("out", "datalfoc"), file=fn)
}
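# The GVS runs below load posterior summaries from these global fits
# (./global_est.Rdata). Construction of that file is not shown in this
# document; a minimal hedged sketch, assuming named lists indexed by species
# code, consistent with how post.b etc. are looked up by species name below:
post.b <- sd.b <- post.b.pa <- sd.b.pa <- post.b.pp <- sd.b.pp <- list()
for (spp in spp.list.foc) {
  fn <- paste0("./", spp, "_T-pois-global.RData")
  if (!file.exists(fn)) next
  load(fn)                              # loads 'out' and 'datalfoc'
  post.b[[spp]]    <- out$mean$s.beta   # posterior means of abundance betas
  sd.b[[spp]]      <- out$sd$s.beta     # posterior sds
  post.b.pa[[spp]] <- out$mean$pa.beta  # availability betas
  sd.b.pa[[spp]]   <- out$sd$pa.beta
  post.b.pp[[spp]] <- out$mean$pp.beta  # perceptibility betas
  sd.b.pp[[spp]]   <- out$sd$pp.beta
}
# save(post.b, sd.b, post.b.pa, sd.b.pa, post.b.pp, sd.b.pp,
#      file="./global_est.Rdata")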
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.RData")
# data manipulation
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(treat=factor(datalfoc$treat),tsh=datalfoc$tsh,tsh2=datalfoc$tsh^2)
# model matrix of stand effects (contr.sum is critical here)
mm <- model.matrix(~treat*tsh+treat*tsh2,dd,contrasts=list(treat="contr.sum"))
# position of each beta coefficient's Bernoulli inclusion indicator (treat contributes 6 columns, i.e., 7 terms with the intercept)
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
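# A toy illustration (hypothetical data, not the study's) of the model matrix
# and 'pos' indexing above: with a 7-level treatment under sum-to-zero
# contrasts, treat contributes 6 columns, so n.betas = 1+6+1+1+6+6 = 21,
# matching the length-21 post.b/sd.b vectors used in these scripts.
toy <- data.frame(treat=factor(rep(1:7, each=2)), tsh=rnorm(14))
toy$tsh2 <- toy$tsh^2
mm.toy <- model.matrix(~treat*tsh+treat*tsh2, toy,
                       contrasts=list(treat="contr.sum"))
pos.toy <- as.numeric(attr(mm.toy, "assign")+1)
table(pos.toy) # 1 intercept column, 6 treat, 1 tsh, 1 tsh2, 6 treat:tsh, 6 treat:tsh2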
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
# this gives each of the 8 nested model combos approximately equal prior probability: Pr(m) ~= 1/8
w[6] ~ dbern(0.125)
p.w5 <- (1-w[6])*0.143 + w[6]
w[5] ~ dbern(p.w5)
p.w4 <- (1-w[6])*0.286 + w[6]
w[4] ~ dbern(p.w4)
w456 <- w[4]+w[5]+w[6]
p.w3 <- equals(w456,0)*0.5 + (1-equals(w456,0))
w[3] ~ dbern(p.w3)
w56 <- w[5]+w[6]
p.w2 <- equals(w56,0)*0.5 + (1-equals(w56,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wzi for zero-inflation param
wzi[6] ~ dbern(0.125)
p.wzi5 <- (1-wzi[6])*0.143 + wzi[6]
wzi[5] ~ dbern(p.wzi5)
p.wzi4 <- (1-wzi[6])*0.286 + wzi[6]
wzi[4] ~ dbern(p.wzi4)
wzi456 <- wzi[4]+wzi[5]+wzi[6]
p.wzi3 <- equals(wzi456,0)*0.5 + (1-equals(wzi456,0))
wzi[3] ~ dbern(p.wzi3)
wzi56 <- wzi[5]+wzi[6]
p.wzi2 <- equals(wzi56,0)*0.5 + (1-equals(wzi56,0))
wzi[2] ~ dbern(p.wzi2)
wzi[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5]+wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){ wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
# wtemp[b1] <- w[pos[b1]] # this uses GVS
wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
s.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, zero-inflation
for(b1 in 1:n.betas){
# wzi.temp[b1] <- wzi[pos[b1]] # this uses GVS
wzi.temp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b.zi[b1] <- post.b.zi[b1]*(1-wzi.temp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b.zi[b1,b2] <- equals(b1,b2)*((1/sd.b.zi[b1]^2)*(1-wzi.temp[b1])) + (wzi.temp[b1]*b.tau.zi)
} # b2
s.beta.psi[b1] ~ dnorm(mean.b.zi[b1],tau.b.zi[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
# wpa.temp[b1] <- wpa[pos.pa[b1]]
wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
#wpp.temp[b1] <- wpp[pos.pp[b1]]
wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
stand.mu <- mm[,] %*% (s.beta[]*wtemp[])
stand.mu.psi <- mm[,] %*% (s.beta.psi[]*wzi.temp[])
stand.tau.psi <- 1/ (stand.sig.psi*stand.sig.psi)
stand.sig.psi ~ dunif(0,10)
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
#yr.tau.psi <- 1/ (yr.sig.psi*yr.sig.psi)
#yr.sig.psi ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.zi <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # overall perceptibility (sum over distance classes)
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda.eff[i,t])
lambda.eff[i,t] <- lambda[i,t] * w.lam[i,t]
w.lam[i,t] ~ dbern(psi[i,t])
logit(psi[i,t]) <- psi.beta.s[stand.id[i]] #+ yr.eps.psi[t]
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t]
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
# Random effects
for (s in 1:S){ psi.beta.s[s] ~ dnorm(stand.mu.psi[s], stand.tau.psi)
lam.beta.s[s] ~ dnorm(stand.mu[s], stand.tau) } #S
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau) } # y
# yr.eps.psi[y] ~ dnorm(0, yr.tau.psi) # optional year effect on zero-inflation (disabled)
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)} # o
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # density (birds/ha); 3.14 approximates pi
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for model fit: ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
" ,file="./T-zip-global.txt")
# CAUTION: These next lines run the models and
# take a VERY long time (>1 week total;
# each species took ~4 days).
# We ran these on an HPC and split the
# loop so each job ran 4-5 species sequentially.
# These species had poor fit under the Poisson model
# (BOCH, GCKI, GRAJ, MAWA, YBFL, YPWA),
# so we refit them with zero inflation.
spp.zi <- c(10, 11, 3, 5, 2, 8) # indices into spp.list.foc
for (i in spp.zi){ # loop over the species refit with zero inflation
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
# Inits and parameters to save
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4), sum, na.rm=T) # naive counts, used to initialize N
Mst <- apply(Nav, c(1), max, na.rm=T) + 1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(0, datalfoc$nCovsPP-2)),
s.beta= runif(21, -0.1, 0.1),
s.beta.psi= c(5,runif(20, -0.1, 0.1)),
stand.sig.psi= runif(1, 0, 0.1),
stand.sig= runif(1, 0, 0.1),
obs.sig= runif(1, 0, 0.1),
yr.sig= runif(1, 0, 0.1)
) }
params <- c("pa.beta", "pp.beta",
"Ntot", "D",
"stand.sig", "stand.sig.psi", "s.beta", "s.beta.psi",
"bayesp", "w", "wzi", "wpa", "wpp",
"yr.eps", "yr.sig", "yr.eps.psi", "yr.sig.psi", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# these should be replaced with actual posterior means & sds from a global model run to implement GVS
datalfoc$post.b <- rep(0, 21) # out$mean$post.b
datalfoc$sd.b <- rep(100, 21)
datalfoc$post.b.zi <- rep(0, 21) # out$mean$post.b
datalfoc$sd.b.zi <-rep(100, 21)
datalfoc$post.b.pa <- rep(0, 6) # out$mean$post.b
datalfoc$sd.b.pa <- rep(100, 6)
datalfoc$post.b.pp <- rep(0, 4) # out$mean$post.b
datalfoc$sd.b.pp <- rep(100, 4)
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./T-zip-global.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel=TRUE, modules=c("glm"),
codaOnly="N")
fn <- paste0("./", spp, "_T-zip-global.RData")
save(list= c("out", "datalfoc"), file=fn)
}
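# As above, a hedged sketch (structure assumed, not shown in the original
# scripts) of collecting the ZIP global-model posteriors, including the
# zero-inflation betas, into the objects loaded below as global_est_zi.Rdata.
post.b.zi <- sd.b.zi <- list()
for (i in spp.zi) {
  spp <- spp.list.foc[i]
  fn <- paste0("./", spp, "_T-zip-global.RData")
  if (!file.exists(fn)) next
  load(fn)                                  # loads 'out' and 'datalfoc'
  post.b.zi[[spp]] <- out$mean$s.beta.psi   # zero-inflation betas
  sd.b.zi[[spp]]   <- out$sd$s.beta.psi
}
# save(post.b, sd.b, post.b.zi, sd.b.zi, post.b.pa, sd.b.pa, post.b.pp,
#      sd.b.pp, file="./global_est_zi.Rdata")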
library(jagsUI) # v1.5.1
load("./global_est.Rdata") # output file from the global model runs above
load("./DATA.RData")
# data manipulation
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(treat=factor(datalfoc$treat),tsh=datalfoc$tsh,tsh2=datalfoc$tsh^2)
# model matrix of stand effects (contr.sum is critical here)
mm <- model.matrix(~treat*tsh+treat*tsh2,dd,contrasts=list(treat="contr.sum"))
# position of each beta coefficient's Bernoulli inclusion indicator (treat contributes 6 columns, i.e., 7 terms with the intercept)
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
# this gives each of the 8 nested model combos approximately equal prior probability: Pr(m) ~= 1/8
w[6] ~ dbern(0.125)
p.w5 <- (1-w[6])*0.143 + w[6]
w[5] ~ dbern(p.w5)
p.w4 <- (1-w[6])*0.286 + w[6]
w[4] ~ dbern(p.w4)
w456 <- w[4]+w[5]+w[6]
p.w3 <- equals(w456,0)*0.5 + (1-equals(w456,0))
w[3] ~ dbern(p.w3)
w56 <- w[5]+w[6]
p.w2 <- equals(w56,0)*0.5 + (1-equals(w56,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5]+wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
wtemp[b1] <- w[pos[b1]] # this uses GVS
# wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
s.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
wpa.temp[b1] <- wpa[pos.pa[b1]]
# wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
wpp.temp[b1] <- wpp[pos.pp[b1]]
# wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
stand.mu <- mm[,] %*% (s.beta[]*wtemp[])
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # overall perceptibility (sum over distance classes)
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t]
N[i,t] ~ dpois(lambda[i,t])
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
for (s in 1:S) {
lam.beta.s[s] ~ dnorm(stand.mu[s], stand.tau)
} #S
# Random effects
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau)}
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)}
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # density (birds/ha); 3.14 approximates pi
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for model fit: ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
"
,file="./T-pois-GVS.txt")
# CAUTION: These next lines run the models and
# take a VERY long time (>1 week total;
# each species took ~4 days).
# We ran these on an HPC and split the
# loop so each job ran 4-5 species sequentially.
for (i in 1:19){ # split across 5 HPC scripts: species 1:4, 5:8, 9:12, 13:16, 17:19
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4),sum, na.rm=T)
Mst <- apply(Nav, c(1), max, na.rm=T) +1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(NA, datalfoc$nCovsPP-2)),
stand.sig= runif(1, 0, 2),
s.beta = runif(n.betas,-.5,.5)
) }
params <- c("pa.beta", "pp.beta",
"lam.beta", "lam.beta1", "lam.beta2",
"Ntot", "D",
"stand.sig", "s.beta",
"bayesp", "w", "wpa", "wpp",
"yr.eps", "yr.sig", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# input posterior means & sds from a global model run when using GVS
spp.num2 <- which( names(post.b)==spp )
datalfoc$post.b <- post.b[[spp.num2]] # out$mean$post.b
datalfoc$sd.b <-sd.b[[spp.num2]]
datalfoc$post.b.pa <- post.b.pa[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pa <- sd.b.pa[[spp.num2]]
datalfoc$post.b.pp <- post.b.pp[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pp <- sd.b.pp[[spp.num2]]
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./T-pois-GVS.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel=TRUE, modules=c("glm"),
codaOnly="N")
fn <- paste0("./", spp, "_T-pois-GVS.RData")
save(list= c("out", "datalfoc"), file=fn)
}
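# Quick numeric illustration (values illustrative, not from the analysis) of
# the GVS pseudo-prior used in the model above: when a term is in the model
# (w = 1) its beta gets the vague N(0, 1/b.tau) prior; when it is out (w = 0)
# the beta samples harmlessly from a pseudo-prior centered on its full-model
# posterior mean with its full-model posterior sd.
b.tau <- 0.01; post.b1 <- 0.8; sd.b1 <- 0.2
for (w1 in c(1, 0)) {
  mean.b1 <- post.b1 * (1 - w1)
  tau.b1  <- (1/sd.b1^2) * (1 - w1) + w1 * b.tau
  cat("w =", w1, "-> prior mean", mean.b1,
      ", prior sd", round(1/sqrt(tau.b1), 2), "\n")
}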
# software used
# JAGS 4.3.0
# R version 4.0.2 (2020-06-22) -- "Taking Off Again"
library(jagsUI) # v1.5.1
load("./DATA.RData")
load("./global_est_zi.Rdata") # output file from global model
# data manipulation
datalfoc$SPP <- length(spp.list.foc)
yr <- array(NA, dim=c(dim (ab)[1], 9) )
yr[,1:3] <- 1; yr[,4:6] <- 2; yr[,7:9] <- 3
datalfoc$yr <- yr
s.year <- array(NA, dim=c(114, 9))
s.year[,1:3] <- 1; s.year[,4:6] <- 2; s.year[,7:9] <- 3
datalfoc$s.year <- s.year
datalfoc$ba <- datalfoc$CovsLam[, "ba"]
nobs <- datalfoc$nobs
dclass <- datalfoc$dclass
int <- datalfoc$int
site <- datalfoc$site
yr_rot <- datalfoc$yr_rot
# print sample sizes
apply(ab2[,1:2,,,dimnames(ab2)[[5]] %in% spp.list.foc], c(5), sum, na.rm=T)
# create data frame of stand covariates
dd <- data.frame(treat=factor(datalfoc$treat),tsh=datalfoc$tsh,tsh2=datalfoc$tsh^2)
# model matrix of stand effects (contr.sum is critical here)
mm <- model.matrix(~treat*tsh+treat*tsh2,dd,contrasts=list(treat="contr.sum"))
# position of each beta coefficient's Bernoulli inclusion indicator (treat contributes 6 columns, i.e., 7 terms with the intercept)
pos <- as.numeric(attr(mm,"assign")+1)
n.betas <- length(pos)
pos.pa <- c(1:6)
n.betas.pa <- length(pos.pa)
pos.pp <- c(1:4)
n.betas.pp <- length(pos.pp)
# Define model in BUGS
cat("
model {
##### Variables ##########################################
## indices: i=site, k=visit, t=year, spp=species
## pa.beta = availability/removal parameters
## pp.beta = perceptibility/distance scale parameters
## dist.sigma = distance scale parameter
## N = detection corrected abundance
## Ntot = population size of total area surveyed
## D = density
## bayesp = Bayesian p-value for model fit
##### PRIORS ###############################################
pa.beta[1] <- logit(p.pa.beta0)
p.pa.beta0 ~ dunif(0,1)
pp.beta[1] ~ dunif(0, 250)
# priors for the w model inclusion terms
# this gives each of the 8 nested model combos approximately equal prior probability: Pr(m) ~= 1/8
w[6] ~ dbern(0.125)
p.w5 <- (1-w[6])*0.143 + w[6]
w[5] ~ dbern(p.w5)
p.w4 <- (1-w[6])*0.286 + w[6]
w[4] ~ dbern(p.w4)
w456 <- w[4]+w[5]+w[6]
p.w3 <- equals(w456,0)*0.5 + (1-equals(w456,0))
w[3] ~ dbern(p.w3)
w56 <- w[5]+w[6]
p.w2 <- equals(w56,0)*0.5 + (1-equals(w56,0))
w[2] ~ dbern(p.w2)
w[1] ~ dbern(1)
# priors for wzi for zero-inflation param
wzi[6] ~ dbern(0.125)
p.wzi5 <- (1-wzi[6])*0.143 + wzi[6]
wzi[5] ~ dbern(p.wzi5)
p.wzi4 <- (1-wzi[6])*0.286 + wzi[6]
wzi[4] ~ dbern(p.wzi4)
wzi456 <- wzi[4]+wzi[5]+wzi[6]
p.wzi3 <- equals(wzi456,0)*0.5 + (1-equals(wzi456,0))
wzi[3] ~ dbern(p.wzi3)
wzi56 <- wzi[5]+wzi[6]
p.wzi2 <- equals(wzi56,0)*0.5 + (1-equals(wzi56,0))
wzi[2] ~ dbern(p.wzi2)
wzi[1] ~ dbern(1)
# priors for wpa for availability
wpa[6] ~ dbern(0.125)
p.wpa5 <- (1-wpa[6])*0.143 + wpa[6]
wpa[5] ~ dbern(p.wpa5)
p.wpa4 <- (1-wpa[6])*0.286 + wpa[6]
wpa[4] ~ dbern(p.wpa4)
wpa456 <- wpa[4]+wpa[5]+wpa[6]
p.wpa3 <- equals(wpa456,0)*0.5 + (1-equals(wpa456,0))
wpa[3] ~ dbern(p.wpa3)
wpa56 <- wpa[5]+wpa[6]
p.wpa2 <- equals(wpa56,0)*0.5 + (1-equals(wpa56,0))
wpa[2] ~ dbern(p.wpa2)
wpa[1] ~ dbern(1)
# priors for wpp for perceptibility
wpp[1] ~ dbern(1) #intercept
for (n in 2:4){ wpp[n] ~ dbern(0.5) }
# set up the vectors/matrices for beta estimation, abundance
for(b1 in 1:n.betas){
wtemp[b1] <- w[pos[b1]] # this uses GVS
#wtemp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b[b1] <- post.b[b1]*(1-wtemp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b[b1,b2] <- equals(b1,b2)*((1/sd.b[b1]^2)*(1-wtemp[b1])) + (wtemp[b1]*b.tau)
} # b2
s.beta[b1] ~ dnorm(mean.b[b1],tau.b[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, zero-inflation
for(b1 in 1:n.betas){
wzi.temp[b1] <- wzi[pos[b1]] # this uses GVS
#wzi.temp[b1] <- 1 # this forces you to fit the full model (all terms included)
mean.b.zi[b1] <- post.b.zi[b1]*(1-wzi.temp[b1]) # prior is either 0 or full-model posterior mean
for(b2 in 1:n.betas){ # set up the precision matrix (inverse variance) # allows for betas to be multivariate, if desired
tau.b.zi[b1,b2] <- equals(b1,b2)*((1/sd.b.zi[b1]^2)*(1-wzi.temp[b1])) + (wzi.temp[b1]*b.tau.zi)
} # b2
s.beta.psi[b1] ~ dnorm(mean.b.zi[b1],tau.b.zi[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, availability
for(b1 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
wpa.temp[b1] <- wpa[pos.pa[b1]]
#wpa.temp[b1] <- 1
mean.b.pa[b1] <- post.b.pa[b1]*(1-wpa.temp[b1])
for(b2 in 2:n.betas.pa){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pa[b1,b2] <- equals(b1,b2)*((1/sd.b.pa[b1]^2)*(1-wpa.temp[b1])) + (wpa.temp[b1]*b.tau.pa)
} # b2
pa.beta[b1] ~ dnorm(mean.b.pa[b1],tau.b.pa[b1,b1]) # all beta coefficients
} # b1
# set up the vectors/matrices for beta estimation, perceptibility
for(b1 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
wpp.temp[b1] <- wpp[pos.pp[b1]]
#wpp.temp[b1] <- 1
mean.b.pp[b1] <- post.b.pp[b1]*(1-wpp.temp[b1])
for(b2 in 2:n.betas.pp){ # starts at 2 because intercept always requires a diff prior and w=1
tau.b.pp[b1,b2] <- equals(b1,b2)*((1/sd.b.pp[b1]^2)*(1-wpp.temp[b1])) + (wpp.temp[b1]*b.tau.pp)
} # b2
pp.beta[b1] ~ dnorm(mean.b.pp[b1],tau.b.pp[b1,b1]) # all beta coefficients
} # b1
# vector of stand-specific predictors
stand.mu <- mm[,] %*% (s.beta[]*wtemp[])
stand.mu.psi <- mm[,] %*% (s.beta.psi[]*wzi.temp[])
stand.tau.psi <- 1/ (stand.sig.psi*stand.sig.psi)
stand.sig.psi ~ dunif(0,10)
stand.tau <- 1/ (stand.sig*stand.sig)
stand.sig ~ dunif(0,10)
yr.tau <- 1/ (yr.sig*yr.sig)
yr.sig ~ dunif(0,20)
#yr.tau.psi <- 1/ (yr.sig.psi*yr.sig.psi)
#yr.sig.psi ~ dunif(0,20)
obs.tau <- 1/ (obs.sig*obs.sig)
obs.sig ~ dunif(0,20)
b.tau <- 0.01
b.tau.zi <- 0.01
b.tau.pa <- 0.01
b.tau.pp <- 0.01
##### DISTANCE AND REMOVAL #####################################
for (l in 1:L) {
int[l] ~ dcat(pi.pa.c[site[l], yr_rot[l], ]) # removal class frequencies
dclass[l] ~ dcat(pi.pd.c[site[l], yr_rot[l], ]) # distance class frequencies
} # L
# Distance
for(b in 1:nD){
f[b] <- (2*midpt[b]*delta)/(B*B) # radial density function for point counts, change for line transects
}
for (i in 1:nsites){
for(t in 1:YR){
for(b in 1:nD){
g[i,t,b] <- exp(-midpt[b]*midpt[b]/(2*dist.sigma[i,t]*dist.sigma[i,t])) # half-normal distance function
pi.pd[i,t,b] <- g[i,t,b]*f[b]
pi.pd.c[i,t,b] <- pi.pd[i,t,b]/pdet[i,t]
} #nD
pdet[i,t] <- sum(pi.pd[i,t,1:nD]) # overall perceptibility (sum over distance classes)
# Removal
for (r in 1:R){
pi.pa[i,t,r] <- p.a[i,t]*pow(1-p.a[i,t], (r-1))
pi.pa.c[i,t,r] <- pi.pa[i,t,r] / pcap[i,t]
} #R
pcap[i,t] <- sum(pi.pa[i,t,1:R])
# Detection models
pmarg[i,t] <- pcap[i,t] * pdet[i,t]
logit(p.a[i,t]) <- wpa[1]*pa.beta[1] + wpa.temp[2]*pa.beta[2]*hr[i,t] + wpa.temp[3]*pa.beta[3]*date[i,t] +
wpa.temp[4]*pa.beta[4]*date2[i,t] +
wpa.temp[5]*pa.beta[5]*date[i,t]*hr[i,t] + wpa.temp[6]*pa.beta[6]*date[i,t]*date2[i,t]*hr[i,t]
log(dist.sigma[i,t]) <- wpp[1]*log(pp.beta[1]) + wpp.temp[2]*pp.beta[2]*densiom[i,t] +
wpp.temp[3]*pp.beta[3]*noise[i,t] + wpp.temp[4]*pp.beta[4]*ba[i] + obs.eps[obs[i,t]]
##### POINT-LEVEL ABUNDANCE ###########################
nobs[i,t] ~ dbin(pmarg[i,t], N[i,t])
N[i,t] ~ dpois(lambda.eff[i,t])
lambda.eff[i,t] <- lambda[i,t] * w.lam[i,t]
w.lam[i,t] ~ dbern(psi[i,t])
logit(psi[i,t]) <- psi.beta.s[stand.id[i]] #+ yr.eps.psi[t]
log(lambda[i,t]) <- lam.beta.s[stand.id[i]] + yr.eps[t]
##### GOODNESS OF FIT #######################################
nobs.fit[i,t] ~ dbin(pmarg[i,t], N[i,t]) # create new realization of model
e.p[i,t] <- pmarg[i,t] * N[i,t] # original model prediction
E.p[i,t] <- pow((nobs[i,t]- e.p[i,t]),2)/(e.p[i,t]+0.5)
E.New.p[i,t]<- pow((nobs.fit[i,t]-e.p[i,t]),2)/(e.p[i,t]+0.5)
}} #YR #nsites
# Random effects
for (s in 1:S){ psi.beta.s[s] ~ dnorm(stand.mu.psi[s], stand.tau.psi)
lam.beta.s[s] ~ dnorm(stand.mu[s], stand.tau) } #S
for (y in 1:9){ yr.eps[y] ~ dnorm(0, yr.tau) } # y
# yr.eps.psi[y] ~ dnorm(0, yr.tau.psi) # optional year effect on zero-inflation (disabled)
for (o in 1:28){ obs.eps[o] ~ dnorm(0, obs.tau)} # o
##### DERIVED QUANTITIES ####################################
for(t in 1:YR){
Ntot[t] <- sum(N[1:nsites,t])
D[t] <- Ntot[t] / ((3.14*B*B*nsites)/10000) # density (birds/ha); 3.14 approximates pi
} #YR
fit.p <- sum(E.p[1:nsites,1:YR])
fit.new.p <- sum(E.New.p[1:nsites,1:YR])
bayesp <- step(fit.new.p-fit.p) # Bayesian p-value for model fit: ~0.5 indicates good fit, values near 0 or 1 indicate poor fit
} # End model
" ,file="./T-zip-GVS.txt")
# CAUTION: These next lines run the models and
# take a VERY long time (>1 week total;
# each species took ~4 days).
# We ran these on an HPC and split the
# loop so each job ran 4-5 species sequentially.
# These species had poor fit under the Poisson model
# (BOCH, GCKI, GRAJ, MAWA, YBFL, YPWA),
# so we refit them with zero inflation.
spp.zi <- c(10, 11, 3, 5, 2, 8) # indices into spp.list.foc
for (i in spp.zi){ # loop over the species refit with zero inflation
try(rm("out"))
spp <- spp.list.foc[i]
spp.num<- which(dimnames(nobs)[[3]]==spp)
datalfoc$nobs <- Nav <- apply(ab2[,1:2,,,spp], c(1,4), sum, na.rm=T) # naive counts, used to initialize N
Mst <- apply(Nav, c(1), max, na.rm=T) + 1
inits <- function(){ list(
N = Nav,
p.pa.beta0= runif(1, 0.3, 0.8),
pp.beta= c(runif(1, 20, 65), rep(0, datalfoc$nCovsPP-2)),
s.beta= runif(21, -0.1, 0.1),
s.beta.psi= c(5,runif(20, -0.1, 0.1)),
stand.sig.psi= runif(1, 0, 0.1),
stand.sig= runif(1, 0, 0.1),
obs.sig= runif(1, 0, 0.1),
yr.sig= runif(1, 0, 0.1)
) }
params <- c("pa.beta", "pp.beta",
"Ntot", "D",
"stand.sig", "stand.sig.psi", "s.beta", "s.beta.psi",
"bayesp", "w", "wzi", "wpa", "wpp",
"yr.eps", "yr.sig", "yr.eps.psi", "yr.sig.psi", "obs.eps", "obs.sig"
)
datalfoc$nobs <- nobs[,,spp]
datalfoc$dclass <- dclass[datalfoc$species==spp.num]
datalfoc$int <- int[datalfoc$species==spp.num]
datalfoc$site <- site[datalfoc$species==spp.num]
datalfoc$yr_rot <- yr_rot[datalfoc$species==spp.num]
datalfoc$L <- length(datalfoc$species[datalfoc$species==spp.num])
# new objects for beta estimation
datalfoc$mm <- mm
datalfoc$pos <- pos
datalfoc$pos.pa <- pos.pa
datalfoc$pos.pp <- pos.pp
datalfoc$n.betas <- n.betas
datalfoc$n.betas.pa <- n.betas.pa
datalfoc$n.betas.pp <- n.betas.pp
# input posterior means & sds from global model run
spp.num2 <- which( names(post.b)==spp )
datalfoc$post.b <- post.b[[spp.num2]] # out$mean$post.b
datalfoc$sd.b <-sd.b[[spp.num2]]
datalfoc$post.b.zi <- post.b.zi[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.zi <-sd.b.zi[[spp.num2]]
datalfoc$post.b.pa <- post.b.pa[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pa <- sd.b.pa[[spp.num2]]
datalfoc$post.b.pp <- post.b.pp[[spp.num2]] # out$mean$post.b
datalfoc$sd.b.pp <- sd.b.pp[[spp.num2]]
# MCMC settings
ni <- 200000 ; nb <- 100000 ; nt <- 10 ; nc <- 6 ; na <- 10000
#ni <- 100 ; nb <- 50 ; nt <- 1 ; nc <- 1 ; na <- 100
# Run JAGS
out <- jags(datalfoc, inits=inits,
params, "./T-zip-GVS.txt",
n.thin=nt, n.chains=nc,
n.burnin=nb, n.iter=ni, n.adapt=na,
parallel=TRUE, modules=c("glm"),
codaOnly="N")
fn <- paste0("./", spp, "_T-zip-GVS.RData")
save(list= c("out", "datalfoc"), file=fn)
}
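# Post-processing sketch (assumed workflow, not from the original scripts):
# under GVS the posterior mean of each w is its posterior inclusion
# probability, and tabulating the sampled w vectors gives posterior model
# weights. MAWA is one of the species fit in the loop above.
load("./MAWA_T-zip-GVS.RData")        # loads 'out' and 'datalfoc'
round(out$mean$w, 2)                  # inclusion probability of each abundance term
w.samps <- out$sims.list$w            # (iterations x 6) matrix of 0/1 draws
sort(table(apply(w.samps, 1, paste, collapse=""))/nrow(w.samps),
     decreasing=TRUE)                 # posterior weight of each model configuration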