################################################################################
## Code by Vanja Dukic, U of Colorado at Boulder
## Bayesian Statistics and Computing 2019
##
## Week 10 - MH algorithm
################################################################################

################################################################################
# Example 1
# Target density: standard normal N(0, 1).
# MH with an independence proposal N(mu, sigma); mu and sigma are the
# "tuning parameters" of the sampler.
#
# Args:
#   n     - chain length (number of MCMC iterations)
#   x0    - starting value of the chain
#   mu    - mean of the (fixed) normal proposal
#   sigma - standard deviation of the proposal
# Returns: numeric vector of length n holding the chain trajectory.
mh.ind <- function(n, x0, mu, sigma) {
  x <- numeric(n)
  x[1] <- x0
  for (i in 2:n) {
    # Sample a candidate y from the proposal. For an independence sampler
    # q(y | x[i-1]) = q(y): the proposal ignores the current state.
    y <- rnorm(1, mu, sigma)

    # Acceptance ratio:
    #   alpha = pi(y) q(x | y) / (pi(x) q(y | x))
    #         = [pi(y) / pi(x)] * [q(x) / q(y)]   (independence proposal)
    # Work on the log scale for numerical stability. The normalizing
    # constants of pi and of q cancel within each difference, so using
    # dnorm(..., log = TRUE) gives exactly the hand-derived ratio:
    #   -(y^2 - x^2)/2  -  ((x-mu)^2 - (y-mu)^2) / (2*sigma^2)
    log.alpha <-
      dnorm(y, 0, 1, log = TRUE) - dnorm(x[i - 1], 0, 1, log = TRUE) +
      dnorm(x[i - 1], mu, sigma, log = TRUE) - dnorm(y, mu, sigma, log = TRUE)

    # Accept y with probability min(1, alpha); otherwise stay put.
    if (log(runif(1)) <= log.alpha) {
      x[i] <- y
    } else {
      x[i] <- x[i - 1]
    }
  }
  # Return the chain trajectory.
  x
}

################################################################################
# Example 2
# Target density: standard normal N(0, 1).
# Random-walk MH: proposal is N(x[i-1], sigma).
#
# Args:
#   n     - chain length
#   x0    - starting value
#   sigma - standard deviation of the random-walk step
# Returns: numeric vector of length n holding the chain trajectory.
mh.rw <- function(n, x0, sigma) {
  x <- numeric(n)
  x[1] <- x0
  for (i in 2:n) {
    # Random-walk proposal: normal centered at the current state.
    y <- rnorm(1, x[i - 1], sigma)

    # For a symmetric proposal q(x | y) = q(y | x), so the proposal terms
    # cancel and alpha = pi(y) / pi(x); here pi is N(0, 1), i.e.
    # log.alpha = -(y^2 - x^2)/2 (normalizing constants cancel).
    log.alpha <- dnorm(y, 0, 1, log = TRUE) - dnorm(x[i - 1], 0, 1, log = TRUE)

    if (log(runif(1)) <= log.alpha) {
      x[i] <- y
    } else {
      x[i] <- x[i - 1]
    }
  }
  x
}

############# RUNNING THE MH SAMPLERS

# set.seed(2019)  # uncomment for reproducible runs

x.mh.ind <- mh.ind(10000, 0, 0, 100)
x.mh.rw  <- mh.rw(10000, 0, 1.1)

# Diagnostics
# install.packages("coda")
library(coda)  # hard dependency: fail loudly if coda is missing

x.mh.ind.mcmc <- as.mcmc.list(as.mcmc(x.mh.ind))
x.mh.rw.mcmc  <- as.mcmc.list(as.mcmc(x.mh.rw))

# codamenu()
# You can follow the instructions from codamenu ... or do the diagnostics
# yourself:

plot(x.mh.ind.mcmc)

x.ind.acf  <- acf(x.mh.ind, lag.max = 200)
# acf() stores lag 0 at index 1, so index i corresponds to lag i - 1.
x.ind.acfs <- x.ind.acf$acf[, 1, 1]

# Find the first lag for which the ACF is below 0.1:
which(x.ind.acfs < 0.1)
# NOTE(review): the original used min(which(...)) + 1, but because the
# acf array starts at lag 0, the first LAG below the threshold is the
# first qualifying index minus one.
x.ind.thin.par <- min(which(x.ind.acfs < 0.1)) - 1

# Obtain a subsample (every thin.par-th draw) from the original chain:
x.ind.thinned <- x.mh.ind[seq(from = 1, to = length(x.mh.ind),
                              by = x.ind.thin.par)]
# alternatively,
# x.mh.ind.mcmc <- mcmc(x.mh.ind, thin = x.ind.thin.par)

# Double check that thinning worked as planned:
acf(x.ind.thinned)

# Work with the thinned chain to obtain estimates and visualize:
summary(x.ind.thinned)
plot(as.mcmc(x.ind.thinned))
# ps. you'll be able to see more with trace plots that are not as crowded,
# so some "patterns" may emerge; they may be harmless however.

# RW chain analysis:
plot(x.mh.rw.mcmc)

x.rw.acf  <- acf(x.mh.rw)
x.rw.acfs <- x.rw.acf$acf[, 1, 1]
which(x.rw.acfs < 0.1)
# [1]  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
# Same lag-0 offset correction as above:
x.rw.thin.par <- min(which(x.rw.acfs < 0.1)) - 1

x.rw.thinned <- x.mh.rw[seq(from = 1, to = length(x.mh.rw),
                            by = x.rw.thin.par)]
# alternatively,
# x.mh.rw.mcmc <- mcmc(x.mh.rw, thin = x.rw.thin.par)

acf(x.rw.thinned)
summary(x.rw.thinned)
plot(as.mcmc(x.rw.thinned))

####################################################
# Adaptive proposals
################################################################################
# Known target density: standard normal.
# Random-walk proposal with mean x[i-1] and stdev sigma.
#
# Usually we have no idea what the right "scale" for the proposal should be —
# in other words, how dispersed the proposed values should be.
# The rule of thumb is to cast a wide net, and then re-evaluate after a
# batch of iterations.
# The optimal acceptance rate should be around 50% if the posterior is
# low-dimensional, or around 25% if the posterior is high-dimensional.

sdmh <- 100
x.mh.ind <- mh.ind(1000, 0, 0, sdmh)
x.mh.ind.mcmc <- as.mcmc.list(as.mcmc(x.mh.ind))
plot(x.mh.ind.mcmc)

# With a continuous proposal, accepted values are (a.s.) distinct, so the
# fraction of unique states approximates the acceptance rate:
accept.rate <- length(unique(x.mh.ind)) / length(x.mh.ind)
accept.rate

# While accept.rate < .2, keep reducing the proposal stdev, for example:
x.mh.ind <- mh.ind(1000, x.mh.ind[length(x.mh.ind)], 0, sdmh / 30)

####################################################
# Mixture of proposals
################################################################################
# Known target density: standard normal.
# Combine an independence proposal N(mu, sigma1) and a random-walk proposal
# N(x[i-1], sigma2), each chosen with probability 1/2. Each kernel
# individually leaves pi invariant, so randomly mixing them does too.
#
# Args:
#   n      - chain length
#   x0     - starting value
#   mu     - mean of the independence proposal
#   sigma1 - stdev of the independence proposal
#   sigma2 - stdev of the random-walk proposal
# Returns: numeric vector of length n holding the chain trajectory.
mh.mix <- function(n, x0, mu, sigma1, sigma2) {
  x <- numeric(n)
  x[1] <- x0
  for (i in 2:n) {
    # Pick a kernel: k = 1 -> random walk, k = 0 -> independence.
    k <- rbinom(1, 1, 0.5)
    if (k == 1) {
      # Symmetric random-walk kernel: alpha = pi(y) / pi(x).
      y <- rnorm(1, x[i - 1], sigma2)
      log.alpha <-
        dnorm(y, 0, 1, log = TRUE) - dnorm(x[i - 1], 0, 1, log = TRUE)
    } else {
      # Independence kernel: alpha = [pi(y)/pi(x)] * [q(x)/q(y)].
      y <- rnorm(1, mu, sigma1)
      log.alpha <-
        dnorm(y, 0, 1, log = TRUE) - dnorm(x[i - 1], 0, 1, log = TRUE) +
        dnorm(x[i - 1], mu, sigma1, log = TRUE) -
        dnorm(y, mu, sigma1, log = TRUE)
    }

    if (log(runif(1)) <= log.alpha) {
      x[i] <- y
    } else {
      x[i] <- x[i - 1]
    }
  }
  x
}

x.mh.mix <- mh.mix(10000, 0, 0, 10, 2)
x.mh.mix.mcmc <- as.mcmc.list(as.mcmc(x.mh.mix))
plot(x.mh.mix.mcmc)

accept.rate <- length(unique(x.mh.mix)) / length(x.mh.mix)
accept.rate

acf(x.mh.mix)