# initialization
mu.H0 <- 0; sigma.H0 <- 1
mu.H1 <- 0; sigma.H1 <- 1e-3
p.H1 <- 1/2
mu <- c(mu.H0, mu.H1)
sigma <- c(sigma.H0, sigma.H1)

# simulation function
simulate <- function() {
  M <- rbinom(1, 1, p.H1)              # pick the true model: 0 -> H0, 1 -> H1
  x <- rnorm(1, mu[M+1], sigma[M+1])   # draw one observation from that model
  # two-sided p-value of x under H0
  p.val <- 2 * pnorm(mu[1] - abs(x - mu[1]), mu[1], sigma[1])
  # Bayes factor of H1 vs H0, and its log to avoid numerical underflow
  BF  <- dnorm(x, mu[2], sigma[2]) / dnorm(x, mu[1], sigma[1])
  lBF <- dnorm(x, mu[2], sigma[2], log=TRUE) - dnorm(x, mu[1], sigma[1], log=TRUE)
  cat(sprintf("x = %.5f => p.val = %.2e, BF = %.2e [ log(BF) = %.2e ]\n",
              x, p.val, BF, lBF))
  return(M)
}
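As a side check (a minimal sketch, not part of the original script): since H0 is symmetric around mu[1], the p-value expression used in simulate() is just the familiar two-sided tail probability:

# check that the p-value expression equals the usual two-sided form
mu <- c(0, 0); sigma <- c(1, 1e-3)               # as in the initialization above
x  <- 0.5                                        # an arbitrary test value
p1 <- 2 * pnorm(mu[1] - abs(x - mu[1]), mu[1], sigma[1])
p2 <- 2 * (1 - pnorm(abs(x - mu[1]) / sigma[1]))
all.equal(p1, p2)                                # TRUE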
By default every run of the script produces different random numbers; setting the seed makes the example reproducible:

> set.seed(150914); n=12; M <- rep(NA, n); for(i in 1:n) M[i] <- simulate()
x = -0.00079 => p.val = 9.99e-01, BF = 7.29e+02 [ log(BF) = 6.59e+00 ]
x = -0.62293 => p.val = 5.33e-01, BF = 0.00e+00 [ log(BF) = -1.94e+05 ]
x = -0.00029 => p.val = 1.00e+00, BF = 9.57e+02 [ log(BF) = 6.86e+00 ]
x = -0.00162 => p.val = 9.99e-01, BF = 2.68e+02 [ log(BF) = 5.59e+00 ]
x = -0.39258 => p.val = 6.95e-01, BF = 0.00e+00 [ log(BF) = -7.71e+04 ]
x = -0.82578 => p.val = 4.09e-01, BF = 0.00e+00 [ log(BF) = -3.41e+05 ]
x = 0.00073 => p.val = 9.99e-01, BF = 7.69e+02 [ log(BF) = 6.64e+00 ]
x = -0.00012 => p.val = 1.00e+00, BF = 9.93e+02 [ log(BF) = 6.90e+00 ]
x = 0.22295 => p.val = 8.24e-01, BF = 0.00e+00 [ log(BF) = -2.48e+04 ]
x = -0.00022 => p.val = 1.00e+00, BF = 9.76e+02 [ log(BF) = 6.88e+00 ]
x = 0.00117 => p.val = 9.99e-01, BF = 5.07e+02 [ log(BF) = 6.23e+00 ]
x = -1.03815 => p.val = 2.99e-01, BF = 0.00e+00 [ log(BF) = -5.39e+05 ]

And the winners are:
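Since the two models are equiprobable a priori (p.H1 = 1/2), the posterior odds coincide with the Bayes factor, so the posterior probability of H1 follows directly. A small helper along these lines (a minimal sketch; posterior.H1 is not part of the original script):

# posterior probability of H1 given the Bayes factor (hypothetical helper)
posterior.H1 <- function(BF, p.H1 = 1/2) {
  post.odds <- BF * p.H1 / (1 - p.H1)   # posterior odds = BF * prior odds
  post.odds / (1 + post.odds)
}
posterior.H1(7.29e+02)   # first draw above: P(H1|x) ~ 0.999
posterior.H1(0)          # second draw (BF underflows to 0): P(H1|x) ~ 0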
> M
 [1] 1 0 1 1 0 0 1 1 0 1 1 0

It should no longer be a surprise that the best figure of merit to discriminate between the two models is the Bayes factor and not the p-value.[31] You can now play with the simulations, varying the parameters, for instance along the lines sketched below.
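A minimal parameterized variant of simulate() (a sketch; the name simulate.par and its argument defaults are assumptions, not from the original text) makes the model parameters explicit:

# parameterized variant of simulate() -- a sketch, not from the original text
simulate.par <- function(mu = c(0, 0), sigma = c(1, 1e-3), p.H1 = 1/2) {
  M <- rbinom(1, 1, p.H1)              # pick the true model: 0 -> H0, 1 -> H1
  x <- rnorm(1, mu[M+1], sigma[M+1])   # draw one observation from that model
  p.val <- 2 * pnorm(mu[1] - abs(x - mu[1]), mu[1], sigma[1])
  lBF <- dnorm(x, mu[2], sigma[2], log=TRUE) - dnorm(x, mu[1], sigma[1], log=TRUE)
  cat(sprintf("x = %.5f => p.val = %.2e, log(BF) = %.2e\n", x, p.val, lBF))
  return(M)
}

# e.g. a broader alternative, sigma.H1 = 0.1 instead of 1e-3:
simulate.par(sigma = c(1, 0.1))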
If you want to get a situation yielding Bayes factors of