Introduction to mixpower

library(mixpower)

mixpower provides simulation-based power analysis for linear and generalized linear mixed-effects models, covering Gaussian, binomial, Poisson, and negative binomial outcomes.

Inference options: Wald vs LRT

mixpower supports two inferential paths in the lme4 backend: Wald tests on the fixed-effect coefficient of interest (test_method = "wald"), and likelihood-ratio tests comparing the fitted model against an explicitly specified null model (test_method = "lrt").

For LRT, you must provide null_formula explicitly. This avoids hidden model-reduction rules and keeps assumptions transparent.

# Shared design for both inferential paths: 40 subject clusters,
# 8 trials per design cell.
d <- mp_design(clusters = list(subject = 40), trials_per_cell = 8)
# Generative assumptions: null intercept, condition effect of 0.4,
# residual SD of 1, and a subject-level ICC of 0.1.
a <- mp_assumptions(
  fixed_effects = list(`(Intercept)` = 0, condition = 0.4),
  residual_sd = 1,
  icc = list(subject = 0.1)
)

# Wald path: tests the condition coefficient in the fitted model.
# NOTE(review): the exact statistic (z vs t, df method) is determined by
# the mixpower backend — confirm against mp_scenario_lme4() docs.
scn_wald <- mp_scenario_lme4(
  y ~ condition + (1 | subject),
  design = d,
  assumptions = a,
  test_method = "wald"
)

# LRT path: the null model must be given explicitly via null_formula.
# Here the null drops `condition` but keeps the subject random intercept.
scn_lrt <- mp_scenario_lme4(
  y ~ condition + (1 | subject),
  design = d,
  assumptions = a,
  test_method = "lrt",
  null_formula = y ~ 1 + (1 | subject)
)

# Vary the number of subject clusters; the same spec is reused for both
# scenarios so the resulting curves are directly comparable.
vary_spec <- list(`clusters.subject` = c(30, 50, 80))

# Identical nsim and seed for both runs: any difference between the curves
# then reflects the test method rather than Monte Carlo noise.
# nsim = 50 keeps the vignette fast; increase for final study reporting.
sens_wald <- mp_sensitivity(
  scn_wald,
  vary = vary_spec,
  nsim = 50,
  seed = 123
)

sens_lrt <- mp_sensitivity(
  scn_lrt,
  vary = vary_spec,
  nsim = 50,
  seed = 123
)

# Tag each sensitivity table with its method, stack the two tables, and
# print the key columns side by side.
wald_res <- sens_wald$results
wald_res$method <- "wald"
lrt_res <- sens_lrt$results
lrt_res$method <- "lrt"
comparison <- rbind(wald_res, lrt_res)

shown_cols <- c(
  "method", "clusters.subject", "estimate", "mcse",
  "conf_low", "conf_high", "failure_rate", "singular_rate"
)
comparison[, shown_cols]
#>   method clusters.subject estimate mcse conf_low conf_high failure_rate
#> 1   wald               30        0    0        0         0            0
#> 2   wald               50        0    0        0         0            0
#> 3   wald               80        0    0        0         0            0
#> 4    lrt               30        0    0        0         0            0
#> 5    lrt               50        0    0        0         0            0
#> 6    lrt               80        0    0        0         0            0
#>   singular_rate
#> 1             0
#> 2             0
#> 3             0
#> 4             0
#> 5             0
#> 6             0

# Overlay the Wald and LRT power curves on a shared [0, 1] axis.
dat_wald <- subset(comparison, method == "wald")
dat_lrt <- subset(comparison, method == "lrt")

plot(
  dat_wald[["clusters.subject"]], dat_wald[["estimate"]],
  type = "b", pch = 16, lty = 1,
  ylim = c(0, 1),
  xlab = "clusters.subject",
  ylab = "Power estimate",
  col = "steelblue"
)
lines(
  dat_lrt[["clusters.subject"]], dat_lrt[["estimate"]],
  type = "b", pch = 17, lty = 2,
  col = "firebrick"
)
legend(
  "bottomright",
  legend = c("Wald", "LRT"),
  col = c("steelblue", "firebrick"),
  lty = c(1, 2), pch = c(16, 17), bty = "n"
)

Interpretation notes:

- With a shared seed and identical designs, differences between the Wald and LRT curves reflect the inferential method alone, not simulation noise.
- Read failure_rate and singular_rate alongside power: a method with higher apparent power but more failed or singular fits is not necessarily preferable.
- NOTE(review): every estimate printed above is exactly 0 with zero MCSE across all conditions — confirm the vignette was built against a working simulation backend before publishing.

Binary outcomes (binomial GLMM)

mixpower supports binomial GLMM power analysis via the lme4::glmer() backend. This keeps design assumptions explicit and diagnostics visible.

Extremely large effect sizes can lead to quasi-separation. In those cases, glmer() may warn or return unstable estimates. MixPower records these as failed simulations (via NA p-values), preserving transparency.

# Binomial design mirrors the Gaussian example: 40 subjects, 8 trials/cell.
d <- mp_design(clusters = list(subject = 40), trials_per_cell = 8)
# NOTE(review): for the binomial backend the fixed effects are presumably
# on the log-odds scale, and residual_sd is likely ignored — confirm
# against the mp_scenario_lme4_binomial() documentation.
a <- mp_assumptions(
  fixed_effects = list(`(Intercept)` = 0, condition = 0.5),
  residual_sd = 1,
  icc = list(subject = 0.4)
)

# Wald test on the condition coefficient from lme4::glmer().
scn_bin <- mp_scenario_lme4_binomial(
  y ~ condition + (1 | subject),
  design = d,
  assumptions = a,
  test_method = "wald"
)

# The singular-fit warnings below (7 of 50 fits) show up as
# singular_rate = 0.14 in the summary output.
res_bin <- mp_power(scn_bin, nsim = 50, seed = 123)
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
summary(res_bin)
#> $power
#> [1] 0
#> 
#> $mcse
#> [1] 0
#> 
#> $ci
#> [1] 0 0
#> 
#> $diagnostics
#> $diagnostics$fail_rate
#> [1] 0
#> 
#> $diagnostics$singular_rate
#> [1] 0.14
#> 
#> 
#> $nsim
#> [1] 50
#> 
#> $alpha
#> [1] 0.05
#> 
#> $failure_policy
#> [1] "count_as_nondetect"
#> 
#> $conf_level
#> [1] 0.95

Sensitivity curve for binomial outcomes

# Power curve over the assumed condition effect (0.2, 0.4, 0.6), with the
# cluster count held at the scenario's 40 subjects.
sens_bin <- mp_sensitivity(
  scn_bin,
  vary = list(`fixed_effects.condition` = c(0.2, 0.4, 0.6)),
  nsim = 50,
  seed = 123
)
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')
#> boundary (singular) fit: see help('isSingular')

# Dispatches to mixpower's plot method for sensitivity objects.
plot(sens_bin)

# Full results table, including failure and singularity diagnostics.
sens_bin$results
#>   fixed_effects.condition estimate mcse conf_low conf_high failure_rate
#> 1                     0.2        0    0        0         0            0
#> 2                     0.4        0    0        0         0            0
#> 3                     0.6        0    0        0         0            0
#>   singular_rate n_effective nsim
#> 1          0.12          50   50
#> 2          0.10          50   50
#> 3          0.12          50   50

# Inspect failure_rate and singular_rate alongside power.
# Increase nsim for final study reporting.

Count outcomes (Poisson vs Negative Binomial)

mixpower supports count outcomes through Poisson and Negative Binomial GLMMs. Poisson is appropriate when the variance is approximately equal to the mean; NB handles over-dispersion.

# Count-outcome design: same 40 subjects x 8 trials-per-cell structure.
d <- mp_design(clusters = list(subject = 40), trials_per_cell = 8)
# NOTE(review): for the Poisson backend the fixed effects are presumably
# on the log-rate scale, and residual_sd is likely unused for count
# outcomes — confirm against mp_scenario_lme4_poisson() docs.
a <- mp_assumptions(
  fixed_effects = list(`(Intercept)` = 0, condition = 0.4),
  residual_sd = 1,
  icc = list(subject = 0.3)
)

# Poisson scenario: appropriate when variance is roughly equal to the mean.
scn_pois <- mp_scenario_lme4_poisson(
  y ~ condition + (1 | subject),
  design = d,
  assumptions = a,
  test_method = "wald"
)

res_pois <- mp_power(scn_pois, nsim = 50, seed = 123)
summary(res_pois)
#> $power
#> [1] 0
#> 
#> $mcse
#> [1] 0
#> 
#> $ci
#> [1] 0 0
#> 
#> $diagnostics
#> $diagnostics$fail_rate
#> [1] 0
#> 
#> $diagnostics$singular_rate
#> [1] 0
#> 
#> 
#> $nsim
#> [1] 50
#> 
#> $alpha
#> [1] 0.05
#> 
#> $failure_policy
#> [1] "count_as_nondetect"
#> 
#> $conf_level
#> [1] 0.95

# Reuse the Poisson assumptions and add an NB dispersion parameter.
# NOTE(review): assigning into the assumptions object directly may bypass
# any validation mp_assumptions() performs — confirm `theta` is a
# supported field (ideally pass it through the constructor instead).
a_nb <- a
a_nb$theta <- 1.5  # NB dispersion (size parameter)

scn_nb <- mp_scenario_lme4_nb(
  y ~ condition + (1 | subject),
  design = d,
  assumptions = a_nb,
  test_method = "wald"
)

# The two singular-fit warnings below (2 of 50 fits) correspond to
# singular_rate = 0.04 in the summary output.
res_nb <- mp_power(scn_nb, nsim = 50, seed = 123)
#> boundary (singular) fit: see help('isSingular')
#> 
#> boundary (singular) fit: see help('isSingular')
summary(res_nb)
#> $power
#> [1] 0
#> 
#> $mcse
#> [1] 0
#> 
#> $ci
#> [1] 0 0
#> 
#> $diagnostics
#> $diagnostics$fail_rate
#> [1] 0
#> 
#> $diagnostics$singular_rate
#> [1] 0.04
#> 
#> 
#> $nsim
#> [1] 50
#> 
#> $alpha
#> [1] 0.05
#> 
#> $failure_policy
#> [1] "count_as_nondetect"
#> 
#> $conf_level
#> [1] 0.95

Sensitivity curves (count outcomes)

# Same effect-size grid, nsim, and seed for both families, so differences
# between the curves reflect the outcome distribution (overdispersion),
# not Monte Carlo noise.
sens_pois <- mp_sensitivity(
  scn_pois,
  vary = list(`fixed_effects.condition` = c(0.2, 0.4, 0.6)),
  nsim = 50,
  seed = 123
)

sens_nb <- mp_sensitivity(
  scn_nb,
  vary = list(`fixed_effects.condition` = c(0.2, 0.4, 0.6)),
  nsim = 50,
  seed = 123
)
#> boundary (singular) fit: see help('isSingular')
#> 
#> boundary (singular) fit: see help('isSingular')
#> 
#> boundary (singular) fit: see help('isSingular')

# Sensitivity plot methods, one curve per family.
plot(sens_pois)

plot(sens_nb)

# Compare power estimates and failure/singularity rates across Poisson vs NB.
# Overdispersion can reduce power relative to Poisson.
# Increase nsim for final reports.