Skip to content

Commit

Permalink
Added computation of MDRR for logistic models.
Browse files Browse the repository at this point in the history
  • Loading branch information
schuemie committed Sep 27, 2024
1 parent 5611ed6 commit 6b0ec84
Show file tree
Hide file tree
Showing 6 changed files with 62 additions and 14 deletions.
4 changes: 2 additions & 2 deletions DESCRIPTION
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
Package: CohortMethod
Type: Package
Title: New-User Cohort Method with Large Scale Propensity and Outcome Models
Version: 5.3.0
Date: 2024-05-31
Version: 5.4.0
Date: 2024-09-27
Authors@R: c(
person("Martijn", "Schuemie", , "[email protected]", role = c("aut", "cre")),
person("Marc", "Suchard", role = c("aut")),
Expand Down
4 changes: 3 additions & 1 deletion NEWS.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
CohortMethod 5.3.1
CohortMethod 5.4.0
==================

Changes:
Expand All @@ -9,6 +9,8 @@ Changes:

3. The `cohorts` argument of `insertExportedResultsInSqlite()` has column `cohortId` renamed to `cohortDefinitionId`.

4. Added computation of MDRR for logistic models.



CohortMethod 5.3.0
Expand Down
26 changes: 18 additions & 8 deletions R/Power.R
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@
#' @details
#' Compute the minimum detectable relative risk (MDRR) and expected standard error (SE) for a given
#' study population, using the actual observed sample size and number of outcomes. Currently, only
#' computations for Cox models are implemented. For Cox model, the computations by Schoenfeld (1983) is used.
computations for Cox and logistic models are implemented. For Cox models, the computation by
Schoenfeld (1983) is used. For logistic models, Wald's z-test is used.
#'
#' @param population A data frame describing the study population as created using the
#' \code{\link{createStudyPopulation}} function. This should at least have these
Expand All @@ -46,14 +47,16 @@ computeMdrr <- function(population, alpha = 0.05, power = 0.8, twoSided = TRUE,
checkmate::assertNumber(alpha, lower = 0, upper = 1, add = errorMessages)
checkmate::assertNumber(power, lower = 0, upper = 1, add = errorMessages)
checkmate::assertLogical(twoSided, len = 1, add = errorMessages)
checkmate::assertChoice(modelType, c("cox"), add = errorMessages)
checkmate::assertChoice(modelType, c("cox", "logistic"), add = errorMessages)
checkmate::reportAssertions(collection = errorMessages)

pTarget <- mean(population$treatment)
totalEvents <- sum(population$outcomeCount != 0)
totalSubjects <- nrow(population)
mdrr <- computeMdrrFromAggregateStats(
pTarget = pTarget,
totalEvents = totalEvents,
totalSubjects = totalSubjects,
alpha = alpha,
power = power,
twoSided = twoSided,
Expand All @@ -74,7 +77,7 @@ computeMdrr <- function(population, alpha = 0.05, power = 0.8, twoSided = TRUE,
return(result)
}

computeMdrrFromAggregateStats <- function(pTarget, totalEvents, alpha = 0.05, power = 0.8, twoSided = TRUE, modelType = "cox") {
computeMdrrFromAggregateStats <- function(pTarget, totalEvents, totalSubjects, alpha = 0.05, power = 0.8, twoSided = TRUE, modelType = "cox") {
if (totalEvents == 0) {
return(Inf)
}
Expand All @@ -83,15 +86,22 @@ computeMdrrFromAggregateStats <- function(pTarget, totalEvents, alpha = 0.05, po
} else {
z1MinAlpha <- qnorm(1 - alpha)
}
zBeta <- -qnorm(1 - power)

pComparator <- 1 - pTarget

if (modelType == "cox") {
zBeta <- -qnorm(1 - power)
pComparator <- 1 - pTarget
mdrr <- exp(sqrt((zBeta + z1MinAlpha)^2 / (totalEvents * pTarget * pComparator)))
} else if (modelType == "logistic") {
pBaseline <- totalEvents / totalSubjects
se <- sqrt((1 / (totalEvents * (1 - pBaseline))))
fun <- function(z) {
pnorm(z - z1MinAlpha) - power
}
z <- uniroot(fun, c(0, 1e+07))$root
mdrr <- exp(z * se)
} else {
return(NA)
stop("Unknown model type: ", modelType)
}
return(mdrr)
}

#' Get the distribution of follow-up time
Expand Down
5 changes: 3 additions & 2 deletions R/RunAnalyses.R
Original file line number Diff line number Diff line change
Expand Up @@ -1723,12 +1723,13 @@ summarizeResults <- function(referenceTable, outputFolder, mainFileName, interac
twoSided = FALSE,
upper = TRUE)
}
pTarget <- outcomeModel$populationCounts$targetExposures /
(outcomeModel$populationCounts$targetExposures + outcomeModel$populationCounts$comparatorExposures)
totalSubjects <- outcomeModel$populationCounts$targetExposures + outcomeModel$populationCounts$comparatorExposures
totalEvents <- outcomeModel$outcomeCounts$targetOutcomes + outcomeModel$outcomeCounts$comparatorOutcomes
pTarget <- outcomeModel$populationCounts$targetExposures / totalSubjects
mdrr <- computeMdrrFromAggregateStats(
pTarget = pTarget,
totalEvents = totalEvents,
totalSubjects = totalSubjects,
modelType = outcomeModel$outcomeModelType
)
attrition <- getAttritionTable(outcomeModel)
Expand Down
3 changes: 2 additions & 1 deletion man/computeMdrr.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

34 changes: 34 additions & 0 deletions tests/testthat/test-power.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
library(testthat)
library(CohortMethod)

test_that("Logistic regression power calculations", {
  alpha <- 0.05  # Significance level
  power <- 0.80  # Desired power
  pNull <- 0.1   # Baseline probability of the outcome
  n <- 400       # Total number of subjects

  # Compute the MDRR for a logistic model. NA is passed for pTarget because
  # the logistic branch derives its baseline rate from totalEvents / totalSubjects.
  mdrr <- CohortMethod:::computeMdrrFromAggregateStats(
    pTarget = NA,
    totalEvents = n * pNull,
    totalSubjects = n,
    alpha = alpha,
    power = power,
    twoSided = TRUE,
    modelType = "logistic"
  )

  # Invert the computation: given the returned MDRR, what power does a
  # two-sided Wald z-test achieve at this sample size and baseline rate?
  beta <- log(mdrr)                              # coefficient (log odds ratio)
  seBeta <- sqrt(1 / (n * pNull * (1 - pNull)))  # standard error of the coefficient
  zValue <- beta / seBeta                        # Wald z statistic
  zCritical <- qnorm(1 - alpha / 2)              # two-sided critical value
  achievedPower <- pnorm(zValue - zCritical)

  # The round trip should recover the requested power.
  expect_equal(achievedPower, power, tolerance = 1e-3)
})

0 comments on commit 6b0ec84

Please sign in to comment.