From dda645d9e991817df8ef4b8be61985a695d05c37 Mon Sep 17 00:00:00 2001
From: overdodactyl
Date: Sat, 6 Jan 2024 14:42:34 -0700
Subject: [PATCH] docs: use @inherit instead of @inheritParams to get return
 value

---
 R/dx_metrics.R                 | 46 +++++++++++++++++-----------------
 man/dx_accuracy.Rd             |  6 +++++
 man/dx_balanced_accuracy.Rd    |  6 +++++
 man/dx_brier.Rd                |  6 +++++
 man/dx_detection_prevalence.Rd |  6 +++++
 man/dx_fdr.Rd                  |  6 +++++
 man/dx_fowlkes_mallows.Rd      |  6 +++++
 man/dx_g_mean.Rd               |  6 +++++
 man/dx_lrt_neg.Rd              |  6 +++++
 man/dx_lrt_pos.Rd              |  6 +++++
 man/dx_markedness.Rd           |  6 +++++
 man/dx_nir.Rd                  |  6 +++++
 man/dx_npv.Rd                  |  6 +++++
 man/dx_odds_ratio.Rd           |  6 +++++
 man/dx_prevalence.Rd           |  6 +++++
 man/fnr.Rd                     |  6 +++++
 man/fpr.Rd                     |  6 +++++
 man/informedness.Rd            |  6 +++++
 man/ppv.Rd                     |  6 +++++
 man/sensitivity.Rd             |  6 +++++
 man/specificity.Rd             |  6 +++++
 21 files changed, 143 insertions(+), 23 deletions(-)

diff --git a/R/dx_metrics.R b/R/dx_metrics.R
index 7d769bb..3097887 100644
--- a/R/dx_metrics.R
+++ b/R/dx_metrics.R
@@ -29,7 +29,7 @@ metricparams <- function() {
 #' over all cases from a confusion matrix object, providing a measure
 #' of the classifier's overall correctness.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' \eqn{Accuracy = \frac{True Positives + True Negatives}{Total Cases}}{Accuracy = (TP + TN) / N}
@@ -66,7 +66,7 @@ dx_accuracy <- function(cm, detail = "full", ...) {
 #' PPV can be influenced by the prevalence of the condition and should be used
 #' alongside other metrics.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' PPV, also known as precision, is the ratio of true positives to the sum of true
@@ -107,7 +107,7 @@ dx_precision <- dx_ppv
 #' Note that NPV, like other metrics, may not fully represent classifier performance
 #' in unbalanced datasets and should be used alongside other metrics.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' NPV is the ratio of true negatives to the sum of true and false negatives. It is an
@@ -143,7 +143,7 @@ dx_npv <- function(cm, detail = "full", ...) {
 #' as the miss rate and is a critical measure in evaluating the performance of a classifier,
 #' especially in scenarios where failing to detect positives is costly.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' FNR is an important measure in situations where the cost of missing a positive classification
@@ -181,7 +181,7 @@ dx_miss_rate <- dx_fnr
 #' that were incorrectly identified as positives by the classifier. FPR is also known
 #' as the fall-out rate and is crucial in evaluating the specificity of a classifier.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' FPR is particularly important in contexts where false alarms are costly. It is
@@ -219,7 +219,7 @@ dx_fall_out <- dx_fpr
 #' among all positive predictions. FDR is a critical measure in many classification contexts,
 #' particularly where the cost of a false positive is high.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' FDR is an important measure when the consequences of false discoveries (false positives)
@@ -254,7 +254,7 @@ dx_fdr <- function(cm, detail = "full", ...) {
 #' classifier. Sensitivity is a key measure in evaluating the effectiveness of a classifier
 #' in identifying positive instances.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Sensitivity or TPR is an important measure in scenarios where missing a positive
@@ -298,7 +298,7 @@ dx_tpr <- dx_sensitivity
 #' Specificity is a key measure in evaluating the effectiveness of a classifier in
 #' identifying negative instances.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Specificity or TNR measures how well the classifier can identify negative instances,
@@ -339,7 +339,7 @@ dx_tnr <- dx_specificity
 #' This metric is particularly useful for imbalanced datasets as it accounts for both the
 #' positive and negative classes equally and doesn't inherently favor the majority class.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Balanced Accuracy mitigates the issue of the regular accuracy metric favoring models
@@ -383,7 +383,7 @@ calculate_balanced_accuracy <- function(cm) {
 #' the condition of interest over the total number of cases. Prevalence provides
 #' a measure of how widespread a condition is within the population at a given time.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Prevalence is a measure of the burden of a condition or disease in a population.
@@ -419,7 +419,7 @@ dx_prevalence <- function(cm, detail = "full", ...) {
 #' positive by the classifier over the total number of cases. Detection Prevalence provides
 #' a measure of how often the condition is identified by the model, regardless of its actual prevalence.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Detection Prevalence is a measure of the frequency with which a classifier predicts
@@ -473,7 +473,7 @@ get_kappa_interpretation <- function(kappa) {
 #' to be a more robust measure than simple percent agreement calculation since
 #' Kappa takes into account the agreement occurring by chance.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @return If `detail` is "simple", returns a single numeric value of Cohen's Kappa.
 #' If `detail` is "full", returns a list or data frame that includes Cohen's Kappa,
@@ -560,7 +560,7 @@ dx_cohens_kappa <- function(cm, detail = "full") {
 #' disagreement between prediction and observation. The function can also return a
 #' confidence interval for the MCC value using bootstrapping if detail is set to "full".
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @return If `detail` is "simple", returns a single numeric value of MCC.
 #' If `detail` is "full", returns a data frame that includes MCC, its
@@ -682,7 +682,7 @@ ci_binomial <- function(x, n, citype = "exact", ...) {
 #' will occur given a particular exposure, compared to the odds of the outcome occurring
 #' in the absence of that exposure.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' The odds ratio is calculated as (TP * TN) / (FP * FN). It is used in case-control
@@ -713,7 +713,7 @@ dx_odds_ratio <- function(cm, detail = "full", ...) {
 #' LR- compares the probability of a negative test result among patients with the
 #' disease to the probability of a negative test result among patients without the disease.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' The negative likelihood ratio is calculated as (FN / (TP + FN)) / (TN / (FP + TN)).
@@ -743,7 +743,7 @@ dx_lrt_neg <- function(cm, detail = "full", ...) {
 #' LR+ compares the probability of a positive test result among patients with the
 #' disease to the probability of a positive test result among patients without the disease.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' The positive likelihood ratio is calculated as (TP / (TP + FN)) / (FP / (FP + TN)).
@@ -1008,7 +1008,7 @@ calculate_fbeta <- function(cm, beta = 1) {
 #' generalization of the F1 score, allowing different importance to precision
 #' and recall via the beta parameter.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #' @param beta The beta value determining the weight of precision in the F-score.
 #' @return Depending on the `detail` parameter, returns a single numeric value of
 #' F-beta or a data frame with the F-beta and its confidence intervals.
@@ -1086,7 +1086,7 @@ dx_f2 <- function(cm, detail = "full", boot = FALSE, bootreps = 1000) {
 #' of Sensitivity (True Positive Rate) and Specificity (True Negative Rate). It reflects the
 #' probability that a classifier is informed about the true class, ranging from -1 to 1.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Informedness is defined as \code{Informedness = Sensitivity + Specificity - 1}. It is the sum of the true positive rate
@@ -1131,7 +1131,7 @@ dx_youden_j <- dx_informedness
 #' of PPV (Positive Predictive Value) and NPV (Negative Predictive Value). It reflects the
 #' effectiveness of a classifier in marking class labels correctly, ranging from -1 to 1.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Markedness is defined as \code{Markedness = PPV + NPV - 1}. It is the sum of the proportions
@@ -1174,7 +1174,7 @@ calculate_markedness <- function(cm) {
 #' (True Positive Rate) and specificity (True Negative Rate), especially useful in
 #' imbalanced datasets.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' G-mean is the geometric mean of sensitivity and specificity. It tries to maximize
@@ -1215,7 +1215,7 @@ calculate_g_mean <- function(cm) {
 #' FM Index is the geometric mean of precision and recall, providing a balance
 #' measure between these two metrics.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' Fowlkes-Mallows Index is defined as the geometric mean of the precision (Positive Predictive Value)
@@ -1345,7 +1345,7 @@ get_roc <- function(true_varname, pred_varname, data, direction) {
 #' exclusive discrete outcomes. For binary classification, the Brier score is a measure of how far
 #' the predicted probabilities are from the actual outcomes.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #'
 #' @details
 #' The formula for the Brier score in a binary classification is:
@@ -1404,7 +1404,7 @@ calculate_brier <- function(truth, predprob) {
 #' It represents the accuracy that a naive model would achieve by always predicting
 #' the most frequent class. It's a baseline measure for classification performance.
 #'
-#' @inheritParams metrics-params
+#' @inherit metrics-params
 #' @examples
 #' cm <- dx_cm(dx_heart_failure$predicted, dx_heart_failure$truth, threshold = 0.5, poslabel = 1)
 #' nir <- dx_nir(cm)
diff --git a/man/dx_accuracy.Rd b/man/dx_accuracy.Rd
index 9afdd78..72b0ec6 100644
--- a/man/dx_accuracy.Rd
+++ b/man/dx_accuracy.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the proportion of correct predictions (True Positives + True Negatives)
 over all cases from a confusion matrix object, providing a measure
diff --git a/man/dx_balanced_accuracy.Rd b/man/dx_balanced_accuracy.Rd
index 2635ef8..ba1c0c4 100644
--- a/man/dx_balanced_accuracy.Rd
+++ b/man/dx_balanced_accuracy.Rd
@@ -18,6 +18,12 @@ via bootstrapping. Note, this can be slow.}
 \item{bootreps}{The number of bootstrap replications for calculating
 confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Balanced Accuracy, which is the average of sensitivity (recall) and specificity.
 This metric is particularly useful for imbalanced datasets as it accounts for both the
diff --git a/man/dx_brier.Rd b/man/dx_brier.Rd
index cf33ee3..3299f81 100644
--- a/man/dx_brier.Rd
+++ b/man/dx_brier.Rd
@@ -15,6 +15,12 @@ dx_brier(predprob, truth, detail = "full")
 "simple" for raw estimate, "full" for detailed estimate
 including 95\% confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 The Brier score is a proper score function that measures the accuracy of probabilistic
 predictions. It is applicable to tasks in which predictions must assign probabilities to a set of mutually
diff --git a/man/dx_detection_prevalence.Rd b/man/dx_detection_prevalence.Rd
index f460f51..1c37abd 100644
--- a/man/dx_detection_prevalence.Rd
+++ b/man/dx_detection_prevalence.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Detection Prevalence, which is the proportion of cases that are predicted
 positive by the classifier over the total number of cases. Detection Prevalence provides
diff --git a/man/dx_fdr.Rd b/man/dx_fdr.Rd
index dc5dfaa..cb80728 100644
--- a/man/dx_fdr.Rd
+++ b/man/dx_fdr.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the False Discovery Rate (FDR), which is the proportion of false positives
 among all positive predictions. FDR is a critical measure in many classification contexts,
diff --git a/man/dx_fowlkes_mallows.Rd b/man/dx_fowlkes_mallows.Rd
index 0c253aa..0edf901 100644
--- a/man/dx_fowlkes_mallows.Rd
+++ b/man/dx_fowlkes_mallows.Rd
@@ -18,6 +18,12 @@ via bootstrapping. Note, this can be slow.}
 \item{bootreps}{The number of bootstrap replications for calculating
 confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the Fowlkes-Mallows Index (FM Index) for the provided confusion matrix.
 FM Index is the geometric mean of precision and recall, providing a balance
diff --git a/man/dx_g_mean.Rd b/man/dx_g_mean.Rd
index 36f9e13..89b145f 100644
--- a/man/dx_g_mean.Rd
+++ b/man/dx_g_mean.Rd
@@ -18,6 +18,12 @@ via bootstrapping. Note, this can be slow.}
 \item{bootreps}{The number of bootstrap replications for calculating
 confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the Geometric Mean (G-mean) for the provided confusion matrix.
 G-mean is a measure of a model's performance that considers both the sensitivity
diff --git a/man/dx_lrt_neg.Rd b/man/dx_lrt_neg.Rd
index 5997a97..862e1f8 100644
--- a/man/dx_lrt_neg.Rd
+++ b/man/dx_lrt_neg.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the Negative Likelihood Ratio (LR-) from a confusion matrix object.
 LR- compares the probability of a negative test result among patients with the
diff --git a/man/dx_lrt_pos.Rd b/man/dx_lrt_pos.Rd
index aefd2fd..30b0532 100644
--- a/man/dx_lrt_pos.Rd
+++ b/man/dx_lrt_pos.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the Positive Likelihood Ratio (LR+) from a confusion matrix object.
 LR+ compares the probability of a positive test result among patients with the
diff --git a/man/dx_markedness.Rd b/man/dx_markedness.Rd
index f608dcd..fcc08b2 100644
--- a/man/dx_markedness.Rd
+++ b/man/dx_markedness.Rd
@@ -18,6 +18,12 @@ via bootstrapping. Note, this can be slow.}
 \item{bootreps}{The number of bootstrap replications for calculating
 confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Markedness for the provided confusion matrix. Markedness is a combined measure
 of PPV (Positive Predictive Value) and NPV (Negative Predictive Value). It reflects the
diff --git a/man/dx_nir.Rd b/man/dx_nir.Rd
index c49ecc0..523301d 100644
--- a/man/dx_nir.Rd
+++ b/man/dx_nir.Rd
@@ -13,6 +13,12 @@ dx_nir(cm, detail = "full")
 "simple" for raw estimate, "full" for detailed estimate
 including 95\% confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 The No Information Rate is the proportion of the largest class in the actual outcomes.
 It represents the accuracy that a naive model would achieve by always predicting
diff --git a/man/dx_npv.Rd b/man/dx_npv.Rd
index 6944698..bb202c4 100644
--- a/man/dx_npv.Rd
+++ b/man/dx_npv.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the proportion of true negatives out of the total predicted negatives
 (true negatives + false negatives), known as the Negative Predictive Value (NPV).
diff --git a/man/dx_odds_ratio.Rd b/man/dx_odds_ratio.Rd
index 7db3333..95349fa 100644
--- a/man/dx_odds_ratio.Rd
+++ b/man/dx_odds_ratio.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the Odds Ratio (OR) from a confusion matrix object. OR is a measure of
 association between exposure and an outcome. It represents the odds that an outcome
diff --git a/man/dx_prevalence.Rd b/man/dx_prevalence.Rd
index 2514869..746ad9c 100644
--- a/man/dx_prevalence.Rd
+++ b/man/dx_prevalence.Rd
@@ -16,6 +16,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Prevalence, which is the proportion of cases that are positive for
 the condition of interest over the total number of cases. Prevalence provides
diff --git a/man/fnr.Rd b/man/fnr.Rd
index 95e1dd0..f284002 100644
--- a/man/fnr.Rd
+++ b/man/fnr.Rd
@@ -20,6 +20,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the False Negative Rate (FNR), which is the proportion of actual positives
 that were incorrectly identified as negatives by the classifier. FNR is also known
diff --git a/man/fpr.Rd b/man/fpr.Rd
index 29b5a33..1af2f5d 100644
--- a/man/fpr.Rd
+++ b/man/fpr.Rd
@@ -20,6 +20,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the False Positive Rate (FPR), which is the proportion of actual negatives
 that were incorrectly identified as positives by the classifier. FPR is also known
diff --git a/man/informedness.Rd b/man/informedness.Rd
index 7e9b22b..4f07039 100644
--- a/man/informedness.Rd
+++ b/man/informedness.Rd
@@ -22,6 +22,12 @@ via bootstrapping. Note, this can be slow.}
 \item{bootreps}{The number of bootstrap replications for calculating
 confidence intervals.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Informedness for the provided confusion matrix. Informedness is a combined measure
 of Sensitivity (True Positive Rate) and Specificity (True Negative Rate). It reflects the
diff --git a/man/ppv.Rd b/man/ppv.Rd
index 9290df1..2b7c070 100644
--- a/man/ppv.Rd
+++ b/man/ppv.Rd
@@ -20,6 +20,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates the proportion of true positives out of the total predicted positives
 (true positives + false positives). PPV is also known as precision. Note that
diff --git a/man/sensitivity.Rd b/man/sensitivity.Rd
index b798ae6..97595e5 100644
--- a/man/sensitivity.Rd
+++ b/man/sensitivity.Rd
@@ -24,6 +24,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Sensitivity, also known as the True Positive Rate (TPR) or recall, which
 is the proportion of actual positives that are correctly identified as such by the
diff --git a/man/specificity.Rd b/man/specificity.Rd
index a257c5b..e3800c0 100644
--- a/man/specificity.Rd
+++ b/man/specificity.Rd
@@ -20,6 +20,12 @@ including 95\% confidence intervals.}
 \item{...}{Additional arguments to pass to metric_binomial function,
 such as `citype` for type of confidence interval method.}
 }
+\value{
+Depending on the `detail` parameter, returns a numeric value
+  representing the calculated metric or a data frame/tibble with
+  detailed diagnostics including confidence intervals and possibly other
+  metrics relevant to understanding the metric.
+}
 \description{
 Calculates Specificity, also known as the True Negative Rate (TNR), which is the
 proportion of actual negatives that are correctly identified as such by the classifier.
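
--
Why @inherit rather than @inheritParams: roxygen2's @inheritParams copies
only the @param entries from the source topic, so the @return text defined
on the shared metrics-params topic was never carried into the generated
.Rd files, and none of the metric helpers had a \value{} section. A bare
@inherit tag inherits all supported components (params, return, details,
and so on), which is why every man/*.Rd above gains a \value{} block.

A minimal sketch of the pattern, with the shared topic abridged (the
actual metrics-params block in R/dx_metrics.R may differ in wording) and
dx_example standing in as a hypothetical metric function:

#' Shared documentation for dx_* metric functions
#'
#' @param cm A confusion matrix object, e.g. as created by dx_cm().
#' @param detail A character string specifying the level of detail in the
#'   output: "simple" for raw estimate, "full" for detailed estimate
#'   including 95\% confidence intervals.
#' @return Depending on the `detail` parameter, returns a numeric value
#'   representing the calculated metric or a data frame/tibble with
#'   detailed diagnostics.
#' @name metrics-params
#' @keywords internal
NULL

#' @inherit metrics-params
#' @export
dx_example <- function(cm, detail = "full") {
  # ... compute and return the metric ...
}

If inheriting every component is too broad, roxygen2 also accepts an
explicit component list, e.g. @inherit metrics-params params return, which
pulls in only the parameter and return-value documentation.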