## ----include = FALSE----------------------------------------------------------
# Global knitr options for every chunk in this vignette: collapse source and
# output, prefix printed output with "#>", and use 8x6 inch default figures.
knitr::opts_chunk$set(collapse = TRUE, comment = "#>", fig.width = 8, fig.height = 6)

## ----setup--------------------------------------------------------------------
library(QualityMeasure)

## ----eval = FALSE-------------------------------------------------------------
# ### Data parameters
# n.entity = 100  # number of accountable entities
# n.obs = 50      # average number of observations per accountable entity
# mu = .2         # marginal probability of the outcome
# r = .7          # median reliability
# 
# ### Simulate data
# example_df_1 <- simulateData(n.entity = n.entity, n.obs = n.obs, mu = mu, r = r)

## -----------------------------------------------------------------------------
# Compute entity-level performance on the simulated data, then pull out the
# per-entity results table used by the summaries and plots below.
profiling.results <- calcPerformance(df = example_df_1)
perf.results <- profiling.results[["perf.results"]]

## ----results = 'asis', echo = FALSE-------------------------------------------
# Display a one-row table of summary statistics (min/quartiles/mean/max) of the
# unadjusted entity rates p. Uses `<-` for assignment (tidyverse style) and
# drops the redundant c() wrapper around the single "Unadjusted" label.
rate.summary <- rbind(summary(perf.results$p))  # coerce summary() to a 1-row matrix
perf.summary <- cbind(Method = 'Unadjusted', round(rate.summary, 3))
perf.summary <- as.data.frame(perf.summary)
knitr::kable(perf.summary, caption = 'Performance summary statistics across entities')

## -----------------------------------------------------------------------------
# Plot the distribution of per-entity observation counts
# (presumably a histogram — see plotN() docs in QualityMeasure).
plotN(perf.results$n)

## -----------------------------------------------------------------------------
# Plot the per-entity performance estimates from calcPerformance().
plotPerformance(df = perf.results)

## ----results = 'asis', echo = FALSE-------------------------------------------
# Count how many entities fall into each outcome-rate category (category.p)
# and render the counts as a table.
knitr::kable(table(perf.results$category.p), col.names = c('Category', 'Number of entities'), caption = 'Categorization of entities based on their outcome rates')

## -----------------------------------------------------------------------------
# Re-plot the same performance results on the odds-ratio ('OR') scale.
plotPerformance(df = perf.results, plot.type = 'OR')

## ----eval = FALSE-------------------------------------------------------------
# tutorial_BB_results <- calcBetaBin(df = example_df_1)

## -----------------------------------------------------------------------------
# Summarize the beta-binomial estimates; tutorial_BB_results is expected to be
# precomputed by the (eval = FALSE) calcBetaBin() call shown above.
summary(tutorial_BB_results$est.BB)

## ----eval = FALSE-------------------------------------------------------------
# df.agg <- data.frame(n = aggregate(y ~ entity, data = example_df_1, length)$y,
#                      x = aggregate(y ~ entity, data = example_df_1, sum)$y)
# 
# tutorial_BB_agg_results <- calcBetaBin(df = df.agg, df.aggregate = TRUE, n = 'n', x = 'x')

## ----echo = FALSE-------------------------------------------------------------
# Summarize beta-binomial estimates from the aggregated-data call above; the
# result should match the observation-level tutorial_BB_results.
summary(tutorial_BB_agg_results$est.BB)

## ----eval = FALSE-------------------------------------------------------------
# tutorial_reliability_results_1 <- calcReliability(df = example_df_1, ctrRel = controlRel(n.resamples = 100))

## ----results = 'asis', echo = FALSE-------------------------------------------
# Build a display table of reliability estimates per method. The three
# identical round() statements are collapsed into one lapply over the numeric
# reliability columns (DRY); display behavior is unchanged.
rel.results <- tutorial_reliability_results_1$rel.results
rel.cols <- c('reliability', 'reliability_min', 'reliability_max')
rel.results.sub <- rel.results[, c('method', rel.cols)]
rel.results.sub[rel.cols] <- lapply(rel.results.sub[rel.cols], round, 3)
names(rel.results.sub) <- c('Method', 'Reliability', 'Min Reliability', 'Max Reliability')

knitr::kable(rel.results.sub, caption = 'Reliability estimates')

## -----------------------------------------------------------------------------
# Plot the reliability estimates computed by calcReliability() above.
plotReliability(tutorial_reliability_results_1)

## ----eval = FALSE-------------------------------------------------------------
# ### Data parameters
# n.entity = 100  # number of accountable entities
# n.obs = 50 # average number of patients/cases per accountable entity
# mu = .2 # marginal probability of the outcome
# r = .7 # reliability for entity with a median number of patients
# beta1 = log(1.5) # parameter for risk adjustment model---coefficient for x1 which is simulated from a standard Normal
# 
# 
# ### Simulate data
# example_df_2 <- simulateData(n.entity = n.entity, n.obs = n.obs, mu = mu, r = r, beta1 = beta1)

## ----echo = FALSE, results = 'asis'-------------------------------------------
# Show the first 10 rows of the simulated data that includes the x1 covariate.
knitr::kable(head(example_df_2, 10), caption = 'Simulated data with a covariate')

## -----------------------------------------------------------------------------
# Risk-adjustment model formula, supplied as a string: outcome y on covariate
# x1 with a random intercept per accountable entity (lme4-style syntax).
# Uses `<-` rather than `=` for assignment (tidyverse style).
model <- 'y ~ x1 + (1 | entity)'

## -----------------------------------------------------------------------------
# Fit the risk-adjustment model to the covariate data and plot the resulting
# model estimates.
model.perf <- model_performance(df = example_df_2, model = model)
plotEstimates(model.perf)

## -----------------------------------------------------------------------------
# Plot the distribution of the model's predicted values.
plotPredictedDistribution(model.perf)

## -----------------------------------------------------------------------------
# Calibration plot comparing observed vs. predicted, grouped into 5 quantiles.
plotCalibration(model.perf, quantiles = 5)

## ----eval = FALSE-------------------------------------------------------------
# perf.out <- calcPerformance(df = example_df_2, model = model, ctrPerf = controlPerf(n.boots = 1000, n.cores = 2))
# tutorial_profiling_results <- perf.out$perf.results

## ----echo = FALSE, results = 'asis'-------------------------------------------
# Summary table of risk-adjusted performance; tutorial_profiling_results is
# expected to be precomputed by the (eval = FALSE) calcPerformance() call above.
knitr::kable(tutorial_profiling_results$perf.summary, caption = 'Performance summary statistics across entities')

## -----------------------------------------------------------------------------
# Plot the distribution of per-entity observation counts for the adjusted run.
plotN(tutorial_profiling_results$n)

## -----------------------------------------------------------------------------
# Default performance plot for the risk-adjusted results.
plotPerformance(df = tutorial_profiling_results)

## -----------------------------------------------------------------------------
# OE (observed/expected) risk-standardized rates.
plotPerformance(df = tutorial_profiling_results, plot.type = 'oe')

## ----echo = FALSE, results = 'asis'-------------------------------------------
# Entity counts per category under the OE risk-standardized classification.
knitr::kable(table(tutorial_profiling_results$category.oe), col.names = c('Category', 'Number of entities'), caption = 'Categorization of entities based on OE-risk-standardized rates')

## -----------------------------------------------------------------------------
# PE (predicted/expected) risk-standardized rates.
plotPerformance(df = tutorial_profiling_results, plot.type = 'pe')

## ----echo = FALSE, results = 'asis'-------------------------------------------
# Entity counts per category under the PE risk-standardized classification.
knitr::kable(table(tutorial_profiling_results$category.pe), col.names = c('Category', 'Number of entities'), caption = 'Categorization of entities based on PE-risk-standardized rates')

## -----------------------------------------------------------------------------
# Risk-adjusted results on the odds-ratio ('OR') scale.
plotPerformance(df = tutorial_profiling_results, plot.type = 'OR')

## ----eval = FALSE-------------------------------------------------------------
# tutorial_reliability_results_2 <- calcReliability(df = example_df_2, model = model)

## ----echo = FALSE, results = 'asis'-------------------------------------------
# Build a display table of reliability estimates for the risk-adjusted
# analysis. The three identical round() statements are collapsed into one
# lapply over the numeric reliability columns (DRY); display is unchanged.
rel.results2 <- tutorial_reliability_results_2$rel.results
rel.cols2 <- c('reliability', 'reliability_min', 'reliability_max')
rel.results.sub2 <- rel.results2[, c('method', rel.cols2)]
rel.results.sub2[rel.cols2] <- lapply(rel.results.sub2[rel.cols2], round, 3)
names(rel.results.sub2) <- c('Method', 'Reliability', 'Min Reliability', 'Max Reliability')

knitr::kable(rel.results.sub2, caption = 'Reliability estimates')

## -----------------------------------------------------------------------------
# Plot the reliability estimates for the risk-adjusted analysis.
plotReliability(tutorial_reliability_results_2)

