## ----include = FALSE----------------------------------------------------------
# Global knitr chunk options: collapse source and output into one block,
# prefix printed output with "#>", and default figures to 7 x 5 inches.
knitr::opts_chunk$set(
  collapse   = TRUE,
  comment    = "#>",
  fig.width  = 7,
  fig.height = 5
)

## ----setup, message=FALSE, warning=FALSE--------------------------------------
library(twoCoprimary)
library(dplyr)
library(tidyr)
library(knitr)

## ----table3_sozu--------------------------------------------------------------
# Recreate Sozu et al. (2010) Table III: per-group sample sizes for two
# co-primary binary endpoints under each approximate test (AN, ANc, AS, ASc).
library(dplyr)
library(tidyr)
library(readr)  # parse_number(), used in names_transform below

# Marginal response probabilities; p_{j,k} = group j (1 = treatment,
# 2 = control), endpoint k. Each row is one scenario from the table.
param_grid_bin_ss <- tibble(
  p11 = c(0.70, 0.87, 0.90, 0.95),
  p12 = c(0.70, 0.70, 0.90, 0.95),
  p21 = c(0.50, 0.70, 0.70, 0.90),
  p22 = c(0.50, 0.50, 0.70, 0.90)
)

result_bin_ss <- do.call(
  bind_rows,
  lapply(c("AN", "ANc", "AS", "ASc"), function(test) {
    do.call(
      bind_rows,
      design_table(
        param_grid = param_grid_bin_ss,
        rho_values = c(-0.3, 0, 0.3, 0.5, 0.8),
        r = 1,
        alpha = 0.025,
        beta = 0.2,
        endpoint_type = "binary",
        Test = test
      ) %>%
        mutate(Test = test)  # tag the rows with the test method
    )
  })
) %>%
  # Halve the rho_* columns to convert to the per-group n of Table III.
  # NOTE(review): assumes design_table() reports total N in rho_* — confirm.
  # (across() replaces the superseded mutate_at()/vars() idiom.)
  mutate(across(starts_with("rho_"), ~ .x / 2)) %>%
  pivot_longer(
    cols = starts_with("rho_"),
    names_to = "rho",
    values_to = "N",
    names_transform = list(rho = parse_number)
  ) %>%
  pivot_wider(names_from = Test, values_from = N) %>%
  drop_na(AN, ANc, AS, ASc) %>%
  as.data.frame()

kable(result_bin_ss,
      caption = "Table III: Sample Size per Group (n) for Two Co-Primary Binary Endpoints (α = 0.025, 1-β = 0.80)^a,b^",
      digits = 0,
      col.names = c("p₁,₁", "p₁,₂", "p₂,₁", "p₂,₂", "ρ", "AN", "ANc", "AS", "ASc"))

## ----example1-----------------------------------------------------------------
# Equal effects on both endpoints: 70% (treatment) vs 50% (control),
# i.e. p_{1,1} = p_{1,2} = 0.7 and p_{2,1} = p_{2,2} = 0.5.
ss_equal <- ss2BinaryApprox(
  p11 = 0.7, p12 = 0.7,    # treatment-group rates, endpoints 1 and 2
  p21 = 0.5, p22 = 0.5,    # control-group rates, endpoints 1 and 2
  rho1 = 0.5,              # endpoint correlation, treatment group
  rho2 = 0.5,              # endpoint correlation, control group
  r = 1,                   # 1:1 allocation
  alpha = 0.025,
  beta = 0.2,              # target power 1 - beta = 0.80
  Test = "AN"
)

print(ss_equal)

## ----example2-----------------------------------------------------------------
# Unequal effect sizes:
#   endpoint 1: 75% vs 65% (10-point difference)
#   endpoint 2: 80% vs 60% (20-point difference)
ss_unequal <- ss2BinaryApprox(
  p11 = 0.75, p12 = 0.80,
  p21 = 0.65, p22 = 0.60,
  rho1 = 0.3, rho2 = 0.3,
  r = 1, alpha = 0.025, beta = 0.2,
  Test = "AN"
)

print(ss_unequal)

# Benchmark against the corresponding single-endpoint designs.
ss_ep1 <- ss1BinaryApprox(
  p1 = 0.75, p2 = 0.65,
  r = 1, alpha = 0.025, beta = 0.2, Test = "AN"
)
ss_ep2 <- ss1BinaryApprox(
  p1 = 0.80, p2 = 0.60,
  r = 1, alpha = 0.025, beta = 0.2, Test = "AN"
)

cat("Single endpoint 1 sample size:", ss_ep1$n2, "\n")
cat("Single endpoint 2 sample size:", ss_ep2$n2, "\n")
cat("Co-primary sample size:", ss_unequal$n2, "\n")

## ----correlation_effect-------------------------------------------------------
# How does the between-endpoint correlation affect the required sample size
# when effects are fixed at p_{1,1} = p_{1,2} = 0.7 vs p_{2,1} = p_{2,2} = 0.5?
p11 <- 0.7
p21 <- 0.5
p12 <- 0.7
p22 <- 0.5

# Range of correlations to scan (same in both groups).
rho_values <- c(-0.3, 0, 0.3, 0.5, 0.8)

# vapply() instead of sapply(): guarantees a numeric vector is returned
# regardless of input length.
ss_by_rho <- vapply(rho_values, function(rho) {
  ss <- ss2BinaryApprox(
    p11 = p11, p12 = p12,
    p21 = p21, p22 = p22,
    rho1 = rho, rho2 = rho,
    r = 1,
    alpha = 0.025,
    beta = 0.2,
    Test = "AN"
  )
  ss$n2  # per-group sample size
}, numeric(1))

result_df <- data.frame(
  rho = rho_values,
  n_per_group = ss_by_rho,
  N_total = ss_by_rho * 2  # total N under 1:1 allocation
)

kable(result_df,
      caption = "Sample Size vs Correlation (p₁,₁ = p₁,₂ = 0.7, p₂,₁ = p₂,₂ = 0.5)",
      col.names = c("ρ", "n per group", "N total"))

# Visualize: sample size shrinks (or grows) with correlation.
plot(rho_values, ss_by_rho,
     type = "b", pch = 19,
     xlab = "Correlation (ρ)",
     ylab = "Sample size per group (n)",
     main = "Effect of Correlation on Sample Size",
     ylim = c(min(ss_by_rho) * 0.9, max(ss_by_rho) * 1.1))
grid()

## ----unbalanced---------------------------------------------------------------
# Unbalanced design: allocate twice as many subjects to treatment as control.
ss_unbalanced <- ss2BinaryApprox(
  p11 = 0.7, p12 = 0.7,
  p21 = 0.5, p22 = 0.5,
  rho1 = 0.5, rho2 = 0.5,
  r = 2,                   # treatment:control = 2:1
  alpha = 0.025, beta = 0.2,
  Test = "AN"
)

print(ss_unbalanced)
cat("Total sample size:", ss_unbalanced$N, "\n")

## ----power_verification-------------------------------------------------------
# Verify that the sample sizes found in Example 1 achieve the target power.
power_result <- power2BinaryApprox(
  n1 = ss_equal$n1, n2 = ss_equal$n2,  # sizes from Example 1
  p11 = 0.7, p12 = 0.7,
  p21 = 0.5, p22 = 0.5,
  rho1 = 0.5, rho2 = 0.5,
  alpha = 0.025,
  Test = "AN"
)

cat("Target power: 0.80\n")
cat("Achieved power (Endpoint 1):", round(power_result$power1, 4), "\n")
cat("Achieved power (Endpoint 2):", round(power_result$power2, 4), "\n")
cat("Achieved power (Co-primary):", round(power_result$powerCoprimary, 4), "\n")

## ----method_comparison--------------------------------------------------------
# Compare the sample sizes required by the four approximate test methods
# for one fixed design.
p11 <- 0.80
p12 <- 0.70
p21 <- 0.55
p22 <- 0.45
rho <- 0.7

# One single-row data frame of results per method, bound below.
methods <- c("AN", "ANc", "AS", "ASc")
comparison <- lapply(methods, function(method) {
  ss <- ss2BinaryApprox(
    p11 = p11, p12 = p12,
    p21 = p21, p22 = p22,
    rho1 = rho, rho2 = rho,
    r = 1,
    alpha = 0.025,
    beta = 0.2,
    Test = method
  )

  data.frame(
    Method = method,
    n_per_group = ss$n2,
    N_total = ss$N
  )
})

comparison_table <- bind_rows(comparison)

# FIX: the caption previously quoted the wrong design (p = 0.7/0.5, ρ = 0.5);
# it now matches the parameters actually used above.
kable(comparison_table,
      caption = "Comparison of Test Methods (p₁,₁ = 0.80, p₁,₂ = 0.70, p₂,₁ = 0.55, p₂,₂ = 0.45, ρ = 0.7)")

